From 0b51891e5977b87f986f4db2cbbe09295cfdbedc Mon Sep 17 00:00:00 2001
From: "hwajeong.son"
Date: Mon, 20 Aug 2018 13:30:55 +0900
Subject: [PATCH] Tizen_4.0 base

Signed-off-by: hwajeong.son
---
 AUTHORS | 1854 ++
 CHANGELOG.md | 40 +
 CONTRIBUTING.md | 455 +
 Dockerfile | 227 +
 Dockerfile.aarch64 | 201 +
 Dockerfile.armhf | 181 +
 Dockerfile.build.aarch64 | 17 +
 Dockerfile.build.arm | 18 +
 Dockerfile.build.i386 | 13 +
 Dockerfile.build.x86_64 | 13 +
 Dockerfile.ppc64le | 188 +
 Dockerfile.s390x | 181 +
 Dockerfile.simple | 72 +
 Dockerfile.solaris | 19 +
 Dockerfile.windows | 256 +
 FAQ.md | 24 +
 LICENSE | 191 +
 MAINTAINERS | 462 +
 Makefile | 202 +
 NOTICE | 19 +
 README.md | 111 +-
 ROADMAP.md | 118 +
 VENDORING.md | 46 +
 VERSION | 1 +
 api/README.md | 42 +
 api/common.go | 65 +
 api/common_test.go | 77 +
 api/common_unix.go | 6 +
 api/common_windows.go | 8 +
 api/errors/errors.go | 47 +
 api/errors/errors_test.go | 64 +
 api/fixtures/keyfile | 7 +
 api/names.go | 9 +
 api/server/backend/build/backend.go | 90 +
 api/server/backend/build/tag.go | 84 +
 api/server/httputils/decoder.go | 16 +
 api/server/httputils/errors.go | 145 +
 api/server/httputils/form.go | 70 +
 api/server/httputils/form_test.go | 105 +
 api/server/httputils/httputils.go | 97 +
 api/server/httputils/httputils_test.go | 18 +
 api/server/httputils/httputils_write_json.go | 15 +
 api/server/httputils/write_log_stream.go | 94 +
 api/server/middleware.go | 24 +
 api/server/middleware/cors.go | 37 +
 api/server/middleware/debug.go | 76 +
 api/server/middleware/experimental.go | 29 +
 api/server/middleware/middleware.go | 13 +
 api/server/middleware/version.go | 51 +
 api/server/middleware/version_test.go | 57 +
 api/server/profiler.go | 46 +
 api/server/router/build/backend.go | 21 +
 api/server/router/build/build.go | 29 +
 api/server/router/build/build_routes.go | 262 +
 api/server/router/checkpoint/backend.go | 10 +
 api/server/router/checkpoint/checkpoint.go | 36 +
 api/server/router/checkpoint/checkpoint_routes.go | 65 +
 api/server/router/container/backend.go | 79 +
 api/server/router/container/container.go | 77 +
 api/server/router/container/container_routes.go | 608 +
 api/server/router/container/copy.go | 118 +
 api/server/router/container/exec.go | 140 +
 api/server/router/container/inspect.go | 21 +
 api/server/router/distribution/backend.go | 14 +
 api/server/router/distribution/distribution.go | 31 +
 .../router/distribution/distribution_routes.go | 138 +
 api/server/router/experimental.go | 67 +
 api/server/router/image/backend.go | 47 +
 api/server/router/image/image.go | 51 +
 api/server/router/image/image_routes.go | 400 +
 api/server/router/local.go | 104 +
 api/server/router/network/backend.go | 22 +
 api/server/router/network/filter.go | 87 +
 api/server/router/network/filter_test.go | 149 +
 api/server/router/network/network.go | 44 +
 api/server/router/network/network_routes.go | 472 +
 api/server/router/plugin/backend.go | 26 +
 api/server/router/plugin/plugin.go | 39 +
 api/server/router/plugin/plugin_routes.go | 310 +
 api/server/router/router.go | 19 +
 api/server/router/session/backend.go | 12 +
 api/server/router/session/session.go | 29 +
 api/server/router/session/session_routes.go | 16 +
 api/server/router/swarm/backend.go | 47 +
 api/server/router/swarm/cluster.go | 63 +
 api/server/router/swarm/cluster_routes.go | 492 +
 api/server/router/swarm/helpers.go | 65 +
 api/server/router/system/backend.go | 21 +
 api/server/router/system/system.go | 42 +
 api/server/router/system/system_routes.go | 192 +
 api/server/router/volume/backend.go | 19 +
 api/server/router/volume/volume.go | 36 +
 api/server/router/volume/volume_routes.go | 85 +
 api/server/router_swapper.go | 30 +
 api/server/server.go | 208 +
 api/server/server_test.go | 46 +
 api/swagger-gen.yaml | 12 +
 api/swagger.yaml | 8584 +++++++++
 api/templates/server/operation.gotmpl | 26 +
 api/types/auth.go | 22 +
 api/types/backend/backend.go | 104 +
 api/types/backend/build.go | 44 +
 api/types/blkiodev/blkio.go | 23 +
 api/types/client.go | 390 +
 api/types/configs.go | 70 +
 api/types/container/config.go | 69 +
 api/types/container/container_changes.go | 21 +
 api/types/container/container_create.go | 21 +
 api/types/container/container_top.go | 21 +
 api/types/container/container_update.go | 17 +
 api/types/container/container_wait.go | 17 +
 api/types/container/host_config.go | 380 +
 api/types/container/hostconfig_unix.go | 41 +
 api/types/container/hostconfig_windows.go | 54 +
 api/types/container/waitcondition.go | 22 +
 api/types/error_response.go | 13 +
 api/types/events/events.go | 50 +
 api/types/filters/parse.go | 310 +
 api/types/filters/parse_test.go | 417 +
 api/types/graph_driver_data.go | 17 +
 api/types/id_response.go | 13 +
 api/types/image/image_history.go | 37 +
 api/types/image_delete_response_item.go | 15 +
 api/types/image_summary.go | 49 +
 api/types/mount/mount.go | 128 +
 api/types/network/network.go | 108 +
 api/types/plugin.go | 200 +
 api/types/plugin_device.go | 25 +
 api/types/plugin_env.go | 25 +
 api/types/plugin_interface_type.go | 21 +
 api/types/plugin_mount.go | 37 +
 api/types/plugin_responses.go | 79 +
 api/types/plugins/logdriver/entry.pb.go | 449 +
 api/types/plugins/logdriver/entry.proto | 8 +
 api/types/plugins/logdriver/gen.go | 3 +
 api/types/plugins/logdriver/io.go | 87 +
 api/types/port.go | 23 +
 api/types/registry/authenticate.go | 21 +
 api/types/registry/registry.go | 119 +
 api/types/seccomp.go | 93 +
 api/types/service_update_response.go | 12 +
 api/types/stats.go | 181 +
 api/types/strslice/strslice.go | 30 +
 api/types/strslice/strslice_test.go | 86 +
 api/types/swarm/common.go | 40 +
 api/types/swarm/config.go | 31 +
 api/types/swarm/container.go | 72 +
 api/types/swarm/network.go | 119 +
 api/types/swarm/node.go | 115 +
 api/types/swarm/runtime.go | 19 +
 api/types/swarm/secret.go | 31 +
 api/types/swarm/service.go | 124 +
 api/types/swarm/swarm.go | 217 +
 api/types/swarm/task.go | 149 +
 api/types/time/duration_convert.go | 12 +
 api/types/time/duration_convert_test.go | 26 +
 api/types/time/timestamp.go | 124 +
 api/types/time/timestamp_test.go | 93 +
 api/types/types.go | 575 +
 api/types/versions/README.md | 14 +
 api/types/versions/compare.go | 62 +
 api/types/versions/compare_test.go | 26 +
 api/types/versions/v1p19/types.go | 35 +
 api/types/versions/v1p20/types.go | 40 +
 api/types/volume.go | 61 +
 api/types/volume/volumes_create.go | 29 +
 api/types/volume/volumes_list.go | 23 +
 build-allarch.sh | 18 +
 build-arm.sh | 30 +
 build.sh | 42 +
 builder/builder.go | 105 +
 builder/dockerfile/bflag.go | 183 +
 builder/dockerfile/bflag_test.go | 187 +
 builder/dockerfile/buildargs.go | 148 +
 builder/dockerfile/buildargs_test.go | 100 +
 builder/dockerfile/builder.go | 421 +
 builder/dockerfile/builder_test.go | 34 +
 builder/dockerfile/builder_unix.go | 7 +
 builder/dockerfile/builder_windows.go | 8 +
 builder/dockerfile/clientsession.go | 78 +
 builder/dockerfile/command/command.go | 46 +
 builder/dockerfile/containerbackend.go | 144 +
 builder/dockerfile/copy.go | 444 +
 builder/dockerfile/copy_test.go | 45 +
 builder/dockerfile/copy_unix.go | 36 +
 builder/dockerfile/copy_windows.go | 8 +
 builder/dockerfile/dispatchers.go | 884 +
 builder/dockerfile/dispatchers_test.go | 525 +
 builder/dockerfile/dispatchers_unix.go | 34 +
 builder/dockerfile/dispatchers_unix_test.go | 33 +
 builder/dockerfile/dispatchers_windows.go | 93 +
 builder/dockerfile/dispatchers_windows_test.go | 40 +
 builder/dockerfile/envVarTest | 121 +
 builder/dockerfile/evaluator.go | 327 +
 builder/dockerfile/evaluator_test.go | 210 +
 builder/dockerfile/evaluator_unix.go | 9 +
 builder/dockerfile/evaluator_windows.go | 13 +
 builder/dockerfile/imagecontext.go | 211 +
 builder/dockerfile/imageprobe.go | 63 +
 builder/dockerfile/internals.go | 310 +
 builder/dockerfile/internals_test.go | 131 +
 builder/dockerfile/internals_unix.go | 42 +
 builder/dockerfile/internals_windows.go | 95 +
 builder/dockerfile/internals_windows_test.go | 53 +
 builder/dockerfile/metrics.go | 44 +
 builder/dockerfile/mockbackend_test.go | 130 +
 builder/dockerfile/parser/dumper/main.go | 32 +
 builder/dockerfile/parser/json_test.go | 59 +
 builder/dockerfile/parser/line_parsers.go | 399 +
 builder/dockerfile/parser/line_parsers_test.go | 74 +
 builder/dockerfile/parser/parser.go | 355 +
 builder/dockerfile/parser/parser_test.go | 154 +
 builder/dockerfile/parser/split_command.go | 118 +
 builder/dockerfile/parser/testfile-line/Dockerfile | 31 +
 .../testfiles-negative/env_no_value/Dockerfile | 3 +
 .../shykes-nested-json/Dockerfile | 1 +
 .../parser/testfiles/ADD-COPY-with-JSON/Dockerfile | 11 +
 .../parser/testfiles/ADD-COPY-with-JSON/result | 10 +
 .../testfiles/brimstone-consuldock/Dockerfile | 26 +
 .../parser/testfiles/brimstone-consuldock/result | 5 +
 .../testfiles/brimstone-docker-consul/Dockerfile | 52 +
 .../testfiles/brimstone-docker-consul/result | 9 +
 .../parser/testfiles/continue-at-eof/Dockerfile | 3 +
 .../parser/testfiles/continue-at-eof/result | 2 +
 .../parser/testfiles/continueIndent/Dockerfile | 36 +
 .../parser/testfiles/continueIndent/result | 10 +
 .../parser/testfiles/cpuguy83-nagios/Dockerfile | 54 +
 .../parser/testfiles/cpuguy83-nagios/result | 40 +
 .../dockerfile/parser/testfiles/docker/Dockerfile | 102 +
 builder/dockerfile/parser/testfiles/docker/result | 24 +
 builder/dockerfile/parser/testfiles/env/Dockerfile | 23 +
 builder/dockerfile/parser/testfiles/env/result | 16 +
 .../testfiles/escape-after-comment/Dockerfile | 9 +
 .../parser/testfiles/escape-after-comment/result | 3 +
 .../parser/testfiles/escape-nonewline/Dockerfile | 7 +
 .../parser/testfiles/escape-nonewline/result | 3 +
 .../dockerfile/parser/testfiles/escape/Dockerfile | 6 +
 builder/dockerfile/parser/testfiles/escape/result | 3 +
 .../dockerfile/parser/testfiles/escapes/Dockerfile | 14 +
 builder/dockerfile/parser/testfiles/escapes/result | 6 +
 .../dockerfile/parser/testfiles/flags/Dockerfile | 10 +
 builder/dockerfile/parser/testfiles/flags/result | 10 +
 .../dockerfile/parser/testfiles/health/Dockerfile | 10 +
 builder/dockerfile/parser/testfiles/health/result | 9 +
 .../parser/testfiles/influxdb/Dockerfile | 15 +
 .../dockerfile/parser/testfiles/influxdb/result | 11 +
 .../Dockerfile | 1 +
 .../result | 1 +
 .../Dockerfile | 1 +
 .../jeztah-invalid-json-json-inside-string/result | 1 +
 .../jeztah-invalid-json-single-quotes/Dockerfile | 1 +
 .../jeztah-invalid-json-single-quotes/result | 1 +
 .../Dockerfile | 1 +
 .../result | 1 +
 .../Dockerfile | 1 +
 .../jeztah-invalid-json-unterminated-string/result | 1 +
 .../dockerfile/parser/testfiles/json/Dockerfile | 8 +
 builder/dockerfile/parser/testfiles/json/result | 8 +
 .../kartar-entrypoint-oddities/Dockerfile | 7 +
 .../testfiles/kartar-entrypoint-oddities/result | 7 +
 .../lk4d4-the-edge-case-generator/Dockerfile | 48 +
 .../testfiles/lk4d4-the-edge-case-generator/result | 29 +
 .../dockerfile/parser/testfiles/mail/Dockerfile | 16 +
 builder/dockerfile/parser/testfiles/mail/result | 14 +
 .../parser/testfiles/multiple-volumes/Dockerfile | 3 +
 .../parser/testfiles/multiple-volumes/result | 2 +
 .../dockerfile/parser/testfiles/mumble/Dockerfile | 7 +
 builder/dockerfile/parser/testfiles/mumble/result | 4 +
 .../dockerfile/parser/testfiles/nginx/Dockerfile | 14 +
 builder/dockerfile/parser/testfiles/nginx/result | 11 +
 builder/dockerfile/parser/testfiles/tf2/Dockerfile | 23 +
 builder/dockerfile/parser/testfiles/tf2/result | 20 +
 .../dockerfile/parser/testfiles/weechat/Dockerfile | 9 +
 builder/dockerfile/parser/testfiles/weechat/result | 6 +
 builder/dockerfile/parser/testfiles/znc/Dockerfile | 7 +
 builder/dockerfile/parser/testfiles/znc/result | 5 +
 builder/dockerfile/shell_parser.go | 344 +
 builder/dockerfile/shell_parser_test.go | 151 +
 builder/dockerfile/support.go | 19 +
 builder/dockerfile/support_test.go | 65 +
 builder/dockerfile/utils_test.go | 50 +
 builder/dockerfile/wordsTest | 30 +
 builder/dockerignore/dockerignore.go | 64 +
 builder/dockerignore/dockerignore_test.go | 69 +
 builder/fscache/fscache.go | 602 +
 builder/fscache/fscache_test.go | 131 +
 builder/fscache/naivedriver.go | 28 +
 builder/remotecontext/archive.go | 128 +
 builder/remotecontext/detect.go | 184 +
 builder/remotecontext/detect_test.go | 123 +
 builder/remotecontext/filehash.go | 45 +
 builder/remotecontext/generate.go | 3 +
 builder/remotecontext/git.go | 29 +
 builder/remotecontext/git/gitutils.go | 159 +
 builder/remotecontext/git/gitutils_test.go | 238 +
 builder/remotecontext/lazycontext.go | 101 +
 builder/remotecontext/mimetype.go | 27 +
 builder/remotecontext/mimetype_test.go | 16 +
 builder/remotecontext/remote.go | 134 +
 builder/remotecontext/remote_test.go | 263 +
 builder/remotecontext/tarsum.go | 174 +
 builder/remotecontext/tarsum.pb.go | 525 +
 builder/remotecontext/tarsum.proto | 7 +
 builder/remotecontext/tarsum_test.go | 157 +
 builder/remotecontext/utils_test.go | 55 +
 cli/cobra.go | 150 +
 cli/config/configdir.go | 25 +
 cli/debug/debug.go | 26 +
 cli/debug/debug_test.go | 43 +
 cli/error.go | 33 +
 cli/required.go | 27 +
 client/README.md | 35 +
 client/build_prune.go | 30 +
 client/checkpoint_create.go | 13 +
 client/checkpoint_create_test.go | 73 +
 client/checkpoint_delete.go | 20 +
 client/checkpoint_delete_test.go | 54 +
 client/checkpoint_list.go | 32 +
 client/checkpoint_list_test.go | 68 +
 client/client.go | 309 +
 client/client_mock_test.go | 45 +
 client/client_test.go | 344 +
 client/client_unix.go | 6 +
 client/client_windows.go | 4 +
 client/config_create.go | 25 +
 client/config_create_test.go | 69 +
 client/config_inspect.go | 37 +
 client/config_inspect_test.go | 78 +
 client/config_list.go | 38 +
 client/config_list_test.go | 106 +
 client/config_remove.go | 13 +
 client/config_remove_test.go | 59 +
 client/config_update.go | 21 +
 client/config_update_test.go | 60 +
 client/container_attach.go | 37 +
 client/container_commit.go | 55 +
 client/container_commit_test.go | 96 +
 client/container_copy.go | 102 +
 client/container_copy_test.go | 244 +
 client/container_create.go | 56 +
 client/container_create_test.go | 118 +
 client/container_diff.go | 23 +
 client/container_diff_test.go | 61 +
 client/container_exec.go | 54 +
 client/container_exec_test.go | 157 +
 client/container_export.go | 20 +
 client/container_export_test.go | 50 +
 client/container_inspect.go | 54 +
 client/container_inspect_test.go | 125 +
 client/container_kill.go | 17 +
 client/container_kill_test.go | 46 +
 client/container_list.go | 56 +
 client/container_list_test.go | 96 +
 client/container_logs.go | 52 +
 client/container_logs_test.go | 133 +
 client/container_pause.go | 10 +
 client/container_pause_test.go | 41 +
 client/container_prune.go | 36 +
 client/container_prune_test.go | 124 +
 client/container_remove.go | 27 +
 client/container_remove_test.go | 59 +
 client/container_rename.go | 16 +
 client/container_rename_test.go | 46 +
 client/container_resize.go | 29 +
 client/container_resize_test.go | 82 +
 client/container_restart.go | 22 +
 client/container_restart_test.go | 48 +
 client/container_start.go | 24 +
 client/container_start_test.go | 58 +
 client/container_stats.go | 26 +
 client/container_stats_test.go | 70 +
 client/container_stop.go | 21 +
 client/container_stop_test.go | 48 +
 client/container_top.go | 28 +
 client/container_top_test.go | 74 +
 client/container_unpause.go | 10 +
 client/container_unpause_test.go | 41 +
 client/container_update.go | 22 +
 client/container_update_test.go | 58 +
 client/container_wait.go | 84 +
 client/container_wait_test.go | 74 +
 client/disk_usage.go | 26 +
 client/disk_usage_test.go | 55 +
 client/distribution_inspect.go | 35 +
 client/distribution_inspect_test.go | 18 +
 client/errors.go | 300 +
 client/events.go | 102 +
 client/events_test.go | 165 +
 client/hijack.go | 206 +
 client/image_build.go | 134 +
 client/image_build_test.go | 233 +
 client/image_create.go | 34 +
 client/image_create_test.go | 76 +
 client/image_delta.go | 22 +
 client/image_history.go | 22 +
 client/image_history_test.go | 60 +
 client/image_import.go | 37 +
 client/image_import_test.go | 81 +
 client/image_inspect.go | 33 +
 client/image_inspect_test.go | 71 +
 client/image_list.go | 45 +
 client/image_list_test.go | 159 +
 client/image_load.go | 30 +
 client/image_load_test.go | 95 +
 client/image_prune.go | 36 +
 client/image_prune_test.go | 119 +
 client/image_pull.go | 61 +
 client/image_pull_test.go | 199 +
 client/image_push.go | 56 +
 client/image_push_test.go | 180 +
 client/image_remove.go | 31 +
 client/image_remove_test.go | 95 +
 client/image_save.go | 22 +
 client/image_save_test.go | 58 +
 client/image_search.go | 51 +
 client/image_search_test.go | 165 +
 client/image_tag.go | 37 +
 client/image_tag_test.go | 143 +
 client/info.go | 26 +
 client/info_test.go | 76 +
 client/interface.go | 195 +
 client/interface_experimental.go | 17 +
 client/interface_stable.go | 10 +
 client/login.go | 29 +
 client/network_connect.go | 18 +
 client/network_connect_test.go | 111 +
 client/network_create.go | 25 +
 client/network_create_test.go | 72 +
 client/network_disconnect.go | 14 +
 client/network_disconnect_test.go | 64 +
 client/network_inspect.go | 50 +
 client/network_inspect_test.go | 107 +
 client/network_list.go | 31 +
 client/network_list_test.go | 108 +
 client/network_prune.go | 36 +
 client/network_prune_test.go | 112 +
 client/network_remove.go | 10 +
 client/network_remove_test.go | 47 +
 client/node_inspect.go | 33 +
 client/node_inspect_test.go | 65 +
 client/node_list.go | 36 +
 client/node_list_test.go | 94 +
 client/node_remove.go | 21 +
 client/node_remove_test.go | 69 +
 client/node_update.go | 18 +
 client/node_update_test.go | 49 +
 client/parse_logs.go | 41 +
 client/parse_logs_test.go | 36 +
 client/ping.go | 32 +
 client/ping_test.go | 82 +
 client/plugin_create.go | 26 +
 client/plugin_disable.go | 19 +
 client/plugin_disable_test.go | 48 +
 client/plugin_enable.go | 19 +
 client/plugin_enable_test.go | 48 +
 client/plugin_inspect.go | 32 +
 client/plugin_inspect_test.go | 54 +
 client/plugin_install.go | 113 +
 client/plugin_list.go | 32 +
 client/plugin_list_test.go | 107 +
 client/plugin_push.go | 17 +
 client/plugin_push_test.go | 51 +
 client/plugin_remove.go | 20 +
 client/plugin_remove_test.go | 49 +
 client/plugin_set.go | 12 +
 client/plugin_set_test.go | 47 +
 client/plugin_upgrade.go | 39 +
 client/request.go | 262 +
 client/request_test.go | 92 +
 client/secret_create.go | 25 +
 client/secret_create_test.go | 69 +
 client/secret_inspect.go | 37 +
 client/secret_inspect_test.go | 78 +
 client/secret_list.go | 38 +
 client/secret_list_test.go | 106 +
 client/secret_remove.go | 13 +
 client/secret_remove_test.go | 59 +
 client/secret_update.go | 21 +
 client/secret_update_test.go | 60 +
 client/service_create.go | 112 +
 client/service_create_test.go | 210 +
 client/service_inspect.go | 38 +
 client/service_inspect_test.go | 66 +
 client/service_list.go | 35 +
 client/service_list_test.go | 94 +
 client/service_logs.go | 52 +
 client/service_logs_test.go | 133 +
 client/service_remove.go | 10 +
 client/service_remove_test.go | 47 +
 client/service_update.go | 72 +
 client/service_update_test.go | 77 +
 client/session.go | 19 +
 client/session/filesync/diffcopy.go | 30 +
 client/session/filesync/filesync.go | 173 +
 client/session/filesync/filesync.pb.go | 575 +
 client/session/filesync/filesync.proto | 15 +
 client/session/filesync/generate.go | 3 +
 client/session/filesync/tarstream.go | 83 +
 client/session/grpc.go | 62 +
 client/session/manager.go | 187 +
 client/session/session.go | 117 +
 client/swarm_get_unlock_key.go | 21 +
 client/swarm_get_unlock_key_test.go | 60 +
 client/swarm_init.go | 21 +
 client/swarm_init_test.go | 54 +
 client/swarm_inspect.go | 21 +
 client/swarm_inspect_test.go | 56 +
 client/swarm_join.go | 13 +
 client/swarm_join_test.go | 51 +
 client/swarm_leave.go | 18 +
 client/swarm_leave_test.go | 66 +
 client/swarm_unlock.go | 13 +
 client/swarm_unlock_test.go | 49 +
 client/swarm_update.go | 22 +
 client/swarm_update_test.go | 49 +
 client/task_inspect.go | 34 +
 client/task_inspect_test.go | 54 +
 client/task_list.go | 35 +
 client/task_list_test.go | 94 +
 client/task_logs.go | 52 +
 client/testdata/ca.pem | 18 +
 client/testdata/cert.pem | 18 +
 client/testdata/key.pem | 27 +
 client/transport.go | 25 +
 client/utils.go | 34 +
 client/version.go | 21 +
 client/volume_create.go | 21 +
 client/volume_create_test.go | 75 +
 client/volume_inspect.go | 38 +
 client/volume_inspect_test.go | 76 +
 client/volume_list.go | 32 +
 client/volume_list_test.go | 98 +
 client/volume_prune.go | 36 +
 client/volume_remove.go | 21 +
 client/volume_remove_test.go | 47 +
 cmd/docker/main.go | 41 +
 cmd/dockerd/README.md | 3 +
 cmd/dockerd/config.go | 77 +
 cmd/dockerd/config_common_unix.go | 34 +
 cmd/dockerd/config_experimental.go | 9 +
 cmd/dockerd/config_solaris.go | 19 +
 cmd/dockerd/config_unix.go | 52 +
 cmd/dockerd/config_unix_test.go | 26 +
 cmd/dockerd/config_windows.go | 25 +
 cmd/dockerd/daemon.go | 571 +
 cmd/dockerd/daemon_freebsd.go | 9 +
 cmd/dockerd/daemon_linux.go | 15 +
 cmd/dockerd/daemon_solaris.go | 89 +
 cmd/dockerd/daemon_test.go | 145 +
 cmd/dockerd/daemon_unix.go | 125 +
 cmd/dockerd/daemon_unix_test.go | 114 +
 cmd/dockerd/daemon_windows.go | 98 +
 cmd/dockerd/docker.go | 110 +
 cmd/dockerd/docker_windows.go | 19 +
 cmd/dockerd/hack/malformed_host_override.go | 121 +
 cmd/dockerd/hack/malformed_host_override_test.go | 124 +
 cmd/dockerd/metrics.go | 27 +
 cmd/dockerd/options.go | 123 +
 cmd/dockerd/options_test.go | 43 +
 cmd/dockerd/service_unsupported.go | 14 +
 cmd/dockerd/service_windows.go | 431 +
 cmd/mobynit/main.go | 109 +
 container/archive.go | 76 +
 container/container.go | 1067 ++
 container/container_linux.go | 9 +
 container/container_notlinux.go | 23 +
 container/container_unit_test.go | 68 +
 container/container_unix.go | 475 +
 container/container_windows.go | 210 +
 container/env.go | 43 +
 container/env_test.go | 24 +
 container/health.go | 50 +
 container/history.go | 30 +
 container/memory_store.go | 95 +
 container/memory_store_test.go | 106 +
 container/monitor.go | 46 +
 container/mounts_unix.go | 12 +
 container/mounts_windows.go | 8 +
 container/state.go | 378 +
 container/state_solaris.go | 7 +
 container/state_test.go | 168 +
 container/state_unix.go | 10 +
 container/state_windows.go | 7 +
 container/store.go | 28 +
 container/stream/attach.go | 179 +
 container/stream/streams.go | 146 +
 container/view.go | 302 +
 container/view_test.go | 106 +
 contrib/README.md | 4 +
 contrib/REVIEWERS | 1 +
 contrib/apparmor/main.go | 56 +
 contrib/apparmor/template.go | 268 +
 contrib/builder/deb/aarch64/build.sh | 10 +
 .../builder/deb/aarch64/debian-jessie/Dockerfile | 25 +
 .../builder/deb/aarch64/debian-stretch/Dockerfile | 22 +
 contrib/builder/deb/aarch64/generate.sh | 135 +
 .../builder/deb/aarch64/ubuntu-trusty/Dockerfile | 24 +
 .../builder/deb/aarch64/ubuntu-xenial/Dockerfile | 22 +
 contrib/builder/deb/amd64/README.md | 5 +
 contrib/builder/deb/amd64/build.sh | 10 +
 contrib/builder/deb/amd64/debian-jessie/Dockerfile | 20 +
 .../builder/deb/amd64/debian-stretch/Dockerfile | 20 +
 contrib/builder/deb/amd64/debian-wheezy/Dockerfile | 22 +
 contrib/builder/deb/amd64/generate.sh | 130 +
 contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile | 16 +
 contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile | 16 +
 .../builder/deb/amd64/ubuntu-yakkety/Dockerfile | 16 +
 contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile | 16 +
 contrib/builder/deb/armhf/debian-jessie/Dockerfile | 20 +
 contrib/builder/deb/armhf/generate.sh | 139 +
 .../builder/deb/armhf/raspbian-jessie/Dockerfile | 22 +
 contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile | 16 +
 contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile | 16 +
 .../builder/deb/armhf/ubuntu-yakkety/Dockerfile | 16 +
 contrib/builder/deb/ppc64le/build.sh | 10 +
 contrib/builder/deb/ppc64le/generate.sh | 101 +
 .../builder/deb/ppc64le/ubuntu-trusty/Dockerfile | 16 +
 .../builder/deb/ppc64le/ubuntu-xenial/Dockerfile | 16 +
 .../builder/deb/ppc64le/ubuntu-yakkety/Dockerfile | 16 +
 contrib/builder/deb/s390x/build.sh | 10 +
 contrib/builder/deb/s390x/generate.sh | 94 +
 contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile | 16 +
 .../builder/deb/s390x/ubuntu-yakkety/Dockerfile | 16 +
 contrib/builder/rpm/amd64/README.md | 5 +
 .../rpm/amd64/amazonlinux-latest/Dockerfile | 18 +
 contrib/builder/rpm/amd64/build.sh | 10 +
 contrib/builder/rpm/amd64/centos-7/Dockerfile | 19 +
 contrib/builder/rpm/amd64/fedora-24/Dockerfile | 19 +
 contrib/builder/rpm/amd64/fedora-25/Dockerfile | 19 +
 contrib/builder/rpm/amd64/generate.sh | 187 +
 contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile | 18 +
 contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile | 28 +
 contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile | 18 +
 contrib/builder/rpm/amd64/photon-1.0/Dockerfile | 18 +
 contrib/builder/rpm/armhf/build.sh | 10 +
 contrib/builder/rpm/armhf/centos-7/Dockerfile | 20 +
 contrib/builder/rpm/armhf/generate.sh | 122 +
 contrib/builder/rpm/ppc64le/build.sh | 10 +
 contrib/builder/rpm/ppc64le/centos-7/Dockerfile | 19 +
 contrib/builder/rpm/ppc64le/fedora-24/Dockerfile | 19 +
 contrib/builder/rpm/ppc64le/generate.sh | 139 +
 .../builder/rpm/ppc64le/opensuse-42.1/Dockerfile | 20 +
 contrib/builder/rpm/s390x/build.sh | 10 +
 .../rpm/s390x/clefos-base-s390x-7/Dockerfile | 19 +
 contrib/builder/rpm/s390x/generate.sh | 144 +
 .../rpm/s390x/opensuse-tumbleweed-1/Dockerfile | 20 +
 contrib/check-config.sh | 360 +
 contrib/desktop-integration/README.md | 11 +
 contrib/desktop-integration/chromium/Dockerfile | 36 +
 contrib/desktop-integration/gparted/Dockerfile | 31 +
 contrib/docker-device-tool/README.md | 14 +
 contrib/docker-device-tool/device_tool.go | 176 +
 contrib/docker-device-tool/device_tool_windows.go | 4 +
 contrib/docker-machine-install-bundle.sh | 111 +
 contrib/dockerize-disk.sh | 118 +
 contrib/download-frozen-image-v1.sh | 108 +
 contrib/download-frozen-image-v2.sh | 307 +
 contrib/editorconfig | 13 +
 contrib/gitdm/aliases | 148 +
 contrib/gitdm/domain-map | 47 +
 contrib/gitdm/generate_aliases.sh | 16 +
 contrib/gitdm/gitdm.config | 17 +
 contrib/httpserver/Dockerfile | 4 +
 contrib/httpserver/Dockerfile.solaris | 4 +
 contrib/httpserver/server.go | 12 +
 contrib/init/openrc/docker.confd | 23 +
 contrib/init/openrc/docker.initd | 24 +
 contrib/init/systemd/REVIEWERS | 3 +
 contrib/init/systemd/docker.service | 34 +
 contrib/init/systemd/docker.service.rpm | 33 +
 contrib/init/systemd/docker.socket | 12 +
 contrib/init/sysvinit-debian/docker | 156 +
 contrib/init/sysvinit-debian/docker.default | 20 +
 contrib/init/sysvinit-redhat/docker | 153 +
 contrib/init/sysvinit-redhat/docker.sysconfig | 7 +
 contrib/init/upstart/REVIEWERS | 2 +
 contrib/init/upstart/docker.conf | 72 +
 contrib/install.sh | 53 +
 contrib/mac-install-bundle.sh | 45 +
 contrib/mkimage-alpine.sh | 90 +
 contrib/mkimage-arch-pacman.conf | 92 +
 contrib/mkimage-arch.sh | 126 +
 contrib/mkimage-archarm-pacman.conf | 98 +
 contrib/mkimage-crux.sh | 75 +
 contrib/mkimage-pld.sh | 73 +
 contrib/mkimage-yum.sh | 136 +
 contrib/mkimage.sh | 128 +
 contrib/mkimage/.febootstrap-minimize | 28 +
 contrib/mkimage/busybox-static | 34 +
 contrib/mkimage/debootstrap | 251 +
 contrib/mkimage/mageia-urpmi | 61 +
 contrib/mkimage/rinse | 25 +
 contrib/mkimage/solaris | 89 +
 contrib/nnp-test/Dockerfile | 9 +
 contrib/nnp-test/nnp-test.c | 10 +
 contrib/nuke-graph-directory.sh | 64 +
 contrib/project-stats.sh | 22 +
 contrib/report-issue.sh | 105 +
 contrib/reprepro/suites.sh | 12 +
 contrib/syntax/nano/Dockerfile.nanorc | 26 +
 contrib/syntax/nano/README.md | 32 +
 .../Preferences/Dockerfile.tmPreferences | 24 +
 .../Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage | 160 +
 contrib/syntax/textmate/Docker.tmbundle/info.plist | 16 +
 contrib/syntax/textmate/README.md | 17 +
 contrib/syntax/textmate/REVIEWERS | 1 +
 contrib/syntax/vim/LICENSE | 22 +
 contrib/syntax/vim/README.md | 26 +
 contrib/syntax/vim/doc/dockerfile.txt | 18 +
 contrib/syntax/vim/ftdetect/dockerfile.vim | 1 +
 contrib/syntax/vim/syntax/dockerfile.vim | 31 +
 contrib/syscall-test/Dockerfile | 15 +
 contrib/syscall-test/acct.c | 16 +
 contrib/syscall-test/exit32.s | 7 +
 contrib/syscall-test/ns.c | 63 +
 contrib/syscall-test/raw.c | 14 +
 contrib/syscall-test/setgid.c | 11 +
 contrib/syscall-test/setuid.c | 11 +
 contrib/syscall-test/socket.c | 30 +
 contrib/syscall-test/userns.c | 63 +
 contrib/udev/80-docker.rules | 3 +
 contrib/vagrant-docker/README.md | 50 +
 daemon/apparmor_default.go | 36 +
 daemon/apparmor_default_unsupported.go | 7 +
 daemon/archive.go | 362 +
 daemon/archive_tarcopyoptions.go | 15 +
 daemon/archive_tarcopyoptions_unix.go | 25 +
 daemon/archive_tarcopyoptions_windows.go | 12 +
 daemon/archive_unix.go | 29 +
 daemon/archive_windows.go | 39 +
 daemon/attach.go | 186 +
 daemon/auth.go | 13 +
 daemon/bindmount_solaris.go | 5 +
 daemon/bindmount_unix.go | 5 +
 daemon/build.go | 196 +
 daemon/cache.go | 27 +
 daemon/caps/utils_unix.go | 131 +
 daemon/changes.go | 31 +
 daemon/checkpoint.go | 137 +
 daemon/cluster.go | 26 +
 daemon/cluster/cluster.go | 439 +
 daemon/cluster/configs.go | 117 +
 daemon/cluster/controllers/plugin/controller.go | 79 +
 daemon/cluster/convert/config.go | 61 +
 daemon/cluster/convert/container.go | 353 +
 daemon/cluster/convert/network.go | 239 +
 daemon/cluster/convert/node.go | 93 +
 daemon/cluster/convert/secret.go | 61 +
 daemon/cluster/convert/service.go | 532 +
 daemon/cluster/convert/service_test.go | 148 +
 daemon/cluster/convert/swarm.go | 148 +
 daemon/cluster/convert/task.go | 66 +
 daemon/cluster/executor/backend.go | 64 +
 daemon/cluster/executor/container/adapter.go | 472 +
 daemon/cluster/executor/container/attachment.go | 81 +
 daemon/cluster/executor/container/container.go | 675 +
 daemon/cluster/executor/container/controller.go | 697 +
 daemon/cluster/executor/container/errors.go | 17 +
 daemon/cluster/executor/container/executor.go | 241 +
 daemon/cluster/executor/container/health_test.go | 100 +
 daemon/cluster/executor/container/validate.go | 40 +
 daemon/cluster/executor/container/validate_test.go | 141 +
 .../executor/container/validate_unix_test.go | 8 +
 .../executor/container/validate_windows_test.go | 8 +
 daemon/cluster/filters.go | 121 +
 daemon/cluster/filters_test.go | 102 +
 daemon/cluster/helpers.go | 245 +
 daemon/cluster/listen_addr.go | 302 +
 daemon/cluster/listen_addr_linux.go | 91 +
 daemon/cluster/listen_addr_others.go | 9 +
 daemon/cluster/listen_addr_solaris.go | 57 +
 daemon/cluster/networks.go | 317 +
 daemon/cluster/noderunner.go | 371 +
 daemon/cluster/nodes.go | 104 +
 daemon/cluster/provider/network.go | 37 +
 daemon/cluster/secrets.go | 117 +
 daemon/cluster/services.go | 552 +
 daemon/cluster/swarm.go | 549 +
 daemon/cluster/tasks.go | 87 +
 daemon/cluster/utils.go | 63 +
 daemon/commit.go | 252 +
 daemon/config/config.go | 526 +
 daemon/config/config_common_unix.go | 73 +
 daemon/config/config_common_unix_test.go | 84 +
 daemon/config/config_solaris.go | 29 +
 daemon/config/config_test.go | 391 +
 daemon/config/config_unix.go | 63 +
 daemon/config/config_unix_test.go | 139 +
 daemon/config/config_windows.go | 52 +
 daemon/config/config_windows_test.go | 60 +
 daemon/configs.go | 23 +
 daemon/configs_linux.go | 7 +
 daemon/configs_unsupported.go | 7 +
 daemon/configs_windows.go | 7 +
 daemon/container.go | 321 +
 daemon/container_linux.go | 29 +
 daemon/container_operations.go | 1085 ++
 daemon/container_operations_solaris.go | 46 +
 daemon/container_operations_unix.go | 356 +
 daemon/container_operations_windows.go | 165 +
 daemon/container_windows.go | 11 +
 daemon/create.go | 543 +
 daemon/create_unix.go | 81 +
 daemon/create_windows.go | 80 +
 daemon/daemon.go | 1284 ++
 daemon/daemon_experimental.go | 7 +
 daemon/daemon_linux.go | 93 +
 daemon/daemon_linux_test.go | 104 +
 daemon/daemon_solaris.go | 532 +
 daemon/daemon_test.go | 302 +
 daemon/daemon_unix.go | 1326 ++
 daemon/daemon_unix_test.go | 319 +
 daemon/daemon_unsupported.go | 5 +
 daemon/daemon_windows.go | 654 +
 daemon/debugtrap_unix.go | 27 +
 daemon/debugtrap_unsupported.go | 7 +
 daemon/debugtrap_windows.go | 46 +
 daemon/delete.go | 172 +
 daemon/delete_test.go | 96 +
 daemon/dependency.go | 17 +
 daemon/discovery/discovery.go | 202 +
 daemon/discovery/discovery_test.go | 111 +
 daemon/disk_usage.go | 128 +
 daemon/errors.go | 41 +
 daemon/events.go | 322 +
 daemon/events/events.go | 165 +
 daemon/events/events_test.go | 275 +
 daemon/events/filter.go | 130 +
 daemon/events/metrics.go | 15 +
 daemon/events/testutils/testutils.go | 76 +
 daemon/events_test.go | 90 +
 daemon/exec.go | 299 +
 daemon/exec/exec.go | 118 +
 daemon/exec_linux.go | 50 +
 daemon/exec_solaris.go | 11 +
 daemon/exec_windows.go | 16 +
 daemon/export.go | 59 +
 daemon/getsize_unix.go | 43 +
 daemon/graphdriver/aufs/aufs.go | 642 +
 daemon/graphdriver/aufs/aufs_test.go | 802 +
 daemon/graphdriver/aufs/dirs.go | 64 +
 daemon/graphdriver/aufs/mount.go | 21 +
 daemon/graphdriver/aufs/mount_linux.go | 7 +
 daemon/graphdriver/aufs/mount_unsupported.go | 12 +
 daemon/graphdriver/btrfs/btrfs.go | 671 +
 daemon/graphdriver/btrfs/btrfs_test.go | 63 +
 daemon/graphdriver/btrfs/dummy_unsupported.go | 3 +
 daemon/graphdriver/btrfs/version.go | 26 +
 daemon/graphdriver/btrfs/version_none.go | 14 +
 daemon/graphdriver/btrfs/version_test.go | 13 +
 daemon/graphdriver/counter.go | 59 +
 daemon/graphdriver/devmapper/README.md | 98 +
 daemon/graphdriver/devmapper/device_setup.go | 247 +
 daemon/graphdriver/devmapper/deviceset.go | 2812 +++
 daemon/graphdriver/devmapper/devmapper_doc.go | 106 +
 daemon/graphdriver/devmapper/devmapper_test.go | 152 +
 daemon/graphdriver/devmapper/driver.go | 241 +
 daemon/graphdriver/devmapper/mount.go | 89 +
 daemon/graphdriver/driver.go | 287 +
 daemon/graphdriver/driver_freebsd.go | 19 +
 daemon/graphdriver/driver_linux.go | 135 +
 daemon/graphdriver/driver_solaris.go | 97 +
 daemon/graphdriver/driver_unsupported.go | 15 +
 daemon/graphdriver/driver_windows.go | 14 +
 daemon/graphdriver/fsdiff.go | 169 +
 daemon/graphdriver/graphtest/graphbench_unix.go | 259 +
 daemon/graphdriver/graphtest/graphtest_unix.go | 336 +
 daemon/graphdriver/graphtest/graphtest_windows.go | 1 +
 daemon/graphdriver/graphtest/testutil.go | 342 +
 daemon/graphdriver/graphtest/testutil_unix.go | 69 +
 daemon/graphdriver/lcow/lcow.go | 539 +
 daemon/graphdriver/overlay/copy.go | 174 +
 daemon/graphdriver/overlay/overlay.go | 469 +
 daemon/graphdriver/overlay/overlay_test.go | 93 +
 daemon/graphdriver/overlay/overlay_unsupported.go | 3 +
 daemon/graphdriver/overlay2/check.go | 79 +
 daemon/graphdriver/overlay2/mount.go | 88 +
 daemon/graphdriver/overlay2/overlay.go | 728 +
 daemon/graphdriver/overlay2/overlay_test.go | 121 +
 daemon/graphdriver/overlay2/overlay_unsupported.go | 3 +
 daemon/graphdriver/overlay2/randomid.go | 80 +
 daemon/graphdriver/overlayutils/overlayutils.go | 18 +
 daemon/graphdriver/plugin.go | 43 +
 daemon/graphdriver/proxy.go | 263 +
 daemon/graphdriver/quota/projectquota.go | 339 +
 daemon/graphdriver/register/register_aufs.go | 8 +
 daemon/graphdriver/register/register_btrfs.go | 8 +
 .../graphdriver/register/register_devicemapper.go | 8 +
 daemon/graphdriver/register/register_overlay.go | 9 +
 daemon/graphdriver/register/register_vfs.go | 6 +
 daemon/graphdriver/register/register_windows.go | 7 +
 daemon/graphdriver/register/register_zfs.go | 8 +
 daemon/graphdriver/vfs/driver.go | 134 +
 daemon/graphdriver/vfs/vfs_test.go | 37 +
 daemon/graphdriver/windows/windows.go | 960 +
 daemon/graphdriver/zfs/MAINTAINERS | 2 +
 daemon/graphdriver/zfs/zfs.go | 420 +
 daemon/graphdriver/zfs/zfs_freebsd.go | 38 +
 daemon/graphdriver/zfs/zfs_linux.go | 27 +
 daemon/graphdriver/zfs/zfs_solaris.go | 59 +
 daemon/graphdriver/zfs/zfs_test.go | 35 +
 daemon/graphdriver/zfs/zfs_unsupported.go | 11 +
 daemon/health.go | 395 +
 daemon/health_test.go | 154 +
 daemon/image.go | 84 +
 daemon/image_delete.go | 413 +
 daemon/image_exporter.go | 37 +
 daemon/image_history.go | 91 +
 daemon/image_inspect.go | 96 +
 daemon/image_pull.go | 132 +
 daemon/image_push.go | 71 +
 daemon/image_tag.go | 40 +
 daemon/images.go | 359 +
 daemon/import.go | 137 +
 daemon/info.go | 190 +
 daemon/info_unix.go | 93 +
 daemon/info_unix_test.go | 52 +
 daemon/info_windows.go | 10 +
 daemon/initlayer/setup_solaris.go | 13 +
 daemon/initlayer/setup_unix.go | 69 +
 daemon/initlayer/setup_windows.go | 17 +
 daemon/inspect.go | 270 +
 daemon/inspect_solaris.go | 27 +
 daemon/inspect_unix.go | 75 +
 daemon/inspect_windows.go | 26 +
 daemon/keys.go | 59 +
 daemon/keys_unsupported.go | 8 +
 daemon/kill.go | 171 +
 daemon/links.go | 87 +
 daemon/links/links.go | 141 +
 daemon/links/links_test.go | 213 +
 daemon/list.go | 668 +
 daemon/list_unix.go | 11 +
 daemon/list_windows.go | 20 +
 daemon/logdrivers_linux.go | 8 +
 daemon/logdrivers_windows.go | 13 +
 daemon/logger/adapter.go | 137 +
 daemon/logger/adapter_test.go | 180 +
 daemon/logger/awslogs/cloudwatchlogs.go | 598 +
 daemon/logger/awslogs/cloudwatchlogs_test.go | 1053 ++
 daemon/logger/awslogs/cwlogsiface_mock_test.go | 92 +
 daemon/logger/copier.go | 135 +
 daemon/logger/copier_test.go | 296 +
 daemon/logger/etwlogs/etwlogs_windows.go | 169 +
 daemon/logger/factory.go | 162 +
 daemon/logger/fluentd/fluentd.go | 250 +
 daemon/logger/gcplogs/gcplogging.go | 244 +
 daemon/logger/gcplogs/gcplogging_linux.go | 31 +
 daemon/logger/gcplogs/gcplogging_others.go | 7 +
 daemon/logger/gelf/gelf.go | 209 +
 daemon/logger/gelf/gelf_unsupported.go | 3 +
 daemon/logger/journald/journald.go | 126 +
 daemon/logger/journald/journald_test.go | 23 +
 daemon/logger/journald/journald_unsupported.go | 6 +
 daemon/logger/journald/read.go | 425 +
 daemon/logger/journald/read_native.go | 6 +
 daemon/logger/journald/read_native_compat.go | 6 +
 daemon/logger/journald/read_unsupported.go | 7 +
 daemon/logger/jsonfilelog/jsonfilelog.go | 159 +
 daemon/logger/jsonfilelog/jsonfilelog_test.go | 249 +
 .../logger/jsonfilelog/multireader/multireader.go | 224 +
 .../jsonfilelog/multireader/multireader_test.go | 225 +
 daemon/logger/jsonfilelog/read.go | 324 +
 daemon/logger/logentries/logentries.go | 97 +
 daemon/logger/logger.go | 136 +
 daemon/logger/logger_test.go | 19 +
 daemon/logger/loggerutils/log_tag.go | 31 +
 daemon/logger/loggerutils/log_tag_test.go | 47 +
 daemon/logger/loggerutils/rotatefilewriter.go | 141 +
 daemon/logger/loginfo.go | 129 +
 daemon/logger/plugin.go | 90 +
 daemon/logger/plugin_unix.go | 20 +
 daemon/logger/plugin_unsupported.go | 12 +
 daemon/logger/proxy.go | 107 +
 daemon/logger/ring.go | 218 +
 daemon/logger/ring_test.go | 299 +
 daemon/logger/splunk/splunk.go | 626 +
 daemon/logger/splunk/splunk_test.go | 1306 ++
 daemon/logger/splunk/splunkhecmock_test.go | 157 +
 daemon/logger/syslog/syslog.go | 266 +
 daemon/logger/syslog/syslog_test.go | 62 +
 daemon/logs.go | 175 +
 daemon/logs_test.go | 15 +
 daemon/metrics.go | 174 +
 daemon/metrics_unix.go | 86 +
 daemon/metrics_unsupported.go | 12 +
 daemon/monitor.go | 180 +
 daemon/monitor_linux.go | 19 +
 daemon/monitor_solaris.go | 18 +
 daemon/monitor_windows.go | 46 +
 daemon/mounts.go | 53 +
 daemon/names.go | 112 +
 daemon/network.go | 567 +
 daemon/network/settings.go | 33 +
 daemon/oci_linux.go | 838 +
 daemon/oci_solaris.go | 187 +
 daemon/oci_windows.go | 208 +
 daemon/pause.go | 49 +
 daemon/prune.go | 468 +
 daemon/reload.go | 312 +
 daemon/reload_test.go | 474 +
 daemon/rename.go | 123 +
 daemon/resize.go | 40 +
 daemon/restart.go | 70 +
 daemon/search.go | 94 +
 daemon/search_test.go | 358 +
 daemon/seccomp_disabled.go | 19 +
 daemon/seccomp_linux.go | 55 +
 daemon/seccomp_unsupported.go | 5 +
 daemon/secrets.go | 23 +
 daemon/secrets_linux.go | 7 +
 daemon/secrets_unsupported.go | 7 +
 daemon/secrets_windows.go | 7 +
 daemon/selinux_linux.go | 17 +
 daemon/selinux_unsupported.go | 13 +
 daemon/start.go | 228 +
 daemon/start_unix.go | 32 +
 daemon/start_windows.go | 210 +
 daemon/stats.go | 160 +
 daemon/stats/collector.go | 116 +
 daemon/stats/collector_solaris.go | 29 +
 daemon/stats/collector_unix.go | 83 +
 daemon/stats/collector_windows.go | 19 +
 daemon/stats/types.go | 42 +
 daemon/stats_collector.go | 26 +
 daemon/stats_unix.go | 58 +
 daemon/stats_windows.go | 11 +
 daemon/stop.go | 91 +
 daemon/top_unix.go | 155 +
 daemon/top_unix_test.go | 79 +
 daemon/top_windows.go | 53 +
 daemon/unpause.go | 38 +
 daemon/update.go | 86 +
 daemon/update_linux.go | 32 +
 daemon/update_solaris.go | 11 +
 daemon/update_windows.go | 13 +
 daemon/volumes.go | 395 +
 daemon/volumes_unit_test.go | 39 +
 daemon/volumes_unix.go | 232 +
 daemon/volumes_unix_test.go | 256 +
 daemon/volumes_windows.go | 48 +
 daemon/wait.go | 22 +
 daemon/workdir.go | 20 +
 distribution/config.go | 264 +
 distribution/errors.go | 159 +
 .../fixtures/validate_manifest/bad_manifest | 38 +
 .../fixtures/validate_manifest/extra_data_manifest | 46 +
 .../fixtures/validate_manifest/good_manifest | 38 +
 distribution/metadata/metadata.go | 77 +
 distribution/metadata/v1_id_service.go | 51 +
 distribution/metadata/v1_id_service_test.go | 84 +
 distribution/metadata/v2_metadata_service.go | 241 +
 distribution/metadata/v2_metadata_service_test.go | 116 +
 distribution/pull.go | 198 +
 distribution/pull_v1.go | 376 +
 distribution/pull_v2.go | 819 +
 distribution/pull_v2_test.go | 183 +
 distribution/pull_v2_unix.go | 13 +
 distribution/pull_v2_windows.go | 57 +
 distribution/push.go | 186 +
 distribution/push_v1.go | 463 +
 distribution/push_v2.go | 691 +
 distribution/push_v2_test.go | 583 +
 distribution/registry.go | 156 +
 distribution/registry_unit_test.go | 172 +
 distribution/utils/progress.go | 44 +
 distribution/xfer/download.go | 504 +
 distribution/xfer/download_test.go | 382 +
 distribution/xfer/transfer.go | 401 +
 distribution/xfer/transfer_test.go | 410 +
 distribution/xfer/upload.go | 174 +
 distribution/xfer/upload_test.go | 134 +
 dockerversion/useragent.go | 76 +
 dockerversion/version_lib.go | 16 +
 docs/api/v1.18.md | 2159 +++
 docs/api/v1.19.md | 2241 +++
 docs/api/v1.20.md | 2394 +++
 docs/api/v1.21.md | 2981 ++++
 docs/api/v1.22.md | 3319 ++++
 docs/api/v1.23.md | 3436 ++++
 docs/api/v1.24.md | 5348 ++++++
 docs/api/version-history.md | 318 +
 docs/getting-started.md | 79 +
 docs/static_files/balena-logo-black.svg | 56 +
 docs/static_files/contributors.png | Bin 0 -> 23100 bytes
 docs/static_files/docker-logo-compressed.png | Bin 0 -> 4972 bytes
 docs/static_files/moby-project-logo.png | Bin 0 -> 20458 bytes
 hack/Jenkins/W2L/postbuild.sh | 35 +
 hack/Jenkins/W2L/setup.sh | 309 +
 hack/Jenkins/readme.md | 3 +
 hack/README.md | 68 +
 hack/dind | 33 +
 hack/dockerfile/binaries-commits | 14 +
 hack/dockerfile/install-binaries.sh | 125 +
 hack/generate-authors.sh | 15 +
 hack/generate-swagger-api.sh | 27 +
 hack/install.sh | 544 +
 hack/integration-cli-on-swarm/README.md | 69 +
 hack/integration-cli-on-swarm/agent/Dockerfile | 6 +
 hack/integration-cli-on-swarm/agent/master/call.go | 132 +
 .../agent/master/master.go | 65 +
 hack/integration-cli-on-swarm/agent/master/set.go | 28 +
 .../agent/master/set_test.go | 63 +
 hack/integration-cli-on-swarm/agent/types/types.go | 18 +
 hack/integration-cli-on-swarm/agent/vendor.conf | 2 +
 .../vendor/github.com/bfirsh/funker-go/LICENSE | 191 +
 .../vendor/github.com/bfirsh/funker-go/call.go | 50 +
 .../vendor/github.com/bfirsh/funker-go/handle.go | 54 +
 .../agent/worker/executor.go | 118 +
 .../agent/worker/worker.go | 69 +
 hack/integration-cli-on-swarm/host/compose.go | 122 +
 hack/integration-cli-on-swarm/host/dockercmd.go | 64 +
 hack/integration-cli-on-swarm/host/enumerate.go | 55 +
 .../host/enumerate_test.go | 84 +
 hack/integration-cli-on-swarm/host/host.go | 198 +
 hack/integration-cli-on-swarm/host/volume.go | 88 +
 hack/make.ps1 | 472 +
 hack/make.sh | 296 +
 hack/make/.binary | 44 +
 hack/make/.binary-setup | 11 +
 hack/make/.build-deb/compat | 1 +
 hack/make/.build-deb/control | 29 +
 hack/make/.build-deb/docker-engine.bash-completion | 1 +
 hack/make/.build-deb/docker-engine.docker.default | 1 +
 hack/make/.build-deb/docker-engine.docker.init | 1 +
 hack/make/.build-deb/docker-engine.docker.upstart | 1 +
 hack/make/.build-deb/docker-engine.install | 12 +
 hack/make/.build-deb/docker-engine.manpages | 1 +
 hack/make/.build-deb/docker-engine.postinst | 20 +
 hack/make/.build-deb/docker-engine.udev | 1 +
 hack/make/.build-deb/docs | 1 +
 hack/make/.build-deb/rules | 53 +
 hack/make/.build-rpm/docker-engine.spec | 249 +
 hack/make/.detect-daemon-osarch | 73 +
 hack/make/.ensure-emptyfs | 23 +
 hack/make/.go-autogen | 86 +
 hack/make/.go-autogen.ps1 | 91 +
 hack/make/.integration-daemon-setup | 7 +
 hack/make/.integration-daemon-start | 132 +
 hack/make/.integration-daemon-stop | 27 +
 hack/make/.integration-test-helpers | 85 +
 hack/make/.resources-windows/common.rc | 38 +
 hack/make/.resources-windows/docker.exe.manifest | 18 +
 hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes
 hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes
 hack/make/.resources-windows/docker.rc | 3 +
 hack/make/.resources-windows/dockerd.rc | 4 +
 hack/make/.resources-windows/event_messages.mc | 39 +
 hack/make/.resources-windows/resources.go | 18 +
 hack/make/README.md | 17 +
 hack/make/binary | 10 +
 hack/make/binary-daemon | 9 +
 hack/make/binary-docker | 18 +
 hack/make/build-deb | 91 +
 hack/make/build-integration-test-binary | 13 +
 hack/make/build-rpm | 148 +
 hack/make/clean-apt-repo | 43 +
 hack/make/clean-yum-repo | 20 +
 hack/make/cover | 15 +
 hack/make/cross | 29 +
 hack/make/dynbinary | 10 +
 hack/make/dynbinary-daemon | 10 +
 hack/make/dynbinary-docker | 21 +
 hack/make/generate-index-listing | 74 +
 hack/make/install-binary | 8 +
 hack/make/install-binary-daemon | 16 +
 hack/make/install-script | 63 +
 hack/make/release-deb | 163 +
 hack/make/release-rpm | 71 +
 hack/make/run | 44 +
 hack/make/sign-repos | 65 +
 hack/make/test-deb-install | 71 +
 hack/make/test-docker-py | 17 +
 hack/make/test-install-script | 31 +
 hack/make/test-integration-cli | 28 +
 hack/make/test-integration-shell | 7 +
 hack/make/test-old-apt-repo | 29 +
 hack/make/test-unit | 60 +
 hack/make/tgz | 2 +
 hack/make/ubuntu | 190 +
 hack/make/update-apt-repo | 70 +
 hack/make/win | 20 +
 hack/release.sh | 325 +
 hack/validate/.swagger-yamllint | 4 +
 hack/validate/.validate | 30 +
 hack/validate/all | 8 +
 hack/validate/changelog-date-descending | 12 +
 hack/validate/changelog-well-formed | 25 +
 hack/validate/dco | 55 +
 hack/validate/default | 18 +
 hack/validate/default-seccomp | 28 +
 hack/validate/gofmt | 33 +
 hack/validate/lint | 31 +
 hack/validate/pkg-imports | 33 +
 hack/validate/swagger | 13 +
 hack/validate/swagger-gen | 29 +
 hack/validate/test-imports | 38 +
 hack/validate/toml | 31 +
 hack/validate/vendor | 51 +
 hack/validate/vet | 32 +
 hack/vendor.sh | 15 +
 image/cache/cache.go | 253 +
 image/cache/compare.go | 63 +
 image/cache/compare_test.go | 126 +
 image/fs.go | 178 +
 image/fs_test.go | 275 +
 image/image.go | 220 +
 image/image_test.go | 53 +
 image/rootfs.go | 44 +
 image/spec/v1.1.md | 637 +
 image/spec/v1.2.md | 696 +
 image/spec/v1.md | 573 +
 image/store.go | 367 +
 image/store_test.go | 178 +
 image/tarexport/load.go | 431 +
 image/tarexport/save.go | 409 +
 image/tarexport/tarexport.go | 47 +
 image/v1/imagev1.go | 150 +
 image/v1/imagev1_test.go | 55 +
 integration-cli/benchmark_test.go | 95 +
 integration-cli/check_test.go | 432 +
 integration-cli/checker/checker.go | 46 +
 integration-cli/cli/build/build.go | 82 +
 integration-cli/cli/build/fakecontext/context.go | 124 +
 integration-cli/cli/build/fakegit/fakegit.go | 125 +
 integration-cli/cli/build/fakestorage/fixtures.go | 67 +
 integration-cli/cli/build/fakestorage/storage.go | 176 +
 integration-cli/cli/cli.go | 231 +
 integration-cli/daemon/daemon.go | 815 +
 integration-cli/daemon/daemon_swarm.go | 566 +
 integration-cli/daemon/daemon_unix.go | 36 +
 integration-cli/daemon/daemon_windows.go | 54 +
 integration-cli/daemon_swarm_hack_test.go | 23 +
 integration-cli/docker_api_attach_test.go | 210 +
 integration-cli/docker_api_auth_test.go | 26 +
 integration-cli/docker_api_build_test.go | 535 +
 integration-cli/docker_api_containers_test.go | 1950 +++
 integration-cli/docker_api_create_test.go | 171 +
 integration-cli/docker_api_events_test.go | 74 +
 integration-cli/docker_api_exec_resize_test.go | 104 +
 integration-cli/docker_api_exec_test.go | 249 +
 integration-cli/docker_api_images_test.go | 192 +
 integration-cli/docker_api_info_test.go | 83 +
 integration-cli/docker_api_inspect_test.go | 184 +
 integration-cli/docker_api_inspect_unix_test.go | 36 +
 integration-cli/docker_api_logs_test.go | 84 +
 integration-cli/docker_api_network_test.go | 357 +
 integration-cli/docker_api_resize_test.go | 45 +
 integration-cli/docker_api_session_test.go | 49 +
 integration-cli/docker_api_stats_test.go | 310 +
 integration-cli/docker_api_stats_unix_test.go | 42 +
 integration-cli/docker_api_swarm_config_test.go | 118 +
 integration-cli/docker_api_swarm_node_test.go | 128 +
 integration-cli/docker_api_swarm_secret_test.go | 132 +
 integration-cli/docker_api_swarm_service_test.go | 598 +
 integration-cli/docker_api_swarm_test.go | 1039 ++
 integration-cli/docker_api_test.go | 122 +
 integration-cli/docker_api_update_unix_test.go | 36 +
 integration-cli/docker_api_version_test.go | 24 +
 integration-cli/docker_api_volumes_test.go | 103 +
 integration-cli/docker_cli_attach_test.go | 168 +
 integration-cli/docker_cli_attach_unix_test.go | 237 +
 integration-cli/docker_cli_authz_plugin_v2_test.go | 165 +
 integration-cli/docker_cli_authz_unix_test.go | 475 +
 integration-cli/docker_cli_build_test.go | 6490 +++++++
 integration-cli/docker_cli_build_unix_test.go | 204 +
 integration-cli/docker_cli_by_digest_test.go | 690 +
 integration-cli/docker_cli_commit_test.go | 157 +
 integration-cli/docker_cli_config_create_test.go | 131 +
 integration-cli/docker_cli_config_inspect_test.go | 68 +
 integration-cli/docker_cli_config_ls_test.go | 125 +
 integration-cli/docker_cli_config_test.go | 150 +
 .../docker_cli_cp_from_container_test.go | 488 +
 integration-cli/docker_cli_cp_test.go | 665 +
 integration-cli/docker_cli_cp_to_container_test.go | 600 +
 .../docker_cli_cp_to_container_unix_test.go | 81 +
 integration-cli/docker_cli_cp_utils_test.go | 319 +
 integration-cli/docker_cli_create_test.go | 443 +
 integration-cli/docker_cli_create_unix_test.go | 43 +
 integration-cli/docker_cli_daemon_plugins_test.go | 391 +
 integration-cli/docker_cli_daemon_test.go | 2986 ++++
 integration-cli/docker_cli_diff_test.go | 98 +
 integration-cli/docker_cli_events_test.go | 801 +
 integration-cli/docker_cli_events_unix_test.go | 485 +
 integration-cli/docker_cli_exec_test.go | 602 +
 integration-cli/docker_cli_exec_unix_test.go | 93 +
 integration-cli/docker_cli_experimental_test.go | 29 +
 integration-cli/docker_cli_export_import_test.go | 51 +
 .../docker_cli_external_graphdriver_unix_test.go | 406 +
 .../docker_cli_external_volume_driver_unix_test.go | 633 +
 integration-cli/docker_cli_health_test.go | 164 +
 integration-cli/docker_cli_help_test.go | 319 +
 integration-cli/docker_cli_history_test.go | 119 +
 integration-cli/docker_cli_images_test.go | 366 +
 integration-cli/docker_cli_import_test.go | 143 +
 integration-cli/docker_cli_info_test.go | 243 +
 integration-cli/docker_cli_info_unix_test.go | 15 +
 integration-cli/docker_cli_inspect_test.go | 470 +
 integration-cli/docker_cli_kill_test.go | 138 +
 integration-cli/docker_cli_links_test.go | 236 +
 integration-cli/docker_cli_links_unix_test.go | 26 +
 integration-cli/docker_cli_login_test.go | 30 +
 integration-cli/docker_cli_logout_test.go | 108 +
 integration-cli/docker_cli_logs_bench_test.go | 32 +
 integration-cli/docker_cli_logs_test.go | 307 +
 integration-cli/docker_cli_nat_test.go | 88 +
 integration-cli/docker_cli_netmode_test.go | 94 +
 integration-cli/docker_cli_network_unix_test.go | 1845 ++
 integration-cli/docker_cli_oom_killed_test.go | 30 +
 integration-cli/docker_cli_pause_test.go | 67 +
 .../docker_cli_plugins_logdriver_test.go | 53 +
 integration-cli/docker_cli_plugins_test.go | 518 +
 integration-cli/docker_cli_port_test.go | 319 +
 integration-cli/docker_cli_proxy_test.go | 51 +
 integration-cli/docker_cli_prune_unix_test.go | 294 +
 integration-cli/docker_cli_ps_test.go | 967 ++
 integration-cli/docker_cli_pull_local_test.go | 470 +
 integration-cli/docker_cli_pull_test.go | 284 +
 integration-cli/docker_cli_pull_trusted_test.go | 222 +
 integration-cli/docker_cli_push_test.go | 604 +
 .../docker_cli_registry_user_agent_test.go | 103 +
 integration-cli/docker_cli_rename_test.go | 138 +
 integration-cli/docker_cli_restart_test.go | 309 +
 integration-cli/docker_cli_rm_test.go | 87 +
 integration-cli/docker_cli_rmi_test.go | 338 +
 integration-cli/docker_cli_run_test.go | 4621 +++++
 integration-cli/docker_cli_run_unix_test.go | 1576 ++
 integration-cli/docker_cli_save_load_test.go | 385 +
 integration-cli/docker_cli_save_load_unix_test.go | 107 +
 integration-cli/docker_cli_search_test.go | 131 +
 integration-cli/docker_cli_secret_create_test.go | 131 +
 integration-cli/docker_cli_secret_inspect_test.go | 68 +
 integration-cli/docker_cli_secret_ls_test.go | 125 +
 integration-cli/docker_cli_service_create_test.go | 447 +
 integration-cli/docker_cli_service_health_test.go | 134 +
 integration-cli/docker_cli_service_logs_test.go | 387 +
 integration-cli/docker_cli_service_scale_test.go | 57 +
 integration-cli/docker_cli_service_update_test.go | 172 +
 integration-cli/docker_cli_sni_test.go | 44 +
 integration-cli/docker_cli_stack_test.go | 206 +
 integration-cli/docker_cli_start_test.go | 199 +
 integration-cli/docker_cli_stats_test.go | 179 +
 integration-cli/docker_cli_stop_test.go | 17 +
 integration-cli/docker_cli_swarm_test.go | 2205 +++
 integration-cli/docker_cli_swarm_unix_test.go | 104 +
 integration-cli/docker_cli_tag_test.go | 168 +
 integration-cli/docker_cli_top_test.go | 73 +
 integration-cli/docker_cli_update_test.go | 43 +
 integration-cli/docker_cli_update_unix_test.go | 327 +
 integration-cli/docker_cli_userns_test.go | 99 +
 integration-cli/docker_cli_v2_only_test.go | 120 +
 integration-cli/docker_cli_version_test.go | 58 +
 integration-cli/docker_cli_volume_test.go | 643 +
 integration-cli/docker_cli_wait_test.go | 98 +
 integration-cli/docker_deprecated_api_v124_test.go | 229 +
 .../docker_deprecated_api_v124_unix_test.go | 31 +
 .../docker_experimental_network_test.go | 533 +
 integration-cli/docker_hub_pull_suite_test.go | 91 +
 integration-cli/docker_utils_test.go | 502 +
 integration-cli/environment/clean.go | 217 +
 integration-cli/environment/environment.go | 229 +
 integration-cli/environment/protect.go | 12 +
 integration-cli/events_utils_test.go | 206 +
 .../fixtures/auth/docker-credential-shell-test | 55 +
 .../fixtures/credentialspecs/valid.json | 25 +
 integration-cli/fixtures/deploy/default.yaml | 9 +
 integration-cli/fixtures/deploy/remove.yaml | 11 +
 integration-cli/fixtures/deploy/secrets.yaml | 20 +
 integration-cli/fixtures/https/ca.pem | 23 +
 integration-cli/fixtures/https/client-cert.pem | 73 +
 integration-cli/fixtures/https/client-key.pem | 16 +
 .../fixtures/https/client-rogue-cert.pem | 73 +
 .../fixtures/https/client-rogue-key.pem | 16 +
 integration-cli/fixtures/https/server-cert.pem | 76 +
 integration-cli/fixtures/https/server-key.pem | 16 +
 .../fixtures/https/server-rogue-cert.pem | 76 +
 .../fixtures/https/server-rogue-key.pem | 16 +
 integration-cli/fixtures/load/emptyLayer.tar | Bin 0 -> 30720 bytes
 integration-cli/fixtures/load/frozen.go | 182 +
 integration-cli/fixtures/notary/delgkey1.crt | 21 +
 integration-cli/fixtures/notary/delgkey1.key | 27 +
 integration-cli/fixtures/notary/delgkey2.crt | 21 +
 integration-cli/fixtures/notary/delgkey2.key | 27 +
 integration-cli/fixtures/notary/delgkey3.crt | 21 +
 integration-cli/fixtures/notary/delgkey3.key | 27 +
 integration-cli/fixtures/notary/delgkey4.crt | 21 +
 integration-cli/fixtures/notary/delgkey4.key | 27 +
 integration-cli/fixtures/notary/gen.sh | 18 +
 integration-cli/fixtures/notary/localhost.cert | 19 +
 integration-cli/fixtures/notary/localhost.key | 27 +
 integration-cli/fixtures/registry/cert.pem | 21 +
 integration-cli/fixtures/secrets/default | 1 +
 integration-cli/fixtures_linux_daemon_test.go | 149 +
 integration-cli/registry/registry.go | 214 +
 integration-cli/registry/registry_mock.go | 66 +
 integration-cli/registry/requirement.go | 12 +
 integration-cli/request/npipe.go | 12 +
 integration-cli/request/npipe_windows.go | 12 +
 integration-cli/request/request.go | 312 +
 integration-cli/requirement/requirement.go | 33 +
 integration-cli/requirements_test.go | 203 +
 integration-cli/requirements_unix_test.go | 115 +
 integration-cli/test_vars_exec_test.go | 8 +
 integration-cli/test_vars_noexec_test.go | 8 +
 integration-cli/test_vars_noseccomp_test.go | 8 +
 integration-cli/test_vars_seccomp_test.go | 8 +
 integration-cli/test_vars_test.go | 11 +
 integration-cli/test_vars_unix_test.go | 14 +
 integration-cli/test_vars_windows_test.go | 15 +
 integration-cli/trust_server_test.go | 319 +
 integration-cli/utils_test.go | 32 +
 landr.conf.js | 88 +
 layer/empty.go | 74 +
 layer/empty_test.go | 46 +
 layer/filestore.go | 354 +
 layer/filestore_test.go | 104 +
 layer/filestore_unix.go | 13 +
 layer/filestore_windows.go | 35 +
 layer/layer.go | 300 +
 layer/layer_store.go | 789 +
 layer/layer_store_windows.go | 11 +
 layer/layer_test.go | 772 +
 layer/layer_unix.go | 9 +
 layer/layer_unix_test.go | 71 +
 layer/layer_windows.go | 34 +
 layer/migration.go | 256 +
 layer/migration_test.go | 435 +
 layer/mount_test.go | 239 +
 layer/mounted_layer.go | 99 +
 layer/ro_layer.go | 190 +
 layer/ro_layer_unix.go | 7 +
 layer/ro_layer_windows.go | 16 +
 libcontainerd/client.go | 46 +
 libcontainerd/client_linux.go | 619 +
 libcontainerd/client_solaris.go | 101 +
 libcontainerd/client_unix.go | 141 +
 libcontainerd/client_windows.go | 754 +
 libcontainerd/container.go | 13 +
 libcontainerd/container_unix.go | 246 +
 libcontainerd/container_windows.go | 330 +
 libcontainerd/oom_linux.go | 31 +
 libcontainerd/oom_solaris.go | 5 +
 libcontainerd/pausemonitor_unix.go | 42 +
 libcontainerd/process.go | 18 +
 libcontainerd/process_unix.go | 107 +
 libcontainerd/process_windows.go | 48 +
 libcontainerd/queue_unix.go | 37 +
 libcontainerd/queue_unix_test.go | 33 +
 libcontainerd/remote.go | 20 +
 libcontainerd/remote_unix.go | 565 +
 libcontainerd/remote_windows.go | 36 +
 libcontainerd/types.go | 75 +
 libcontainerd/types_linux.go | 49 +
 libcontainerd/types_solaris.go | 43 +
 libcontainerd/types_windows.go | 79 +
 libcontainerd/utils_linux.go | 62 +
 libcontainerd/utils_solaris.go | 27 +
 libcontainerd/utils_windows.go | 46 +
 libcontainerd/utils_windows_test.go | 13 +
 migrate/v1/migratev1.go | 506 +
 migrate/v1/migratev1_test.go | 448 +
 oci/defaults.go | 221 +
 oci/devices_linux.go | 86 +
 oci/devices_unsupported.go | 20 +
 oci/namespaces.go | 13 +
 opts/env.go | 46 +
 opts/env_test.go | 42 +
 opts/hosts.go | 165 +
 opts/hosts_test.go | 181 +
 opts/hosts_unix.go | 8 +
 opts/hosts_windows.go | 6 +
 opts/ip.go | 47 +
 opts/ip_test.go | 54 +
 opts/opts.go | 327 +
 opts/opts_test.go | 264 +
 opts/opts_unix.go | 6 +
 opts/opts_windows.go | 56 +
 opts/quotedstring.go | 37 +
 opts/quotedstring_test.go | 29 +
 opts/runtime.go | 79 +
 opts/ulimit.go | 81 +
 opts/ulimit_test.go | 42 +
 packaging/docker-engine.service | 14 +
 packaging/docker-engine.socket | 13 +
 packaging/docker-engine.spec | 108 +-
 pkg/README.md | 11 +
 pkg/aaparser/aaparser.go | 89 +
 pkg/aaparser/aaparser_test.go | 73 +
 pkg/archive/README.md | 1 +
 pkg/archive/archive.go | 1195 ++
 pkg/archive/archive_linux.go | 92 +
 pkg/archive/archive_linux_test.go | 187 +
 pkg/archive/archive_other.go | 7 +
 pkg/archive/archive_test.go | 1260 ++
 pkg/archive/archive_unix.go | 121 +
 pkg/archive/archive_unix_test.go | 249 +
 pkg/archive/archive_windows.go | 79 +
 pkg/archive/archive_windows_test.go | 93 +
 pkg/archive/changes.go | 441 +
 pkg/archive/changes_linux.go | 312 +
 pkg/archive/changes_other.go | 97 +
 pkg/archive/changes_posix_test.go | 132 +
 pkg/archive/changes_test.go | 572 +
 pkg/archive/changes_unix.go | 36 +
 pkg/archive/changes_windows.go | 30 +
 pkg/archive/copy.go | 458 +
 pkg/archive/copy_unix.go | 11 +
 pkg/archive/copy_unix_test.go | 978 ++
 pkg/archive/copy_windows.go | 9 +
 pkg/archive/diff.go | 256 +
 pkg/archive/diff_test.go | 386 +
 pkg/archive/example_changes.go | 97 +
 pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes
 pkg/archive/time_linux.go | 16 +
 pkg/archive/time_unsupported.go | 16 +
 pkg/archive/utils_test.go | 166 +
 pkg/archive/whiteouts.go | 23 +
 pkg/archive/wrap.go | 59 +
 pkg/archive/wrap_test.go | 98 +
 pkg/authorization/api.go | 88 +
 pkg/authorization/api_test.go | 75 +
 pkg/authorization/authz.go | 186 +
 pkg/authorization/authz_unix_test.go | 282 +
 pkg/authorization/middleware.go | 110 +
 pkg/authorization/middleware_test.go | 53 +
 pkg/authorization/middleware_unix_test.go | 65 +
 pkg/authorization/plugin.go | 118 +
 pkg/authorization/response.go | 203 +
 pkg/broadcaster/unbuffered.go | 49 +
 pkg/broadcaster/unbuffered_test.go | 162 +
 pkg/chrootarchive/archive.go | 70 +
 pkg/chrootarchive/archive_test.go | 412 +
 pkg/chrootarchive/archive_unix.go | 86 +
 pkg/chrootarchive/archive_windows.go | 22 +
 pkg/chrootarchive/chroot_linux.go | 108 +
 pkg/chrootarchive/chroot_unix.go | 12 +
 pkg/chrootarchive/diff.go | 23 +
 pkg/chrootarchive/diff_unix.go | 130 +
 pkg/chrootarchive/diff_windows.go | 45 +
 pkg/chrootarchive/init_unix.go | 28 +
 pkg/chrootarchive/init_windows.go | 4 +
 pkg/devicemapper/devmapper.go | 829 +
 pkg/devicemapper/devmapper_log.go | 35 +
 pkg/devicemapper/devmapper_wrapper.go | 251 +
 .../devmapper_wrapper_deferred_remove.go | 34 +
 .../devmapper_wrapper_no_deferred_remove.go | 15 +
 pkg/devicemapper/ioctl.go | 27 +
 pkg/devicemapper/log.go | 11 +
 pkg/directory/directory.go | 26 +
 pkg/directory/directory_test.go | 192 +
 pkg/directory/directory_unix.go | 48 +
 pkg/directory/directory_windows.go | 37 +
 pkg/discovery/README.md | 41 +
 pkg/discovery/backends.go | 107 +
 pkg/discovery/discovery.go | 35 +
 pkg/discovery/discovery_test.go | 137 +
 pkg/discovery/entry.go | 94 +
 pkg/discovery/file/file.go | 107 +
 pkg/discovery/file/file_test.go | 114 +
 pkg/discovery/generator.go | 35 +
 pkg/discovery/generator_test.go | 53 +
 pkg/discovery/kv/kv.go | 180 +
 pkg/discovery/kv/kv_test.go | 324 +
 pkg/discovery/memory/memory.go | 93 +
 pkg/discovery/memory/memory_test.go | 48 +
 pkg/discovery/nodes/nodes.go | 54 +
 pkg/discovery/nodes/nodes_test.go | 51 +
 pkg/filenotify/filenotify.go | 40 +
 pkg/filenotify/fsnotify.go | 18 +
 pkg/filenotify/poller.go | 204 +
 pkg/filenotify/poller_test.go | 119 +
 pkg/fileutils/fileutils.go | 298 +
 pkg/fileutils/fileutils_darwin.go | 27 +
 pkg/fileutils/fileutils_solaris.go | 7 +
 pkg/fileutils/fileutils_test.go | 591 +
 pkg/fileutils/fileutils_unix.go | 22 +
 pkg/fileutils/fileutils_windows.go | 7 +
 pkg/fsutils/fsutils_linux.go | 87 +
 pkg/fsutils/fsutils_linux_test.go | 91 +
 pkg/homedir/homedir.go | 39 +
 pkg/homedir/homedir_linux.go | 23 +
 pkg/homedir/homedir_others.go | 13 +
 pkg/homedir/homedir_test.go | 24 +
 pkg/idtools/idtools.go | 279 +
 pkg/idtools/idtools_unix.go | 204 +
 pkg/idtools/idtools_unix_test.go | 253 +
 pkg/idtools/idtools_windows.go | 25 +
 pkg/idtools/usergroupadd_linux.go | 164 +
 pkg/idtools/usergroupadd_unsupported.go | 12 +
 pkg/idtools/utils_unix.go | 32 +
 pkg/ioutils/buffer.go | 51 +
 pkg/ioutils/buffer_test.go | 153 +
 pkg/ioutils/bytespipe.go | 186 +
 pkg/ioutils/bytespipe_test.go | 159 +
 pkg/ioutils/concat.go | 123 +
 pkg/ioutils/fswriters.go | 177 +
 pkg/ioutils/fswriters_test.go | 132 +
 pkg/ioutils/readers.go | 197 +
 pkg/ioutils/readers_test.go | 94 +
 pkg/ioutils/temp_unix.go | 10 +
 pkg/ioutils/temp_windows.go | 18 +
 pkg/ioutils/writeflusher.go | 92 +
 pkg/ioutils/writers.go | 66 +
 pkg/ioutils/writers_test.go | 65 +
 pkg/jsonlog/jsonlog.go | 42 +
 pkg/jsonlog/jsonlog_marshalling.go | 178 +
 pkg/jsonlog/jsonlog_marshalling_test.go | 34 +
 pkg/jsonlog/jsonlogbytes.go | 122 +
 pkg/jsonlog/jsonlogbytes_test.go | 39 +
 pkg/jsonlog/time_marshalling.go | 27 +
 pkg/jsonlog/time_marshalling_test.go | 47 +
 pkg/jsonmessage/jsonmessage.go | 315 +
 pkg/jsonmessage/jsonmessage_test.go | 281 +
 pkg/listeners/group_unix.go | 34 +
 pkg/listeners/listeners_solaris.go | 43 +
 pkg/listeners/listeners_unix.go | 104 +
 pkg/listeners/listeners_windows.go | 54 +
 pkg/locker/README.md | 65 +
 pkg/locker/locker.go | 112 +
 pkg/locker/locker_test.go | 124 +
 pkg/longpath/longpath.go | 26 +
 pkg/longpath/longpath_test.go | 22 +
 pkg/loopback/attach_loopback.go | 137 +
 pkg/loopback/ioctl.go | 53 +
 pkg/loopback/loop_wrapper.go | 52 +
 pkg/loopback/loopback.go | 63 +
 pkg/mount/flags.go | 149 +
 pkg/mount/flags_freebsd.go | 49 +
 pkg/mount/flags_linux.go | 87 +
 pkg/mount/flags_unsupported.go | 31 +
 pkg/mount/mount.go | 86 +
 pkg/mount/mount_unix_test.go | 162 +
 pkg/mount/mounter_freebsd.go | 59 +
 pkg/mount/mounter_linux.go | 56 +
 pkg/mount/mounter_linux_test.go | 195 +
 pkg/mount/mounter_solaris.go | 33 +
 pkg/mount/mounter_unsupported.go | 11 +
 pkg/mount/mountinfo.go | 54 +
 pkg/mount/mountinfo_freebsd.go | 41 +
 pkg/mount/mountinfo_linux.go | 95 +
 pkg/mount/mountinfo_linux_test.go | 476 +
 pkg/mount/mountinfo_solaris.go | 37 +
 pkg/mount/mountinfo_unsupported.go | 12 +
 pkg/mount/mountinfo_windows.go | 6 +
 pkg/mount/sharedsubtree_linux.go | 69 +
 pkg/mount/sharedsubtree_linux_test.go | 331 +
 pkg/mount/sharedsubtree_solaris.go | 58 +
 pkg/namesgenerator/cmd/names-generator/main.go | 11 +
 pkg/namesgenerator/names-generator.go | 606 +
 pkg/namesgenerator/names-generator_test.go | 27 +
 pkg/parsers/kernel/kernel.go | 74 +
 pkg/parsers/kernel/kernel_darwin.go | 56 +
 pkg/parsers/kernel/kernel_unix.go | 45 +
 pkg/parsers/kernel/kernel_unix_test.go | 96 +
 pkg/parsers/kernel/kernel_windows.go | 69 +
 pkg/parsers/kernel/uname_linux.go | 19 +
 pkg/parsers/kernel/uname_solaris.go | 14 +
 pkg/parsers/kernel/uname_unsupported.go | 18 +
 .../operatingsystem/operatingsystem_linux.go | 77 +
 .../operatingsystem/operatingsystem_solaris.go | 37 +
 .../operatingsystem/operatingsystem_unix.go | 25 +
 .../operatingsystem/operatingsystem_unix_test.go | 247 +
 .../operatingsystem/operatingsystem_windows.go | 49 +
 pkg/parsers/parsers.go | 69 +
 pkg/parsers/parsers_test.go | 70 +
 pkg/pidfile/pidfile.go | 53 +
 pkg/pidfile/pidfile_darwin.go | 14 +
 pkg/pidfile/pidfile_test.go | 38 +
 pkg/pidfile/pidfile_unix.go | 16 +
 pkg/pidfile/pidfile_windows.go | 23 +
 pkg/platform/architecture_linux.go | 16 +
 pkg/platform/architecture_unix.go | 20 +
 pkg/platform/architecture_windows.go | 60 +
 pkg/platform/platform.go | 23 +
 pkg/platform/utsname_int8.go | 18 +
 pkg/platform/utsname_int8_test.go | 16 +
 pkg/platform/utsname_uint8.go | 18 +
 pkg/platform/utsname_uint8_test.go | 16 +
 pkg/plugingetter/getter.go | 35 +
 pkg/plugins/client.go | 205 +
 pkg/plugins/client_test.go | 234 +
 pkg/plugins/discovery.go | 131 +
 pkg/plugins/discovery_test.go | 152 +
 pkg/plugins/discovery_unix.go | 5 +
 pkg/plugins/discovery_unix_test.go | 100 +
 pkg/plugins/discovery_windows.go | 8 +
 pkg/plugins/errors.go | 33 +
 pkg/plugins/plugin_test.go | 156 +
 pkg/plugins/pluginrpc-gen/README.md | 58 +
 pkg/plugins/pluginrpc-gen/fixtures/foo.go | 89 +
 .../fixtures/otherfixture/spaceship.go | 4 +
 pkg/plugins/pluginrpc-gen/main.go | 91 +
 pkg/plugins/pluginrpc-gen/parser.go | 263 +
 pkg/plugins/pluginrpc-gen/parser_test.go | 222 +
 pkg/plugins/pluginrpc-gen/template.go | 118 +
 pkg/plugins/plugins.go | 329 +
 pkg/plugins/plugins_unix.go | 9 +
 pkg/plugins/plugins_windows.go | 8 +
 pkg/plugins/transport/http.go | 36 +
 pkg/plugins/transport/http_test.go | 20 +
 pkg/plugins/transport/transport.go | 36 +
 pkg/pools/pools.go | 137 +
 pkg/pools/pools_test.go | 166 +
 pkg/progress/progress.go | 89 +
 pkg/progress/progressreader.go | 66 +
 pkg/progress/progressreader_test.go | 75 +
 pkg/progress/progresssink.go | 53 +
 pkg/promise/promise.go | 11 +
 pkg/promise/promise_test.go | 25 +
 pkg/pubsub/publisher.go | 111 +
 pkg/pubsub/publisher_test.go | 142 +
 pkg/reexec/README.md | 5 +
 pkg/reexec/command_linux.go | 28 +
 pkg/reexec/command_unix.go | 23 +
 pkg/reexec/command_unsupported.go | 12 +
 pkg/reexec/command_windows.go | 23 +
 pkg/reexec/reexec.go | 47 +
 pkg/reexec/reexec_test.go | 53 +
 pkg/registrar/registrar.go | 130 +
 pkg/registrar/registrar_test.go | 119 +
 pkg/signal/README.md | 1 +
 pkg/signal/signal.go | 54 +
 pkg/signal/signal_darwin.go | 41 +
 pkg/signal/signal_freebsd.go | 43 +
 pkg/signal/signal_linux.go | 80 +
 pkg/signal/signal_linux_test.go | 58 +
 pkg/signal/signal_solaris.go | 42 +
 pkg/signal/signal_test.go | 33 +
 pkg/signal/signal_unix.go | 21 +
 pkg/signal/signal_unsupported.go | 10 +
 pkg/signal/signal_windows.go | 28 +
 pkg/signal/trap.go | 103 +
 pkg/stdcopy/stdcopy.go | 190 +
 pkg/stdcopy/stdcopy_test.go | 289 +
 pkg/streamformatter/streamformatter.go | 159 +
 pkg/streamformatter/streamformatter_test.go | 109 +
 pkg/streamformatter/streamwriter.go | 47 +
 pkg/streamformatter/streamwriter_test.go | 35 +
 pkg/stringid/README.md | 1 +
 pkg/stringid/stringid.go | 99 +
 pkg/stringid/stringid_test.go | 72 +
 pkg/stringutils/README.md | 1 +
 pkg/stringutils/stringutils.go | 99 +
 pkg/stringutils/stringutils_test.go | 121 +
 pkg/symlink/LICENSE.APACHE | 191 +
 pkg/symlink/LICENSE.BSD | 27 +
 pkg/symlink/README.md | 6 +
 pkg/symlink/fs.go | 144 +
 pkg/symlink/fs_unix.go | 15 +
 pkg/symlink/fs_unix_test.go | 407 +
 pkg/symlink/fs_windows.go | 169 +
 pkg/sysinfo/README.md | 1 +
 pkg/sysinfo/numcpu.go | 12 +
 pkg/sysinfo/numcpu_linux.go | 43 +
 pkg/sysinfo/numcpu_windows.go | 37 +
 pkg/sysinfo/sysinfo.go | 144 +
 pkg/sysinfo/sysinfo_linux.go | 259 +
 pkg/sysinfo/sysinfo_linux_test.go | 104 +
 pkg/sysinfo/sysinfo_solaris.go | 121 +
 pkg/sysinfo/sysinfo_test.go | 26 +
 pkg/sysinfo/sysinfo_unix.go | 9 +
 pkg/sysinfo/sysinfo_windows.go | 9 +
 pkg/system/chtimes.go | 35 +
 pkg/system/chtimes_test.go | 94 +
 pkg/system/chtimes_unix.go | 14 +
 pkg/system/chtimes_unix_test.go | 91 +
 pkg/system/chtimes_windows.go | 27 +
 pkg/system/chtimes_windows_test.go | 86 +
 pkg/system/errors.go | 10 +
 pkg/system/events_windows.go | 85 +
 pkg/system/exitcode.go | 33 +
 pkg/system/filesys.go | 67 +
 pkg/system/filesys_windows.go | 298 +
 pkg/system/init.go | 22 +
 pkg/system/init_windows.go | 17 +
 pkg/system/lcow_unix.go | 8 +
 pkg/system/lcow_windows.go | 6 +
 pkg/system/lstat_unix.go | 17 +
 pkg/system/lstat_unix_test.go | 30 +
 pkg/system/lstat_windows.go | 14 +
 pkg/system/meminfo.go | 17 +
 pkg/system/meminfo_linux.go | 65 +
 pkg/system/meminfo_solaris.go | 129 +
 pkg/system/meminfo_unix_test.go | 40 +
 pkg/system/meminfo_unsupported.go | 8 +
 pkg/system/meminfo_windows.go | 45 +
 pkg/system/mknod.go | 22 +
 pkg/system/mknod_windows.go | 13 +
 pkg/system/path.go | 21 +
 pkg/system/path_unix.go | 9 +
 pkg/system/path_windows.go | 33 +
 pkg/system/path_windows_test.go | 78 +
 pkg/system/process_unix.go | 22 +
 pkg/system/rm.go | 80 +
 pkg/system/rm_test.go | 84 +
 pkg/system/stat_darwin.go | 13 +
 pkg/system/stat_freebsd.go | 13 +
 pkg/system/stat_linux.go | 19 +
 pkg/system/stat_openbsd.go | 13 +
 pkg/system/stat_solaris.go | 13 +
 pkg/system/stat_unix.go | 58 +
 pkg/system/stat_unix_test.go | 39 +
 pkg/system/stat_windows.go | 49 +
 pkg/system/syscall_unix.go | 17 +
 pkg/system/syscall_windows.go | 122 +
 pkg/system/syscall_windows_test.go | 9 +
 pkg/system/umask.go | 13 +
 pkg/system/umask_windows.go | 9 +
 pkg/system/utimes_freebsd.go | 22 +
 pkg/system/utimes_linux.go | 26 +
 pkg/system/utimes_unix_test.go | 68 +
 pkg/system/utimes_unsupported.go | 10 +
 pkg/system/xattrs_linux.go | 63 +
 pkg/system/xattrs_unsupported.go | 13 +
 pkg/tailfile/tailfile.go | 66 +
 pkg/tailfile/tailfile_test.go | 148 +
 pkg/tarsplitutils/tarstream.go | 127 +
 pkg/tarsum/builder_context.go | 21 +
 pkg/tarsum/builder_context_test.go | 67 +
 pkg/tarsum/fileinfosums.go | 126 +
 pkg/tarsum/fileinfosums_test.go | 62 +
 pkg/tarsum/tarsum.go | 295 +
 pkg/tarsum/tarsum_spec.md | 230 +
 pkg/tarsum/tarsum_test.go | 664 +
 .../json | 1 +
 .../layer.tar | Bin 0 -> 9216 bytes
 .../json | 1 +
 .../layer.tar | Bin 0 -> 1536 bytes
 pkg/tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes
 pkg/tarsum/testdata/xattr/json | 1 +
 pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes
 pkg/tarsum/versioning.go | 158 +
 pkg/tarsum/versioning_test.go | 98 +
 pkg/tarsum/writercloser.go | 22 +
 pkg/templates/templates.go | 78 +
 pkg/templates/templates_test.go | 88 +
 pkg/term/ascii.go | 66 +
 pkg/term/ascii_test.go | 43 +
 pkg/term/proxy.go | 74 +
 pkg/term/proxy_test.go | 92 +
 pkg/term/tc.go | 21 +
 pkg/term/tc_solaris_cgo.go | 65 +
 pkg/term/term.go | 124 +
 pkg/term/term_linux_test.go | 120 +
 pkg/term/term_windows.go | 233 +
 pkg/term/termios_bsd.go | 42 +
 pkg/term/termios_linux.go | 38 +
 pkg/term/windows/ansi_reader.go | 263 +
 pkg/term/windows/ansi_writer.go | 64 +
 pkg/term/windows/console.go | 35 +
 pkg/term/windows/windows.go | 33 +
 pkg/term/windows/windows_test.go | 3 +
 pkg/term/winsize.go | 30 +
 pkg/term/winsize_solaris_cgo.go | 42 +
 pkg/testutil/cmd/command.go | 307 +
 pkg/testutil/cmd/command_test.go | 118 +
 pkg/testutil/golden/golden.go | 28 +
 pkg/testutil/helpers.go | 33 +
 pkg/testutil/pkg.go | 1 +
 pkg/testutil/tempfile/tempfile.go | 56 +
 pkg/testutil/utils.go | 218 +
 pkg/testutil/utils_test.go | 341 +
 pkg/tlsconfig/tlsconfig_clone.go | 11 +
 pkg/tlsconfig/tlsconfig_clone_go17.go | 33 +
 pkg/truncindex/truncindex.go | 139 +
 pkg/truncindex/truncindex_test.go | 453 +
 pkg/urlutil/urlutil.go | 44 +
 pkg/urlutil/urlutil_test.go | 56 +
 pkg/useragent/README.md | 1 +
 pkg/useragent/useragent.go | 55 +
 pkg/useragent/useragent_test.go | 31 +
 plugin/backend_linux.go | 865 +
 plugin/backend_unsupported.go | 72 +
 plugin/blobstore.go | 190 +
 plugin/defs.go | 26 +
 plugin/manager.go | 381 +
 plugin/manager_linux.go | 320 +
 plugin/manager_solaris.go | 28 +
 plugin/manager_test.go | 55 +
 plugin/manager_windows.go | 30 +
 plugin/store.go | 270 +
 plugin/store_test.go | 33 +
 plugin/v2/plugin.go | 244 +
 plugin/v2/plugin_linux.go | 132 +
 plugin/v2/plugin_unsupported.go | 14 +
 plugin/v2/settable.go | 102 +
 plugin/v2/settable_test.go | 91 +
 poule.yml | 131 +
 profiles/apparmor/apparmor.go | 114 +
 profiles/apparmor/template.go | 46 +
 profiles/seccomp/default.json | 750 +
 profiles/seccomp/fixtures/example.json | 27 +
 profiles/seccomp/generate.go | 32 +
 profiles/seccomp/seccomp.go | 150 +
 profiles/seccomp/seccomp_default.go | 640 +
 profiles/seccomp/seccomp_test.go | 32 +
 profiles/seccomp/seccomp_unsupported.go | 13 +
 project/ARM.md | 45 +
 project/BRANCHES-AND-TAGS.md | 35 +
 project/CONTRIBUTING.md | 1 +
 project/GOVERNANCE.md | 17 +
 project/IRC-ADMINISTRATION.md | 37 +
 project/ISSUE-TRIAGE.md | 132 +
 project/PACKAGE-REPO-MAINTENANCE.md | 74 +
 project/PACKAGERS.md | 307 +
 project/PATCH-RELEASES.md | 68 +
 project/PRINCIPLES.md | 19 +
 project/README.md | 24 +
 project/RELEASE-CHECKLIST.md | 519 +
 project/RELEASE-PROCESS.md | 78 +
 project/REVIEWING.md | 246 +
 project/TOOLS.md | 63 +
 reference/store.go | 343 +
 reference/store_test.go | 358 +
 registry/auth.go | 303 +
 registry/auth_test.go | 124 +
 registry/config.go | 456 +
 registry/config_test.go | 260 +
 registry/config_unix.go | 25 +
 registry/config_windows.go | 25 +
 registry/endpoint_test.go | 78 +
 registry/endpoint_v1.go | 198 +
 registry/registry.go | 191 +
 registry/registry_mock_test.go | 478 +
 registry/registry_test.go | 917 +
 registry/resumable/resumablerequestreader.go | 96 +
 registry/resumable/resumablerequestreader_test.go | 256 +
 registry/service.go | 327 +
 registry/service_v1.go | 40 +
 registry/service_v1_test.go | 23 +
 registry/service_v2.go | 82 +
 registry/session.go | 778 +
 registry/types.go | 70 +
 reports/2017-05-01.md | 35 +
 reports/2017-05-08.md | 34 +
 reports/2017-05-15.md | 52 +
 reports/2017-06-05.md | 36 +
 reports/2017-06-12.md | 78 +
 reports/builder/2017-05-01.md | 47 +
 reports/builder/2017-05-08.md | 57 +
 reports/builder/2017-05-15.md | 64 +
 reports/builder/2017-05-22.md | 47 +
 reports/builder/2017-05-29.md | 52 +
 reports/builder/2017-06-05.md | 58 +
 reports/builder/2017-06-12.md | 58 +
 restartmanager/restartmanager.go | 134 +
 restartmanager/restartmanager_test.go | 39 +
 runconfig/config.go | 108 +
 runconfig/config_test.go | 139 +
 runconfig/config_unix.go | 59 +
 runconfig/config_windows.go | 19 +
 runconfig/errors.go | 38 +
 runconfig/fixtures/unix/container_config_1_14.json | 30 +
 runconfig/fixtures/unix/container_config_1_17.json | 50 +
 runconfig/fixtures/unix/container_config_1_19.json | 58 +
 .../fixtures/unix/container_hostconfig_1_14.json | 18 +
 .../fixtures/unix/container_hostconfig_1_19.json | 30 +
 .../fixtures/windows/container_config_1_19.json | 58 +
 runconfig/hostconfig.go | 80 +
 runconfig/hostconfig_solaris.go | 46 +
 runconfig/hostconfig_test.go | 283 +
 runconfig/hostconfig_unix.go | 110 +
 runconfig/hostconfig_windows.go | 96 +
 runconfig/hostconfig_windows_test.go | 17 +
 runconfig/opts/parse.go | 20 +
 vendor.conf | 160 +
 vendor/cloud.google.com/go/LICENSE | 202 +
 vendor/cloud.google.com/go/README.md | 452 +
 .../go/compute/metadata/metadata.go | 438 +
 vendor/cloud.google.com/go/internal/cloud.go | 64 +
 vendor/cloud.google.com/go/internal/retry.go | 55 +
 vendor/cloud.google.com/go/logging/apiv2/README.md | 11 +
 .../go/logging/apiv2/config_client.go | 300 +
 vendor/cloud.google.com/go/logging/apiv2/doc.go | 26 +
 .../go/logging/apiv2/logging_client.go | 359 +
 .../go/logging/apiv2/metrics_client.go | 299 +
 vendor/cloud.google.com/go/logging/doc.go | 89 +
 .../cloud.google.com/go/logging/internal/common.go | 30 +
 vendor/cloud.google.com/go/logging/logging.go | 674 +
 vendor/github.com/Azure/go-ansiterm/LICENSE | 21 +
 vendor/github.com/Azure/go-ansiterm/README.md | 9 +
 vendor/github.com/Azure/go-ansiterm/constants.go | 188 +
 vendor/github.com/Azure/go-ansiterm/context.go | 7 +
 .../Azure/go-ansiterm/csi_entry_state.go | 49 +
 .../Azure/go-ansiterm/csi_param_state.go | 38 +
 .../Azure/go-ansiterm/escape_intermediate_state.go | 36 +
 .../github.com/Azure/go-ansiterm/escape_state.go | 47 +
 .../github.com/Azure/go-ansiterm/event_handler.go | 90 +
 .../github.com/Azure/go-ansiterm/ground_state.go | 24 +
 .../Azure/go-ansiterm/osc_string_state.go | 31 +
 vendor/github.com/Azure/go-ansiterm/parser.go | 136 +
 .../Azure/go-ansiterm/parser_action_helpers.go | 103 +
 .../github.com/Azure/go-ansiterm/parser_actions.go | 122 +
 vendor/github.com/Azure/go-ansiterm/states.go | 71 +
 vendor/github.com/Azure/go-ansiterm/utilities.go | 21 +
 .../github.com/Azure/go-ansiterm/winterm/ansi.go | 182 +
 vendor/github.com/Azure/go-ansiterm/winterm/api.go | 322 +
 .../Azure/go-ansiterm/winterm/attr_translation.go | 100 +
 .../Azure/go-ansiterm/winterm/cursor_helpers.go | 101 +
 .../Azure/go-ansiterm/winterm/erase_helpers.go | 84 +
 .../Azure/go-ansiterm/winterm/scroll_helper.go | 118 +
 .../Azure/go-ansiterm/winterm/utilities.go | 9 +
 .../Azure/go-ansiterm/winterm/win_event_handler.go | 726 +
 vendor/github.com/BurntSushi/toml/COPYING | 14 +
 vendor/github.com/BurntSushi/toml/README.md | 220 +
 vendor/github.com/BurntSushi/toml/decode.go | 492 +
 vendor/github.com/BurntSushi/toml/decode_meta.go | 122 +
 vendor/github.com/BurntSushi/toml/doc.go | 27 +
 vendor/github.com/BurntSushi/toml/encode.go | 496 +
 .../github.com/BurntSushi/toml/encoding_types.go | 19 +
 .../BurntSushi/toml/encoding_types_1.1.go | 18 +
 vendor/github.com/BurntSushi/toml/lex.go | 874 +
 vendor/github.com/BurntSushi/toml/parse.go | 498 +
 vendor/github.com/BurntSushi/toml/type_check.go | 91 +
 vendor/github.com/BurntSushi/toml/type_fields.go | 241 +
 vendor/github.com/Graylog2/go-gelf/LICENSE | 21 +
 vendor/github.com/Graylog2/go-gelf/README.md | 76 +
 vendor/github.com/Graylog2/go-gelf/gelf/reader.go | 140 +
 vendor/github.com/Graylog2/go-gelf/gelf/writer.go | 418 +
 vendor/github.com/Microsoft/go-winio/LICENSE | 22 +
 vendor/github.com/Microsoft/go-winio/README.md | 22 +
 .../Microsoft/go-winio/archive/tar/LICENSE | 27 +
 .../Microsoft/go-winio/archive/tar/common.go | 344 +
 .../Microsoft/go-winio/archive/tar/reader.go | 1002 ++
 .../Microsoft/go-winio/archive/tar/stat_atim.go | 20 +
 .../go-winio/archive/tar/stat_atimespec.go | 20 +
 .../Microsoft/go-winio/archive/tar/stat_unix.go | 32 +
 .../Microsoft/go-winio/archive/tar/writer.go | 444 +
 vendor/github.com/Microsoft/go-winio/backup.go | 270 +
 .../Microsoft/go-winio/backuptar/noop.go | 4 +
 .../github.com/Microsoft/go-winio/backuptar/tar.go | 353 +
 vendor/github.com/Microsoft/go-winio/file.go | 295 +
 vendor/github.com/Microsoft/go-winio/fileinfo.go | 60 +
 vendor/github.com/Microsoft/go-winio/pipe.go | 404 +
 vendor/github.com/Microsoft/go-winio/privilege.go | 202 +
 vendor/github.com/Microsoft/go-winio/reparse.go | 128 +
 vendor/github.com/Microsoft/go-winio/sd.go | 98 +
 vendor/github.com/Microsoft/go-winio/syscall.go | 3 +
 .../Microsoft/go-winio/zsyscall_windows.go | 528 +
 vendor/github.com/Microsoft/hcsshim/LICENSE | 21 +
 vendor/github.com/Microsoft/hcsshim/README.md | 12 +
 .../github.com/Microsoft/hcsshim/activatelayer.go | 28 +
 vendor/github.com/Microsoft/hcsshim/baselayer.go | 183 +
 vendor/github.com/Microsoft/hcsshim/callback.go | 79 +
 vendor/github.com/Microsoft/hcsshim/cgo.go | 7 +
 vendor/github.com/Microsoft/hcsshim/container.go | 738 +
 vendor/github.com/Microsoft/hcsshim/createlayer.go | 27 +
 .../Microsoft/hcsshim/createsandboxlayer.go | 35 +
 .../Microsoft/hcsshim/deactivatelayer.go | 26 +
 .../github.com/Microsoft/hcsshim/destroylayer.go | 27 +
 vendor/github.com/Microsoft/hcsshim/errors.go | 239 +
 .../Microsoft/hcsshim/expandsandboxsize.go | 26 +
 vendor/github.com/Microsoft/hcsshim/exportlayer.go | 156 +
 .../Microsoft/hcsshim/getlayermountpath.go | 55 +
 .../Microsoft/hcsshim/getsharedbaseimages.go | 22 +
 vendor/github.com/Microsoft/hcsshim/guid.go | 19 +
 vendor/github.com/Microsoft/hcsshim/hcsshim.go | 166 +
 vendor/github.com/Microsoft/hcsshim/hnsfuncs.go | 271 +
 vendor/github.com/Microsoft/hcsshim/importlayer.go | 212 +
 vendor/github.com/Microsoft/hcsshim/interface.go | 174 +
 vendor/github.com/Microsoft/hcsshim/layerexists.go | 30 +
 vendor/github.com/Microsoft/hcsshim/layerutils.go | 111 +
 vendor/github.com/Microsoft/hcsshim/legacy.go | 731 +
 vendor/github.com/Microsoft/hcsshim/nametoguid.go | 20 +
 .../github.com/Microsoft/hcsshim/preparelayer.go | 46 +
 vendor/github.com/Microsoft/hcsshim/process.go | 384 +
 .../github.com/Microsoft/hcsshim/processimage.go | 23 +
 .../github.com/Microsoft/hcsshim/unpreparelayer.go | 27 +
 vendor/github.com/Microsoft/hcsshim/utils.go | 33 +
 vendor/github.com/Microsoft/hcsshim/version.go | 7 +
 vendor/github.com/Microsoft/hcsshim/waithelper.go | 63 +
 vendor/github.com/Microsoft/hcsshim/zhcsshim.go | 1042 ++
 vendor/github.com/Nvveen/Gotty/LICENSE | 26 +
 vendor/github.com/Nvveen/Gotty/README | 5 +
 vendor/github.com/Nvveen/Gotty/attributes.go | 514 +
 vendor/github.com/Nvveen/Gotty/gotty.go | 244 +
 vendor/github.com/Nvveen/Gotty/parser.go | 362 +
 vendor/github.com/Nvveen/Gotty/types.go | 23 +
 vendor/github.com/RackSec/srslog/LICENSE | 27 +
 vendor/github.com/RackSec/srslog/README.md | 131 +
 vendor/github.com/RackSec/srslog/constants.go | 68 +
 vendor/github.com/RackSec/srslog/dialer.go | 87 +
 vendor/github.com/RackSec/srslog/formatter.go | 48 +
 vendor/github.com/RackSec/srslog/framer.go | 24 +
 vendor/github.com/RackSec/srslog/net_conn.go | 30 +
 vendor/github.com/RackSec/srslog/srslog.go | 97 +
 vendor/github.com/RackSec/srslog/srslog_unix.go | 54 +
 vendor/github.com/RackSec/srslog/writer.go | 185 +
 vendor/github.com/Sirupsen/logrus/LICENSE | 21 +
 vendor/github.com/Sirupsen/logrus/README.md | 425 +
 vendor/github.com/Sirupsen/logrus/alt_exit.go | 64 +
 vendor/github.com/Sirupsen/logrus/doc.go | 26 +
 vendor/github.com/Sirupsen/logrus/entry.go | 275 +
 vendor/github.com/Sirupsen/logrus/exported.go | 193 +
 vendor/github.com/Sirupsen/logrus/formatter.go | 45 +
 vendor/github.com/Sirupsen/logrus/hooks.go | 34 +
 .../github.com/Sirupsen/logrus/json_formatter.go | 41 +
 vendor/github.com/Sirupsen/logrus/logger.go | 308 +
 vendor/github.com/Sirupsen/logrus/logrus.go | 143 +
 .../Sirupsen/logrus/terminal_appengine.go | 8 +
 vendor/github.com/Sirupsen/logrus/terminal_bsd.go | 10 +
 .../github.com/Sirupsen/logrus/terminal_linux.go | 14 +
 .../Sirupsen/logrus/terminal_notwindows.go | 22 +
 .../github.com/Sirupsen/logrus/terminal_solaris.go | 15 +
 .../github.com/Sirupsen/logrus/terminal_windows.go | 27 +
 .../github.com/Sirupsen/logrus/text_formatter.go | 168 +
 vendor/github.com/Sirupsen/logrus/writer.go | 53 +
 vendor/github.com/agl/ed25519/LICENSE | 27 +
 vendor/github.com/agl/ed25519/ed25519.go | 125 +
 .../github.com/agl/ed25519/edwards25519/const.go | 1411 ++
 .../agl/ed25519/edwards25519/edwards25519.go | 2127 +++
 vendor/github.com/armon/go-metrics/LICENSE | 20 +
 vendor/github.com/armon/go-metrics/README.md | 68 +
 vendor/github.com/armon/go-metrics/const_unix.go | 12 +
 .../github.com/armon/go-metrics/const_windows.go | 13 +
 vendor/github.com/armon/go-metrics/inmem.go | 239 +
 vendor/github.com/armon/go-metrics/inmem_signal.go | 100 +
 vendor/github.com/armon/go-metrics/metrics.go | 115 +
 vendor/github.com/armon/go-metrics/sink.go | 52 +
 vendor/github.com/armon/go-metrics/start.go | 95 +
 vendor/github.com/armon/go-metrics/statsd.go | 154 +
 vendor/github.com/armon/go-metrics/statsite.go | 142 +
 vendor/github.com/armon/go-radix/LICENSE | 20 +
 vendor/github.com/armon/go-radix/README.md | 36 +
 vendor/github.com/armon/go-radix/radix.go | 467 +
 vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 +
 vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 +
 vendor/github.com/aws/aws-sdk-go/README.md | 116 +
 .../github.com/aws/aws-sdk-go/aws/awserr/error.go | 145 +
 .../github.com/aws/aws-sdk-go/aws/awserr/types.go | 194 +
 .../github.com/aws/aws-sdk-go/aws/awsutil/copy.go | 108 +
 .../github.com/aws/aws-sdk-go/aws/awsutil/equal.go | 27 +
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 +
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 107 +
 .../aws/aws-sdk-go/aws/awsutil/string_value.go | 89 +
 .../github.com/aws/aws-sdk-go/aws/client/client.go | 137 +
 .../aws/aws-sdk-go/aws/client/default_retryer.go | 90 +
 .../aws-sdk-go/aws/client/metadata/client_info.go | 12 +
 vendor/github.com/aws/aws-sdk-go/aws/config.go | 419 +
 .../github.com/aws/aws-sdk-go/aws/convert_types.go | 369 +
 .../aws/aws-sdk-go/aws/corehandlers/handlers.go | 182 +
 .../aws-sdk-go/aws/corehandlers/param_validator.go | 17 +
 .../aws-sdk-go/aws/credentials/chain_provider.go | 100 +
 .../aws/aws-sdk-go/aws/credentials/credentials.go | 223 +
 .../credentials/ec2rolecreds/ec2_role_provider.go | 178 +
 .../aws/credentials/endpointcreds/provider.go | 191 +
 .../aws/aws-sdk-go/aws/credentials/env_provider.go | 77 +
 .../aws/credentials/shared_credentials_provider.go | 151 +
 .../aws-sdk-go/aws/credentials/static_provider.go | 57 +
 .../credentials/stscreds/assume_role_provider.go | 161 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 130 +
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 162 +
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 +
 vendor/github.com/aws/aws-sdk-go/aws/errors.go | 17 +
 vendor/github.com/aws/aws-sdk-go/aws/logger.go | 112 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 187 +
 .../aws/aws-sdk-go/aws/request/http_request.go | 24 +
 .../aws/aws-sdk-go/aws/request/offset_reader.go | 58 +
 .../aws/aws-sdk-go/aws/request/request.go | 344 +
 .../aws-sdk-go/aws/request/request_pagination.go | 104 +
 .../aws/aws-sdk-go/aws/request/retryer.go | 101 +
 .../aws/aws-sdk-go/aws/request/validation.go | 234 +
 .../github.com/aws/aws-sdk-go/aws/session/doc.go | 223 +
 .../aws/aws-sdk-go/aws/session/env_config.go | 188 +
 .../aws/aws-sdk-go/aws/session/session.go | 393 +
 .../aws/aws-sdk-go/aws/session/shared_config.go | 295 +
 .../aws/aws-sdk-go/aws/signer/v4/header_rules.go | 82 +
 .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 +
 .../aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go | 24 +
 .../github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 713 +
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 106 +
 vendor/github.com/aws/aws-sdk-go/aws/version.go | 8 +
 vendor/github.com/aws/aws-sdk-go/private/README.md | 4 +
 .../aws/aws-sdk-go/private/endpoints/endpoints.go | 70 +
 .../aws-sdk-go/private/endpoints/endpoints_map.go | 95 +
 .../aws/aws-sdk-go/private/protocol/idempotency.go | 75 +
 .../private/protocol/json/jsonutil/build.go | 254 +
 .../private/protocol/json/jsonutil/unmarshal.go | 213 +
 .../aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go | 111 +
 .../aws/aws-sdk-go/private/protocol/query/build.go | 36 +
 .../private/protocol/query/queryutil/queryutil.go | 230 +
 .../aws-sdk-go/private/protocol/query/unmarshal.go | 35 +
 .../private/protocol/query/unmarshal_error.go | 66 +
 .../aws/aws-sdk-go/private/protocol/rest/build.go | 256 +
 .../aws-sdk-go/private/protocol/rest/payload.go | 45 +
 .../aws-sdk-go/private/protocol/rest/unmarshal.go | 198 +
 .../aws/aws-sdk-go/private/protocol/unmarshal.go | 21 +
 .../private/protocol/xml/xmlutil/build.go | 293 +
 .../private/protocol/xml/xmlutil/unmarshal.go | 260 +
 .../private/protocol/xml/xmlutil/xml_to_struct.go | 105 +
 .../aws/aws-sdk-go/service/cloudwatchlogs/api.go | 4516 +++++
 .../aws-sdk-go/service/cloudwatchlogs/service.go | 117 +
 .../github.com/aws/aws-sdk-go/service/sts/api.go | 1894 ++
 .../aws/aws-sdk-go/service/sts/customizations.go | 12 +
 .../aws/aws-sdk-go/service/sts/service.go | 130 +
 vendor/github.com/beorn7/perks/LICENSE | 20 +
 vendor/github.com/beorn7/perks/README.md | 31 +
 vendor/github.com/beorn7/perks/quantile/stream.go | 292 +
 vendor/github.com/boltdb/bolt/LICENSE | 20 +
 vendor/github.com/boltdb/bolt/README.md | 857 +
 vendor/github.com/boltdb/bolt/bolt_386.go | 10 +
 vendor/github.com/boltdb/bolt/bolt_amd64.go | 10 +
 vendor/github.com/boltdb/bolt/bolt_arm.go | 28 +
 vendor/github.com/boltdb/bolt/bolt_arm64.go | 12 +
 vendor/github.com/boltdb/bolt/bolt_linux.go | 10 +
 vendor/github.com/boltdb/bolt/bolt_openbsd.go | 27 +
 vendor/github.com/boltdb/bolt/bolt_ppc.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_ppc64.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_ppc64le.go | 12 +
 vendor/github.com/boltdb/bolt/bolt_s390x.go | 12 +
 vendor/github.com/boltdb/bolt/bolt_unix.go | 89 +
 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go | 90 +
 vendor/github.com/boltdb/bolt/bolt_windows.go | 144 +
 vendor/github.com/boltdb/bolt/boltsync_unix.go | 8 +
 vendor/github.com/boltdb/bolt/bucket.go | 778 +
 vendor/github.com/boltdb/bolt/cursor.go | 400 +
 vendor/github.com/boltdb/bolt/db.go | 1036 ++
 vendor/github.com/boltdb/bolt/doc.go | 44 +
 vendor/github.com/boltdb/bolt/errors.go | 71 +
 vendor/github.com/boltdb/bolt/freelist.go | 248 +
 vendor/github.com/boltdb/bolt/node.go | 604 +
 vendor/github.com/boltdb/bolt/page.go | 178 +
 vendor/github.com/boltdb/bolt/tx.go | 682 +
 vendor/github.com/bsphere/le_go/LICENSE | 21 +
 vendor/github.com/bsphere/le_go/README.md | 37 +
 vendor/github.com/bsphere/le_go/le.go | 216 +
 vendor/github.com/cloudflare/cfssl/LICENSE | 24 +
 vendor/github.com/cloudflare/cfssl/README.md | 438 +
 vendor/github.com/cloudflare/cfssl/api/api.go | 231 +
 vendor/github.com/cloudflare/cfssl/auth/auth.go | 94 +
 .../github.com/cloudflare/cfssl/certdb/README.md | 75 +
 .../github.com/cloudflare/cfssl/certdb/certdb.go | 40 +
 .../github.com/cloudflare/cfssl/config/config.go | 659 +
 .../cloudflare/cfssl/crypto/pkcs7/pkcs7.go | 188 +
 vendor/github.com/cloudflare/cfssl/csr/csr.go | 431 +
 vendor/github.com/cloudflare/cfssl/errors/doc.go | 46 +
 vendor/github.com/cloudflare/cfssl/errors/error.go | 424 +
 vendor/github.com/cloudflare/cfssl/errors/http.go | 47 +
 .../cfssl/helpers/derhelpers/derhelpers.go | 42 +
 .../github.com/cloudflare/cfssl/helpers/helpers.go | 518 +
 vendor/github.com/cloudflare/cfssl/info/info.go | 15 +
 .../github.com/cloudflare/cfssl/initca/initca.go | 224 +
 vendor/github.com/cloudflare/cfssl/log/log.go | 162 +
 .../cloudflare/cfssl/ocsp/config/config.go | 13 +
 .../cloudflare/cfssl/signer/local/local.go | 469 +
 .../github.com/cloudflare/cfssl/signer/signer.go | 412 +
 vendor/github.com/cloudfoundry/gosigar/LICENSE | 201 +
 vendor/github.com/cloudfoundry/gosigar/NOTICE | 9 +
 vendor/github.com/cloudfoundry/gosigar/README.md | 22 +
 .../cloudfoundry/gosigar/concrete_sigar.go | 69 +
 .../cloudfoundry/gosigar/sigar_darwin.go | 467 +
 .../cloudfoundry/gosigar/sigar_format.go | 126 +
 .../cloudfoundry/gosigar/sigar_interface.go | 141 +
 .../github.com/cloudfoundry/gosigar/sigar_linux.go | 386 +
 .../github.com/cloudfoundry/gosigar/sigar_unix.go | 26 +
 .../github.com/cloudfoundry/gosigar/sigar_util.go | 22 +
 .../cloudfoundry/gosigar/sigar_windows.go | 100 +
 vendor/github.com/containerd/console/LICENSE | 24 +
 vendor/github.com/containerd/console/README.md | 44 +
 vendor/github.com/containerd/console/console.go | 52 +
 .../github.com/containerd/console/console_unix.go | 133 +
 .../containerd/console/console_windows.go | 199 +
 vendor/github.com/containerd/console/tc_darwin.go | 51 +
 vendor/github.com/containerd/console/tc_freebsd.go | 51 +
 vendor/github.com/containerd/console/tc_linux.go | 51 +
 .../github.com/containerd/containerd/LICENSE.code | 191 +
 .../github.com/containerd/containerd/LICENSE.docs | 425 +
 vendor/github.com/containerd/containerd/NOTICE | 16 +
 vendor/github.com/containerd/containerd/README.md | 72 +
 .../containerd/api/grpc/server/server.go | 494 +
 .../containerd/api/grpc/server/server_linux.go | 66 +
 .../containerd/api/grpc/server/server_solaris.go | 41 +
 .../containerd/containerd/api/grpc/types/api.pb.go | 2584 +++
 .../containerd/containerd/api/grpc/types/api.proto | 344 +
 .../containerd/containerd/api/http/pprof/pprof.go | 19 +
 .../containerd/containerd/archutils/epoll.go | 22 +
 .../containerd/containerd/archutils/epoll_arm64.go | 73 +
 .../containerd/containerd-shim/console.go | 56 +
 .../containerd/containerd-shim/console_solaris.go | 14 +
 .../containerd/containerd/containerd-shim/main.go | 169 +
 .../containerd/containerd-shim/process.go | 308 +
 .../containerd/containerd-shim/process_linux.go | 190 +
 .../containerd/containerd-shim/process_solaris.go | 74 +
 .../containerd/containerd/containerd/main.go | 280 +
 .../containerd/containerd/containerd/main_linux.go | 45 +
 .../containerd/containerd/main_solaris.go | 4 +
 .../containerd/containerd/ctr/checkpoint_linux.go | 165 +
 .../containerd/ctr/checkpoint_solaris.go | 36 +
 .../github.com/containerd/containerd/ctr/const.go | 12 +
 .../containerd/containerd/ctr/container.go | 718 +
 .../github.com/containerd/containerd/ctr/events.go | 63 +
 .../github.com/containerd/containerd/ctr/main.go | 90 +
 .../github.com/containerd/containerd/ctr/sort.go | 27 +
 .../containerd/containerd/osutils/fds.go | 18 +
 .../containerd/osutils/pdeathsig_linux.go | 15 +
 .../containerd/osutils/pdeathsig_solaris.go | 8 +
 .../containerd/containerd/osutils/prctl.go | 48 +
 .../containerd/containerd/osutils/prctl_solaris.go | 19 +
 .../containerd/containerd/osutils/reaper.go | 51 +
 .../containerd/containerd/runtime/container.go | 748 +
 .../containerd/runtime/container_linux.go | 190 +
 .../containerd/runtime/container_solaris.go | 48 +
 .../containerd/containerd/runtime/process.go | 476 +
 .../containerd/containerd/runtime/process_linux.go | 22 +
 .../containerd/runtime/process_solaris.go | 34 +
 .../containerd/containerd/runtime/runtime.go | 132 +
 .../containerd/containerd/runtime/stats.go | 87 +
 .../containerd/containerd/specs/spec_linux.go | 12 +
 .../containerd/containerd/specs/spec_solaris.go | 10 +
 .../containerd/supervisor/add_process.go | 53 +
 .../containerd/containerd/supervisor/checkpoint.go | 37 +
 .../containerd/containerd/supervisor/create.go | 71 +
 .../containerd/supervisor/create_solaris.go | 8 +
 .../containerd/containerd/supervisor/delete.go | 56 +
 .../containerd/containerd/supervisor/errors.go | 28 +
 .../containerd/containerd/supervisor/exit.go | 93 +
 .../containerd/supervisor/get_containers.go | 47 +
 .../containerd/containerd/supervisor/machine.go | 28 +
 .../containerd/supervisor/machine_solaris.go | 41 +
 .../containerd/containerd/supervisor/metrics.go | 42 +
 .../containerd/supervisor/monitor_linux.go | 147 +
 .../containerd/supervisor/monitor_solaris.go | 144 +
 .../containerd/containerd/supervisor/oom.go | 23 +
 .../containerd/containerd/supervisor/signal.go | 30 +
 .../containerd/containerd/supervisor/sort.go | 27 +
 .../containerd/containerd/supervisor/stats.go | 34 +
 .../containerd/containerd/supervisor/supervisor.go | 452 +
 .../containerd/containerd/supervisor/task.go | 34 +
 .../containerd/containerd/supervisor/types.go | 12 +
 .../containerd/containerd/supervisor/update.go | 95 +
 .../containerd/containerd/supervisor/worker.go | 104 +
 vendor/github.com/containerd/containerd/version.go | 20 +
 vendor/github.com/containerd/go-runc/LICENSE | 24 +
 vendor/github.com/containerd/go-runc/README.md | 42 +
 vendor/github.com/containerd/go-runc/console.go | 98 +
 vendor/github.com/containerd/go-runc/container.go | 14 +
 vendor/github.com/containerd/go-runc/events.go | 84 +
 vendor/github.com/containerd/go-runc/io.go | 165 +
 vendor/github.com/containerd/go-runc/monitor.go | 50 +
 vendor/github.com/containerd/go-runc/runc.go | 553 +
 vendor/github.com/containerd/go-runc/utils.go | 28 +
 vendor/github.com/coreos/etcd/LICENSE | 202 +
 vendor/github.com/coreos/etcd/NOTICE | 5 +
 vendor/github.com/coreos/etcd/README.md | 139 +
 vendor/github.com/coreos/etcd/pkg/README.md | 2 +
 vendor/github.com/coreos/etcd/pkg/crc/crc.go | 43 +
 .../coreos/etcd/pkg/fileutil/dir_unix.go | 22 +
 .../coreos/etcd/pkg/fileutil/dir_windows.go | 46 +
 .../coreos/etcd/pkg/fileutil/fileutil.go | 121 +
 vendor/github.com/coreos/etcd/pkg/fileutil/lock.go | 26 +
 .../coreos/etcd/pkg/fileutil/lock_flock.go | 49 +
 .../coreos/etcd/pkg/fileutil/lock_linux.go | 96 +
 .../coreos/etcd/pkg/fileutil/lock_plan9.go | 45 +
 .../coreos/etcd/pkg/fileutil/lock_solaris.go | 62 +
 .../coreos/etcd/pkg/fileutil/lock_unix.go | 29 +
 .../coreos/etcd/pkg/fileutil/lock_windows.go | 125 +
 .../coreos/etcd/pkg/fileutil/preallocate.go | 47 +
 .../coreos/etcd/pkg/fileutil/preallocate_darwin.go | 43 +
 .../coreos/etcd/pkg/fileutil/preallocate_unix.go | 49 +
 .../etcd/pkg/fileutil/preallocate_unsupported.go | 25 +
 .../github.com/coreos/etcd/pkg/fileutil/purge.go | 78 +
 vendor/github.com/coreos/etcd/pkg/fileutil/sync.go | 29 +
 .../coreos/etcd/pkg/fileutil/sync_darwin.go | 40 +
 .../coreos/etcd/pkg/fileutil/sync_linux.go | 34 +
 vendor/github.com/coreos/etcd/pkg/idutil/id.go | 78 +
 .../coreos/etcd/pkg/ioutil/pagewriter.go | 106 +
 .../coreos/etcd/pkg/ioutil/readcloser.go | 66 +
 vendor/github.com/coreos/etcd/pkg/ioutil/reader.go | 40 +
 vendor/github.com/coreos/etcd/pkg/ioutil/util.go | 43 +
 vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go | 60 +
 vendor/github.com/coreos/etcd/raft/README.md | 247 +
 vendor/github.com/coreos/etcd/raft/doc.go | 300 +
 vendor/github.com/coreos/etcd/raft/log.go | 358 +
 vendor/github.com/coreos/etcd/raft/log_unstable.go | 139 +
 vendor/github.com/coreos/etcd/raft/logger.go | 126 +
 vendor/github.com/coreos/etcd/raft/node.go | 521 +
 vendor/github.com/coreos/etcd/raft/progress.go | 279 +
 vendor/github.com/coreos/etcd/raft/raft.go | 1252 ++
 .../github.com/coreos/etcd/raft/raftpb/raft.pb.go | 1900 ++
 .../github.com/coreos/etcd/raft/raftpb/raft.proto | 93 +
 vendor/github.com/coreos/etcd/raft/rawnode.go | 264 +
 vendor/github.com/coreos/etcd/raft/read_only.go | 118 +
 vendor/github.com/coreos/etcd/raft/status.go | 76 +
 vendor/github.com/coreos/etcd/raft/storage.go | 271 +
 vendor/github.com/coreos/etcd/raft/util.go | 129 +
 vendor/github.com/coreos/etcd/snap/db.go | 74 +
 vendor/github.com/coreos/etcd/snap/message.go | 64 +
 vendor/github.com/coreos/etcd/snap/metrics.go | 41 +
 .../github.com/coreos/etcd/snap/snappb/snap.pb.go | 353 +
 .../github.com/coreos/etcd/snap/snappb/snap.proto | 14 +
 vendor/github.com/coreos/etcd/snap/snapshotter.go | 204 +
 vendor/github.com/coreos/etcd/wal/decoder.go | 185 +
 vendor/github.com/coreos/etcd/wal/doc.go | 75 +
 vendor/github.com/coreos/etcd/wal/encoder.go | 120 +
 vendor/github.com/coreos/etcd/wal/file_pipeline.go | 97 +
 vendor/github.com/coreos/etcd/wal/metrics.go | 31 +
 vendor/github.com/coreos/etcd/wal/repair.go | 99 +
 vendor/github.com/coreos/etcd/wal/util.go | 107 +
 vendor/github.com/coreos/etcd/wal/wal.go | 637 +
 vendor/github.com/coreos/etcd/wal/wal_unix.go | 44 +
 vendor/github.com/coreos/etcd/wal/wal_windows.go | 41 +
 vendor/github.com/coreos/etcd/wal/walpb/record.go | 29 +
 .../github.com/coreos/etcd/wal/walpb/record.pb.go | 521 +
 .../github.com/coreos/etcd/wal/walpb/record.proto | 20 +
 vendor/github.com/coreos/go-systemd/LICENSE | 191 +
 vendor/github.com/coreos/go-systemd/README.md | 54 +
 .../coreos/go-systemd/activation/files.go | 52 +
 .../coreos/go-systemd/activation/listeners.go | 60 +
 .../coreos/go-systemd/activation/packetconns.go | 37 +
 .../coreos/go-systemd/daemon/sdnotify.go | 63 +
 .../coreos/go-systemd/daemon/watchdog.go | 72 +
 vendor/github.com/coreos/go-systemd/dbus/dbus.go | 213 +
 .../github.com/coreos/go-systemd/dbus/methods.go | 565 +
 .../coreos/go-systemd/dbus/properties.go | 237 +
 vendor/github.com/coreos/go-systemd/dbus/set.go | 47 +
 .../coreos/go-systemd/dbus/subscription.go | 250 +
 .../coreos/go-systemd/dbus/subscription_set.go | 57 +
 .../coreos/go-systemd/journal/journal.go | 179 +
 vendor/github.com/coreos/go-systemd/util/util.go | 90 +
 .../github.com/coreos/go-systemd/util/util_cgo.go | 174 +
 .../github.com/coreos/go-systemd/util/util_stub.go | 23 +
 vendor/github.com/coreos/pkg/LICENSE | 202 +
 vendor/github.com/coreos/pkg/NOTICE | 5 +
 vendor/github.com/coreos/pkg/README.md | 4 +
 vendor/github.com/coreos/pkg/capnslog/README.md | 39 +
 .../github.com/coreos/pkg/capnslog/formatters.go | 157 +
 .../coreos/pkg/capnslog/glog_formatter.go | 96 +
 vendor/github.com/coreos/pkg/capnslog/init.go | 49 +
 .../github.com/coreos/pkg/capnslog/init_windows.go | 25 +
 .../coreos/pkg/capnslog/journald_formatter.go | 68 +
 .../github.com/coreos/pkg/capnslog/log_hijack.go | 39 +
 vendor/github.com/coreos/pkg/capnslog/logmap.go | 240 +
 .../github.com/coreos/pkg/capnslog/pkg_logger.go | 177 +
 .../coreos/pkg/capnslog/syslog_formatter.go | 65 +
 vendor/github.com/coreos/pkg/dlopen/dlopen.go | 82 +
 .../github.com/coreos/pkg/dlopen/dlopen_example.go | 56 +
 .../cyberdelia/go-metrics-graphite/LICENSE | 22 +
 .../cyberdelia/go-metrics-graphite/README.md | 19 +
 .../cyberdelia/go-metrics-graphite/graphite.go | 113 +
 vendor/github.com/davecgh/go-spew/LICENSE | 15 +
 vendor/github.com/davecgh/go-spew/README.md | 205 +
 vendor/github.com/davecgh/go-spew/spew/bypass.go | 152 +
 .../github.com/davecgh/go-spew/spew/bypasssafe.go | 38 +
 vendor/github.com/davecgh/go-spew/spew/common.go | 341 +
 vendor/github.com/davecgh/go-spew/spew/config.go | 306 +
 vendor/github.com/davecgh/go-spew/spew/doc.go | 211 +
 vendor/github.com/davecgh/go-spew/spew/dump.go | 509 +
 vendor/github.com/davecgh/go-spew/spew/format.go | 419 +
 vendor/github.com/davecgh/go-spew/spew/spew.go | 148 +
 vendor/github.com/deckarep/golang-set/LICENSE | 22 +
 vendor/github.com/deckarep/golang-set/README.md | 94 +
 vendor/github.com/deckarep/golang-set/set.go | 168 +
 .../github.com/deckarep/golang-set/threadsafe.go | 204 +
 .../github.com/deckarep/golang-set/threadunsafe.go | 246 +
 vendor/github.com/docker/cli/LICENSE | 191 +
 vendor/github.com/docker/cli/NOTICE | 19 +
 vendor/github.com/docker/cli/README.md | 63 +
 vendor/github.com/docker/cli/VERSION | 1 +
 vendor/github.com/docker/cli/cli/cobra.go | 150 +
 vendor/github.com/docker/cli/cli/command/cli.go | 299 +
 .../docker/cli/cli/command/commands/commands.go | 92 +
 .../docker/cli/cli/command/container/attach.go | 164 +
 .../docker/cli/cli/command/container/cmd.go | 46 +
 .../docker/cli/cli/command/container/commit.go | 75 +
 .../docker/cli/cli/command/container/cp.go | 305 +
 .../docker/cli/cli/command/container/create.go | 224 +
 .../docker/cli/cli/command/container/diff.go | 46 +
 .../docker/cli/cli/command/container/exec.go | 220 +
 .../docker/cli/cli/command/container/export.go | 58 +
 .../docker/cli/cli/command/container/hijack.go | 207 +
 .../docker/cli/cli/command/container/inspect.go | 46 +
 .../docker/cli/cli/command/container/kill.go | 56 +
 .../docker/cli/cli/command/container/list.go | 140 +
 .../docker/cli/cli/command/container/logs.go | 76 +
 .../docker/cli/cli/command/container/opts.go | 841 +
 .../docker/cli/cli/command/container/pause.go | 49 +
 .../docker/cli/cli/command/container/port.go | 78 +
 .../docker/cli/cli/command/container/prune.go | 78 +
 .../docker/cli/cli/command/container/rename.go | 51 +
 .../docker/cli/cli/command/container/restart.go | 62 +
 .../docker/cli/cli/command/container/rm.go | 73 +
 .../docker/cli/cli/command/container/run.go | 337 +
 .../docker/cli/cli/command/container/start.go | 195 +
 .../docker/cli/cli/command/container/stats.go | 243 +
 .../cli/cli/command/container/stats_helpers.go | 238 +
 .../docker/cli/cli/command/container/stop.go | 67 +
 .../docker/cli/cli/command/container/top.go | 57 +
 .../docker/cli/cli/command/container/tty.go | 103 +
 .../docker/cli/cli/command/container/unpause.go | 50 +
 .../docker/cli/cli/command/container/update.go | 133 +
 .../docker/cli/cli/command/container/utils.go | 172 +
 .../docker/cli/cli/command/container/wait.go | 53 +
 .../docker/cli/cli/command/events_utils.go | 47 +
 .../docker/cli/cli/command/formatter/checkpoint.go | 52 +
 .../docker/cli/cli/command/formatter/config.go | 171 +
 .../docker/cli/cli/command/formatter/container.go | 345 +
 .../docker/cli/cli/command/formatter/custom.go | 35 +
 .../docker/cli/cli/command/formatter/diff.go | 72 +
 .../docker/cli/cli/command/formatter/disk_usage.go | 359 +
 .../docker/cli/cli/command/formatter/formatter.go | 119 +
 .../docker/cli/cli/command/formatter/history.go | 107 +
 .../docker/cli/cli/command/formatter/image.go | 272 +
 .../docker/cli/cli/command/formatter/network.go | 129 +
 .../docker/cli/cli/command/formatter/node.go | 330 +
 .../docker/cli/cli/command/formatter/plugin.go | 95 +
 .../docker/cli/cli/command/formatter/reflect.go | 66 +
 .../docker/cli/cli/command/formatter/secret.go | 162 +
 .../docker/cli/cli/command/formatter/service.go | 535 +
 .../docker/cli/cli/command/formatter/stack.go | 67 +
 .../docker/cli/cli/command/formatter/stats.go | 218 +
 .../docker/cli/cli/command/formatter/task.go | 150 +
 .../docker/cli/cli/command/formatter/volume.go | 131 +
 .../docker/cli/cli/command/image/build.go | 503 +
 .../docker/cli/cli/command/image/build/context.go | 377 +
 .../cli/cli/command/image/build/context_unix.go | 11 +
 .../cli/cli/command/image/build/context_windows.go | 17 +
 .../cli/cli/command/image/build/dockerignore.go | 39 +
 .../github.com/docker/cli/cli/command/image/cmd.go | 35 +
 .../docker/cli/cli/command/image/delta.go | 44 +
 .../docker/cli/cli/command/image/history.go | 64 +
 .../docker/cli/cli/command/image/import.go | 87 +
 .../docker/cli/cli/command/image/inspect.go | 44 +
 .../docker/cli/cli/command/image/list.go | 95 +
 .../docker/cli/cli/command/image/load.go | 77 +
 .../docker/cli/cli/command/image/prune.go | 95 +
 .../docker/cli/cli/command/image/pull.go | 85 +
 .../docker/cli/cli/command/image/push.go | 61 +
 .../docker/cli/cli/command/image/remove.go | 82 +
 .../docker/cli/cli/command/image/save.go | 56 +
 .../github.com/docker/cli/cli/command/image/tag.go | 41 +
 .../docker/cli/cli/command/image/trust.go | 384 +
 vendor/github.com/docker/cli/cli/command/in.go | 56 +
 .../docker/cli/cli/command/inspect/inspector.go | 199 +
 .../docker/cli/cli/command/network/cmd.go | 29 +
 .../docker/cli/cli/command/network/connect.go | 62 +
 .../docker/cli/cli/command/network/create.go | 248 +
 .../docker/cli/cli/command/network/disconnect.go | 41 +
 .../docker/cli/cli/command/network/inspect.go | 48 +
 .../docker/cli/cli/command/network/list.go | 75 +
 .../docker/cli/cli/command/network/prune.go | 76 +
 .../docker/cli/cli/command/network/remove.go | 54 +
 vendor/github.com/docker/cli/cli/command/out.go | 50 +
 .../docker/cli/cli/command/prune/prune.go | 51 +
 .../github.com/docker/cli/cli/command/registry.go | 189 +
 .../docker/cli/cli/command/registry/login.go | 82 +
 .../docker/cli/cli/command/registry/logout.go | 77 +
 .../docker/cli/cli/command/registry/search.go | 125 +
 vendor/github.com/docker/cli/cli/command/stream.go | 34 +
 .../docker/cli/cli/command/system/cmd.go | 26 +
 .../github.com/docker/cli/cli/command/system/df.go | 68 +
 .../docker/cli/cli/command/system/events.go | 139 +
 .../docker/cli/cli/command/system/info.go | 381 +
 .../docker/cli/cli/command/system/inspect.go | 216 +
 .../docker/cli/cli/command/system/prune.go | 103 +
 .../docker/cli/cli/command/system/version.go | 130 +
 vendor/github.com/docker/cli/cli/command/trust.go | 43 +
 vendor/github.com/docker/cli/cli/command/utils.go | 119 +
 .../docker/cli/cli/command/volume/cmd.go | 26 +
 .../docker/cli/cli/command/volume/create.go | 69 +
 .../docker/cli/cli/command/volume/inspect.go | 45 +
 .../docker/cli/cli/command/volume/list.go | 73 +
 .../docker/cli/cli/command/volume/prune.go | 78 +
 .../docker/cli/cli/command/volume/remove.go | 69 +
 .../cli/cli/compose/interpolation/interpolation.go | 96 +
 .../docker/cli/cli/compose/loader/loader.go | 714 +
 .../docker/cli/cli/compose/loader/volume.go | 117 +
 .../docker/cli/cli/compose/schema/bindata.go | 306 +
 .../docker/cli/cli/compose/schema/schema.go | 168 +
 .../docker/cli/cli/compose/template/template.go | 101 +
 .../docker/cli/cli/compose/types/types.go | 337 +
 vendor/github.com/docker/cli/cli/config/config.go | 120 +
 .../docker/cli/cli/config/configfile/file.go | 233 +
 .../cli/cli/config/credentials/credentials.go | 17 +
 .../cli/cli/config/credentials/default_store.go | 22 +
 .../cli/config/credentials/default_store_darwin.go | 3 +
 .../cli/config/credentials/default_store_linux.go | 3 +
 .../credentials/default_store_unsupported.go | 5 +
 .../config/credentials/default_store_windows.go | 3 +
 .../cli/cli/config/credentials/file_store.go | 53 +
 .../cli/cli/config/credentials/native_store.go | 144 +
 vendor/github.com/docker/cli/cli/debug/debug.go | 26 +
 vendor/github.com/docker/cli/cli/error.go | 33 +
 vendor/github.com/docker/cli/cli/flags/client.go | 13 +
 vendor/github.com/docker/cli/cli/flags/common.go | 117 +
 vendor/github.com/docker/cli/cli/required.go | 96 +
 vendor/github.com/docker/cli/cli/trust/trust.go | 232 +
 vendor/github.com/docker/cli/cli/version.go | 9 +
 .../docker/cli/cli/winresources/res_windows.go | 18 +
 .../docker/cli/cmd/docker/daemon_none.go | 29 +
 .../docker/cli/cmd/docker/daemon_unix.go | 79 +
 vendor/github.com/docker/cli/cmd/docker/docker.go | 309 +
 .../docker/cli/cmd/docker/docker_windows.go | 3 +
 vendor/github.com/docker/cli/opts/config.go | 98 +
 vendor/github.com/docker/cli/opts/duration.go | 64 +
 vendor/github.com/docker/cli/opts/env.go | 46 +
 vendor/github.com/docker/cli/opts/envfile.go | 81 +
 vendor/github.com/docker/cli/opts/hosts.go | 165 +
 vendor/github.com/docker/cli/opts/hosts_unix.go | 8 +
 vendor/github.com/docker/cli/opts/hosts_windows.go | 6 +
 vendor/github.com/docker/cli/opts/ip.go | 47 +
 vendor/github.com/docker/cli/opts/mount.go | 174 +
 vendor/github.com/docker/cli/opts/network.go | 106 +
 vendor/github.com/docker/cli/opts/opts.go | 488 +
 vendor/github.com/docker/cli/opts/opts_unix.go | 6 +
 vendor/github.com/docker/cli/opts/opts_windows.go | 56 +
 vendor/github.com/docker/cli/opts/parse.go | 87 +
 vendor/github.com/docker/cli/opts/port.go | 163 +
 vendor/github.com/docker/cli/opts/quotedstring.go | 37 +
 vendor/github.com/docker/cli/opts/runtime.go | 79 +
 vendor/github.com/docker/cli/opts/secret.go | 98 +
 .../github.com/docker/cli/opts/throttledevice.go | 108 +
 vendor/github.com/docker/cli/opts/ulimit.go | 57 +
 vendor/github.com/docker/cli/opts/weightdevice.go | 84 +
 vendor/github.com/docker/cli/vendor.conf | 51 +
 vendor/github.com/docker/distribution/LICENSE | 202 +
 vendor/github.com/docker/distribution/README.md | 131 +
 vendor/github.com/docker/distribution/blobs.go | 257 +
 .../docker/distribution/context/context.go | 85 +
 .../github.com/docker/distribution/context/doc.go | 89 +
 .../github.com/docker/distribution/context/http.go | 366 +
 .../docker/distribution/context/logger.go | 116 +
 .../docker/distribution/context/trace.go | 104 +
 .../github.com/docker/distribution/context/util.go | 24 +
 .../docker/distribution/context/version.go | 16 +
 .../docker/distribution/digestset/set.go | 247 +
 vendor/github.com/docker/distribution/doc.go | 7 +
 vendor/github.com/docker/distribution/errors.go | 115 +
 .../github.com/docker/distribution/manifest/doc.go | 1 +
 .../manifest/manifestlist/manifestlist.go | 155 +
 .../manifest/schema1/config_builder.go | 287 +
 .../distribution/manifest/schema1/manifest.go | 184 +
 .../manifest/schema1/reference_builder.go | 98 +
 .../docker/distribution/manifest/schema1/sign.go | 68 +
 .../docker/distribution/manifest/schema1/verify.go | 32 +
 .../distribution/manifest/schema2/builder.go | 84 +
 .../distribution/manifest/schema2/manifest.go | 138 +
 .../docker/distribution/manifest/versioned.go | 12 +
 vendor/github.com/docker/distribution/manifests.go | 125 +
 .../docker/distribution/reference/helpers.go | 42 +
 .../docker/distribution/reference/normalize.go | 170 +
 .../docker/distribution/reference/reference.go | 433 +
 .../docker/distribution/reference/regexp.go | 143 +
 vendor/github.com/docker/distribution/registry.go | 97 +
 .../distribution/registry/api/errcode/errors.go | 267 +
 .../distribution/registry/api/errcode/handler.go | 44 +
 .../distribution/registry/api/errcode/register.go | 138 +
 .../distribution/registry/api/v2/descriptors.go | 1596 ++
 .../docker/distribution/registry/api/v2/doc.go | 9 +
 .../docker/distribution/registry/api/v2/errors.go | 136 +
 .../distribution/registry/api/v2/headerparser.go | 161 +
 .../docker/distribution/registry/api/v2/routes.go | 49 +
 .../docker/distribution/registry/api/v2/urls.go | 314 +
 .../registry/client/auth/api_version.go | 58 +
 .../registry/client/auth/challenge/addr.go | 27 +
 .../client/auth/challenge/authchallenge.go | 237 +
 .../distribution/registry/client/auth/session.go | 505 +
 .../distribution/registry/client/blob_writer.go | 162 +
 .../docker/distribution/registry/client/errors.go | 139 +
 .../distribution/registry/client/repository.go | 853 +
 .../registry/client/transport/http_reader.go | 251 +
 .../registry/client/transport/transport.go | 147 +
 .../distribution/registry/storage/cache/cache.go | 35 +
 .../storage/cache/cachedblobdescriptorstore.go | 101 +
 .../registry/storage/cache/memory/memory.go | 179 +
 vendor/github.com/docker/distribution/tags.go | 27 +
 vendor/github.com/docker/distribution/uuid/uuid.go | 126 +
 vendor/github.com/docker/distribution/vendor.conf | 41 +
 .../docker/docker-credential-helpers/LICENSE | 20 +
 .../docker/docker-credential-helpers/README.md | 76 +
 .../docker-credential-helpers/client/client.go | 121 +
 .../docker-credential-helpers/client/command.go | 44 +
 .../credentials/credentials.go | 177 +
 .../docker-credential-helpers/credentials/error.go | 105 +
 .../credentials/helper.go | 14 +
 .../osxkeychain/osxkeychain_darwin.c | 228 +
 .../osxkeychain/osxkeychain_darwin.go | 170 +
 .../osxkeychain/osxkeychain_darwin.h | 14 +
 .../secretservice/secretservice_linux.c | 162 +
 .../secretservice/secretservice_linux.go | 115 +
 .../secretservice/secretservice_linux.h | 13 +
 vendor/github.com/docker/go-connections/LICENSE | 191 +
 vendor/github.com/docker/go-connections/README.md | 13 +
 vendor/github.com/docker/go-connections/nat/nat.go | 242 +
 .../github.com/docker/go-connections/nat/parse.go | 57 +
 .../github.com/docker/go-connections/nat/sort.go | 96 +
 .../docker/go-connections/sockets/README.md | 0
 .../docker/go-connections/sockets/inmem_socket.go | 81 +
 .../docker/go-connections/sockets/proxy.go | 51 +
 .../docker/go-connections/sockets/sockets.go | 38 +
 .../docker/go-connections/sockets/sockets_unix.go | 35 +
 .../go-connections/sockets/sockets_windows.go | 27 +
 .../docker/go-connections/sockets/tcp_socket.go | 22 +
 .../docker/go-connections/sockets/unix_socket.go | 32 +
 .../go-connections/tlsconfig/certpool_go17.go | 18 +
 .../go-connections/tlsconfig/certpool_other.go | 14 +
 .../docker/go-connections/tlsconfig/config.go | 244 +
 .../tlsconfig/config_client_ciphers.go | 17 +
 .../tlsconfig/config_legacy_client_ciphers.go | 15 +
 vendor/github.com/docker/go-events/LICENSE | 201 +
 vendor/github.com/docker/go-events/README.md | 117 +
 vendor/github.com/docker/go-events/broadcast.go | 178 +
 vendor/github.com/docker/go-events/channel.go | 61 +
 vendor/github.com/docker/go-events/errors.go | 10 +
 vendor/github.com/docker/go-events/event.go | 15 +
 vendor/github.com/docker/go-events/filter.go | 52 +
 vendor/github.com/docker/go-events/queue.go | 111 +
 vendor/github.com/docker/go-events/retry.go | 260 +
 vendor/github.com/docker/go-metrics/LICENSE.code | 191 +
 vendor/github.com/docker/go-metrics/LICENSE.docs | 425 +
 vendor/github.com/docker/go-metrics/NOTICE | 16 +
 vendor/github.com/docker/go-metrics/README.md | 79 +
 vendor/github.com/docker/go-metrics/counter.go | 52 +
 vendor/github.com/docker/go-metrics/docs.go | 3 +
 vendor/github.com/docker/go-metrics/gauge.go | 72 +
 vendor/github.com/docker/go-metrics/handler.go | 13 +
 vendor/github.com/docker/go-metrics/helpers.go | 10 +
 vendor/github.com/docker/go-metrics/namespace.go | 181 +
 vendor/github.com/docker/go-metrics/register.go | 15 +
 vendor/github.com/docker/go-metrics/timer.go | 68 +
 vendor/github.com/docker/go-metrics/unit.go | 12 +
 vendor/github.com/docker/go-units/LICENSE | 191 +
 vendor/github.com/docker/go-units/README.md | 16 +
 vendor/github.com/docker/go-units/duration.go | 35 +
 vendor/github.com/docker/go-units/size.go | 108 +
 vendor/github.com/docker/go-units/ulimit.go | 118 +
 vendor/github.com/docker/go/LICENSE | 27 +
 vendor/github.com/docker/go/README.md | 16 +
 .../github.com/docker/go/canonical/json/decode.go | 1168 ++
 .../github.com/docker/go/canonical/json/encode.go | 1250 ++
 vendor/github.com/docker/go/canonical/json/fold.go | 143 +
 .../github.com/docker/go/canonical/json/indent.go | 141 +
 .../github.com/docker/go/canonical/json/scanner.go | 623 +
 .../github.com/docker/go/canonical/json/stream.go | 487 +
 vendor/github.com/docker/go/canonical/json/tags.go | 44 +
 vendor/github.com/docker/libkv/LICENSE.code | 191 +
 vendor/github.com/docker/libkv/LICENSE.docs | 425 +
 vendor/github.com/docker/libkv/README.md | 107 +
 vendor/github.com/docker/libkv/libkv.go | 40 +
 .../github.com/docker/libkv/store/boltdb/boltdb.go | 474 +
 vendor/github.com/docker/libkv/store/helpers.go | 47 +
 vendor/github.com/docker/libkv/store/store.go | 132 +
 vendor/github.com/docker/libnetwork/LICENSE | 202 +
 vendor/github.com/docker/libnetwork/README.md | 89 +
 vendor/github.com/docker/libnetwork/agent.go | 915 +
 vendor/github.com/docker/libnetwork/agent.pb.go | 1003 ++
 vendor/github.com/docker/libnetwork/agent.proto | 72 +
 .../docker/libnetwork/bitseq/sequence.go | 679 +
 .../github.com/docker/libnetwork/bitseq/store.go | 141 +
 .../docker/libnetwork/cluster/provider.go | 36 +
 .../github.com/docker/libnetwork/cmd/proxy/main.go | 67 +
 .../docker/libnetwork/cmd/proxy/proxy.go | 36 +
 .../docker/libnetwork/cmd/proxy/stub_proxy.go | 31 +
 .../docker/libnetwork/cmd/proxy/tcp_proxy.go | 89 +
 .../docker/libnetwork/cmd/proxy/udp_proxy.go | 168 +
 .../docker/libnetwork/common/setmatrix.go | 135 +
 .../github.com/docker/libnetwork/config/config.go | 280 +
 vendor/github.com/docker/libnetwork/controller.go | 1269 ++
 .../docker/libnetwork/datastore/cache.go | 178 +
 .../docker/libnetwork/datastore/datastore.go | 660 +
 .../docker/libnetwork/datastore/mock_store.go | 129 +
 .../docker/libnetwork/default_gateway.go | 201 +
 .../docker/libnetwork/default_gateway_freebsd.go | 13 +
 .../docker/libnetwork/default_gateway_linux.go | 32 +
 .../docker/libnetwork/default_gateway_solaris.go | 32 +
 .../docker/libnetwork/default_gateway_windows.go | 22 +
 .../docker/libnetwork/discoverapi/discoverapi.go | 60 +
 .../docker/libnetwork/driverapi/driverapi.go | 213 +
 .../docker/libnetwork/driverapi/errors.go | 56 +
 .../docker/libnetwork/driverapi/ipamdata.go | 103 +
 .../docker/libnetwork/drivers/bridge/bridge.go | 1531 ++
 .../libnetwork/drivers/bridge/bridge_store.go | 388 +
 .../drivers/bridge/brmanager/brmanager.go | 88 +
 .../docker/libnetwork/drivers/bridge/errors.go | 341 +
 .../docker/libnetwork/drivers/bridge/interface.go | 86 +
 .../docker/libnetwork/drivers/bridge/labels.go | 18 +
 .../docker/libnetwork/drivers/bridge/link.go | 85 +
 .../drivers/bridge/netlink_deprecated_linux.go | 131 +
 .../bridge/netlink_deprecated_linux_armppc64.go | 7 +
 .../bridge/netlink_deprecated_linux_notarm.go | 7 +
 .../bridge/netlink_deprecated_unsupported.go | 18 +
 .../libnetwork/drivers/bridge/port_mapping.go | 128 +
 .../docker/libnetwork/drivers/bridge/setup.go | 26 +
 .../drivers/bridge/setup_bridgenetfiltering.go | 163 +
 .../libnetwork/drivers/bridge/setup_device.go | 68 +
 .../libnetwork/drivers/bridge/setup_firewalld.go | 20 +
 .../drivers/bridge/setup_ip_forwarding.go | 56 +
 .../libnetwork/drivers/bridge/setup_ip_tables.go | 323 +
 .../docker/libnetwork/drivers/bridge/setup_ipv4.go | 80 +
 .../docker/libnetwork/drivers/bridge/setup_ipv6.go | 119 +
 .../libnetwork/drivers/bridge/setup_verify.go | 73 +
 .../docker/libnetwork/drivers/host/host.go | 106 +
 .../docker/libnetwork/drivers/ipvlan/ipvlan.go | 115 +
 .../libnetwork/drivers/ipvlan/ipvlan_endpoint.go | 87 +
 .../libnetwork/drivers/ipvlan/ipvlan_joinleave.go | 199 +
 .../libnetwork/drivers/ipvlan/ipvlan_network.go | 250 +
 .../libnetwork/drivers/ipvlan/ipvlan_setup.go | 205 +
 .../libnetwork/drivers/ipvlan/ipvlan_state.go | 115 +
 .../libnetwork/drivers/ipvlan/ipvlan_store.go | 349 +
 .../drivers/ipvlan/ivmanager/ivmanager.go | 88 +
 .../drivers/macvlan/mvmanager/mvmanager.go | 88 +
 .../docker/libnetwork/drivers/null/null.go | 105 +
 .../drivers/overlay/ovmanager/ovmanager.go | 257 +
 .../docker/libnetwork/drivers/remote/api/api.go | 221 +
 .../docker/libnetwork/drivers/remote/driver.go | 409 +
 .../libnetwork/drivers/solaris/bridge/bridge.go | 1263 ++
 .../drivers/solaris/bridge/bridge_store.go | 384 +
 .../libnetwork/drivers/solaris/bridge/errors.go | 119 +
 .../drivers/solaris/bridge/port_mapping.go | 225 +
 .../drivers/solaris/overlay/encryption.go | 274 +
 .../drivers/solaris/overlay/joinleave.go | 188 +
 .../drivers/solaris/overlay/ov_endpoint.go | 249 +
 .../drivers/solaris/overlay/ov_network.go | 786 +
 .../libnetwork/drivers/solaris/overlay/ov_serf.go | 233 +
 .../libnetwork/drivers/solaris/overlay/ov_utils.go | 61 +
 .../libnetwork/drivers/solaris/overlay/overlay.go | 367 +
 .../drivers/solaris/overlay/overlay.pb.go | 468 +
 .../drivers/solaris/overlay/overlay.proto | 27 +
 .../libnetwork/drivers/solaris/overlay/peerdb.go | 336 +
 .../docker/libnetwork/drivers/windows/labels.go | 39 +
 .../drivers/windows/overlay/joinleave_windows.go | 107 +
 .../drivers/windows/overlay/ov_endpoint_windows.go | 216 +
 .../drivers/windows/overlay/ov_network_windows.go | 380 +
 .../drivers/windows/overlay/overlay.pb.go | 468 +
 .../drivers/windows/overlay/overlay.proto | 27 +
 .../drivers/windows/overlay/overlay_windows.go | 159 +
 .../drivers/windows/overlay/peerdb_windows.go | 120 +
 .../docker/libnetwork/drivers/windows/windows.go | 771 +
 .../libnetwork/drivers/windows/windows_store.go | 339 +
 .../libnetwork/drivers_experimental_linux.go | 9 +
 .../docker/libnetwork/drivers_freebsd.go | 13 +
 .../github.com/docker/libnetwork/drivers_ipam.go | 23 +
 .../github.com/docker/libnetwork/drivers_linux.go | 22 +
 .../docker/libnetwork/drivers_solaris.go | 15 +
 .../docker/libnetwork/drivers_windows.go | 20 +
 .../docker/libnetwork/drvregistry/drvregistry.go | 228 +
 vendor/github.com/docker/libnetwork/endpoint.go | 1192 ++
 .../github.com/docker/libnetwork/endpoint_cnt.go | 178 +
 .../github.com/docker/libnetwork/endpoint_info.go | 449 +
 .../docker/libnetwork/endpoint_info_unix.go | 30 +
 .../docker/libnetwork/endpoint_info_windows.go | 45 +
 vendor/github.com/docker/libnetwork/error.go | 193 +
 .../docker/libnetwork/etchosts/etchosts.go | 208 +
 .../github.com/docker/libnetwork/firewall_linux.go | 29 +
 .../docker/libnetwork/firewall_others.go | 6 +
 .../libnetwork/hostdiscovery/hostdiscovery.go | 112 +
 .../libnetwork/hostdiscovery/hostdiscovery_api.go | 22 +
 vendor/github.com/docker/libnetwork/idm/idm.go | 76 +
 .../github.com/docker/libnetwork/ipam/allocator.go | 601 +
 vendor/github.com/docker/libnetwork/ipam/store.go | 136 +
 .../docker/libnetwork/ipam/structures.go | 362 +
 vendor/github.com/docker/libnetwork/ipam/utils.go | 81 +
 .../docker/libnetwork/ipamapi/contract.go | 96 +
 .../libnetwork/ipams/builtin/builtin_unix.go | 43 +
 .../libnetwork/ipams/builtin/builtin_windows.go | 57 +
 .../docker/libnetwork/ipams/null/null.go | 75 +
 .../docker/libnetwork/ipams/remote/api/api.go | 94 +
 .../docker/libnetwork/ipams/remote/remote.go | 155 +
 .../libnetwork/ipams/windowsipam/windowsipam.go | 107 +
 .../docker/libnetwork/ipamutils/utils.go | 50 +
 .../docker/libnetwork/iptables/conntrack.go | 59 +
 .../docker/libnetwork/iptables/firewalld.go | 167 +
 .../docker/libnetwork/iptables/iptables.go | 541 +
 .../github.com/docker/libnetwork/ipvs/constants.go | 147 +
 vendor/github.com/docker/libnetwork/ipvs/ipvs.go | 161 +
 .../github.com/docker/libnetwork/ipvs/netlink.go | 546 +
 .../docker/libnetwork/netlabel/labels.go | 129 +
 .../github.com/docker/libnetwork/netutils/utils.go | 194 +
 .../docker/libnetwork/netutils/utils_freebsd.go | 23 +
 .../docker/libnetwork/netutils/utils_linux.go | 126 +
 .../docker/libnetwork/netutils/utils_solaris.go | 104 +
 .../docker/libnetwork/netutils/utils_windows.go | 25 +
 vendor/github.com/docker/libnetwork/network.go | 2030 +++
 .../github.com/docker/libnetwork/network_unix.go | 14 +
 .../docker/libnetwork/network_windows.go | 72 +
 .../docker/libnetwork/networkdb/broadcast.go | 171 +
 .../docker/libnetwork/networkdb/cluster.go | 663 +
 .../docker/libnetwork/networkdb/delegate.go | 516 +
 .../docker/libnetwork/networkdb/event_delegate.go | 68 +
 .../docker/libnetwork/networkdb/message.go | 102 +
 .../docker/libnetwork/networkdb/networkdb.go | 644 +
 .../docker/libnetwork/networkdb/networkdb.pb.go | 2554 +++
 .../docker/libnetwork/networkdb/networkdb.proto | 183 +
 .../docker/libnetwork/networkdb/watch.go | 110 +
 .../github.com/docker/libnetwork/ns/init_linux.go | 140 +
 .../docker/libnetwork/options/options.go | 88 +
 .../docker/libnetwork/osl/interface_freebsd.go | 4 +
 .../docker/libnetwork/osl/interface_linux.go | 469 +
 .../docker/libnetwork/osl/interface_solaris.go | 4 +
 .../docker/libnetwork/osl/interface_windows.go | 4 +
 .../docker/libnetwork/osl/namespace_linux.go | 610 +
 .../docker/libnetwork/osl/namespace_unsupported.go | 17 +
 .../docker/libnetwork/osl/namespace_windows.go | 43 +
 .../docker/libnetwork/osl/neigh_freebsd.go | 4 +
 .../docker/libnetwork/osl/neigh_linux.go | 181 +
 .../docker/libnetwork/osl/neigh_solaris.go | 4 +
 .../docker/libnetwork/osl/neigh_windows.go | 4 +
 .../docker/libnetwork/osl/options_linux.go | 79 +
 .../docker/libnetwork/osl/route_linux.go | 203 +
 vendor/github.com/docker/libnetwork/osl/sandbox.go | 171 +
 .../docker/libnetwork/osl/sandbox_freebsd.go | 44 +
 .../docker/libnetwork/osl/sandbox_solaris.go | 24 +
 .../docker/libnetwork/osl/sandbox_unsupported.go | 22 +
 .../libnetwork/portallocator/portallocator.go | 248 +
 .../portallocator/portallocator_linux.go | 27 +
 .../portallocator/portallocator_solaris.go | 5 +
 .../docker/libnetwork/portmapper/mapper.go | 241 +
 .../docker/libnetwork/portmapper/mock_proxy.go | 18 +
 .../docker/libnetwork/portmapper/proxy.go | 119 +
 .../docker/libnetwork/portmapper/proxy_linux.go | 38 +
 .../docker/libnetwork/portmapper/proxy_solaris.go | 34 +
 .../docker/libnetwork/resolvconf/README.md | 1 +
 .../docker/libnetwork/resolvconf/dns/resolvconf.go | 26 +
 .../docker/libnetwork/resolvconf/resolvconf.go | 254 +
 vendor/github.com/docker/libnetwork/resolver.go | 521 +
 .../github.com/docker/libnetwork/resolver_unix.go | 101 +
 .../docker/libnetwork/resolver_windows.go | 7 +
 vendor/github.com/docker/libnetwork/sandbox.go | 1224 ++
 .../docker/libnetwork/sandbox_dns_unix.go | 407 +
 .../docker/libnetwork/sandbox_dns_windows.go | 35 +
 .../docker/libnetwork/sandbox_externalkey.go | 12 +
 .../libnetwork/sandbox_externalkey_solaris.go | 45 +
 .../docker/libnetwork/sandbox_externalkey_unix.go | 177 +
 .../libnetwork/sandbox_externalkey_windows.go | 45 +
 .../github.com/docker/libnetwork/sandbox_store.go | 307 +
 vendor/github.com/docker/libnetwork/service.go | 92 +
 .../github.com/docker/libnetwork/service_common.go | 381 +
 .../github.com/docker/libnetwork/service_linux.go | 778 +
 .../docker/libnetwork/service_unsupported.go | 25 +
 .../docker/libnetwork/service_windows.go | 15 +
 vendor/github.com/docker/libnetwork/store.go | 489 +
 vendor/github.com/docker/libnetwork/types/types.go | 636 +
 vendor/github.com/docker/libnetwork/vendor.conf | 44 +
 vendor/github.com/docker/libtrust/LICENSE | 191 +
 vendor/github.com/docker/libtrust/README.md | 18 +
 vendor/github.com/docker/libtrust/certificates.go | 175 +
 vendor/github.com/docker/libtrust/doc.go | 9 +
 vendor/github.com/docker/libtrust/ec_key.go | 428 +
 vendor/github.com/docker/libtrust/filter.go | 50 +
 vendor/github.com/docker/libtrust/hash.go | 56 +
 vendor/github.com/docker/libtrust/jsonsign.go | 657 +
 vendor/github.com/docker/libtrust/key.go | 253 +
 vendor/github.com/docker/libtrust/key_files.go | 255 +
 vendor/github.com/docker/libtrust/key_manager.go | 175 +
 vendor/github.com/docker/libtrust/rsa_key.go | 427 +
 vendor/github.com/docker/libtrust/util.go | 363 +
 vendor/github.com/docker/notary/LICENSE | 201 +
 vendor/github.com/docker/notary/README.md | 100 +
 .../docker/notary/client/changelist/change.go | 102 +
 .../docker/notary/client/changelist/changelist.go | 77 +
 .../notary/client/changelist/file_changelist.go | 197 +
 .../docker/notary/client/changelist/interface.go | 73 +
 vendor/github.com/docker/notary/client/client.go | 1014 ++
 .../github.com/docker/notary/client/delegations.go | 299 +
 vendor/github.com/docker/notary/client/helpers.go | 278 +
 vendor/github.com/docker/notary/client/repo.go | 29 +
 .../github.com/docker/notary/client/repo_pkcs11.go | 34 +
 .../github.com/docker/notary/client/tufclient.go | 239 +
 vendor/github.com/docker/notary/client/witness.go | 69 +
 vendor/github.com/docker/notary/const.go | 70 +
 vendor/github.com/docker/notary/const_nowindows.go | 16 +
 vendor/github.com/docker/notary/const_windows.go | 8 +
 .../docker/notary/cryptoservice/certificate.go | 41 +
 .../docker/notary/cryptoservice/crypto_service.go | 181 +
 vendor/github.com/docker/notary/notary.go | 7 +
 .../docker/notary/passphrase/passphrase.go | 205 +
 vendor/github.com/docker/notary/storage/errors.go | 22 +
 .../github.com/docker/notary/storage/filestore.go | 222 +
 .../github.com/docker/notary/storage/httpstore.go | 339 +
 .../github.com/docker/notary/storage/interfaces.go | 34 +
 .../docker/notary/storage/memorystore.go | 124 +
 .../docker/notary/storage/offlinestore.go | 54 +
 .../docker/notary/trustmanager/interfaces.go | 82 +
 .../docker/notary/trustmanager/keystore.go | 325 +
 .../docker/notary/trustmanager/yubikey/import.go | 57 +
 .../notary/trustmanager/yubikey/non_pkcs11.go | 9 +
 .../notary/trustmanager/yubikey/pkcs11_darwin.go | 9 +
 .../trustmanager/yubikey/pkcs11_interface.go | 40 +
 .../notary/trustmanager/yubikey/pkcs11_linux.go | 10 +
 .../notary/trustmanager/yubikey/yubikeystore.go | 914 +
 .../github.com/docker/notary/trustpinning/certs.go | 291 +
 .../docker/notary/trustpinning/trustpin.go | 121 +
 vendor/github.com/docker/notary/tuf/LICENSE | 30 +
 vendor/github.com/docker/notary/tuf/README.md | 6 +
 vendor/github.com/docker/notary/tuf/builder.go | 710 +
 vendor/github.com/docker/notary/tuf/data/errors.go | 53 +
 vendor/github.com/docker/notary/tuf/data/keys.go | 528 +
 vendor/github.com/docker/notary/tuf/data/roles.go | 338 +
 vendor/github.com/docker/notary/tuf/data/root.go | 171 +
 .../docker/notary/tuf/data/serializer.go | 36 +
 .../github.com/docker/notary/tuf/data/snapshot.go | 169 +
 .../github.com/docker/notary/tuf/data/targets.go | 201 +
 .../github.com/docker/notary/tuf/data/timestamp.go | 136 +
 vendor/github.com/docker/notary/tuf/data/types.go | 311 +
 .../github.com/docker/notary/tuf/signed/ed25519.go | 111 +
 .../github.com/docker/notary/tuf/signed/errors.go | 96 +
 .../docker/notary/tuf/signed/interface.go | 49 +
 vendor/github.com/docker/notary/tuf/signed/sign.go | 113 +
 .../docker/notary/tuf/signed/verifiers.go | 283 +
 .../github.com/docker/notary/tuf/signed/verify.go | 112 +
 vendor/github.com/docker/notary/tuf/tuf.go | 1148 ++
 .../docker/notary/tuf/utils/role_sort.go | 31 +
 vendor/github.com/docker/notary/tuf/utils/stack.go | 85 +
 vendor/github.com/docker/notary/tuf/utils/utils.go | 152 +
 vendor/github.com/docker/notary/tuf/utils/x509.go | 551 +
 .../docker/notary/tuf/validation/errors.go | 126 +
 vendor/github.com/docker/swarmkit/LICENSE | 201 +
 vendor/github.com/docker/swarmkit/README.md | 327 +
 vendor/github.com/docker/swarmkit/agent/agent.go | 595 +
 vendor/github.com/docker/swarmkit/agent/config.go | 83 +
 .../docker/swarmkit/agent/configs/configs.go | 88 +
 .../github.com/docker/swarmkit/agent/dependency.go | 52 +
 vendor/github.com/docker/swarmkit/agent/errors.go | 22 +
 .../docker/swarmkit/agent/exec/controller.go | 358 +
 .../docker/swarmkit/agent/exec/controller_stub.go | 75 +
 .../docker/swarmkit/agent/exec/errors.go | 82 +
 .../docker/swarmkit/agent/exec/executor.go | 81 +
 vendor/github.com/docker/swarmkit/agent/helpers.go | 13 +
 .../github.com/docker/swarmkit/agent/reporter.go | 129 +
 .../github.com/docker/swarmkit/agent/resource.go | 69 +
 .../docker/swarmkit/agent/secrets/secrets.go | 88 +
 vendor/github.com/docker/swarmkit/agent/session.go | 423 +
 vendor/github.com/docker/swarmkit/agent/storage.go | 216 +
 vendor/github.com/docker/swarmkit/agent/task.go | 248 +
 vendor/github.com/docker/swarmkit/agent/worker.go | 604 +
 vendor/github.com/docker/swarmkit/api/README.md | 8 +
 vendor/github.com/docker/swarmkit/api/ca.pb.go | 2116 +++
 vendor/github.com/docker/swarmkit/api/ca.proto | 72 +
 .../github.com/docker/swarmkit/api/control.pb.go | 16092 +++++++++++++++++
 .../github.com/docker/swarmkit/api/control.proto | 554 +
 .../docker/swarmkit/api/deepcopy/copy.go | 53 +
 .../docker/swarmkit/api/defaults/service.go | 99 +
 .../docker/swarmkit/api/dispatcher.pb.go | 3847 ++++
 .../docker/swarmkit/api/dispatcher.proto | 218 +
 .../docker/swarmkit/api/equality/equality.go | 67 +
 vendor/github.com/docker/swarmkit/api/gen.go | 3 +
 .../docker/swarmkit/api/genericresource/helpers.go | 111 +
 .../docker/swarmkit/api/genericresource/parse.go | 47 +
 .../api/genericresource/resource_management.go | 202 +
 .../docker/swarmkit/api/genericresource/string.go | 54 +
 .../swarmkit/api/genericresource/validate.go | 84 +
 vendor/github.com/docker/swarmkit/api/health.pb.go | 721 +
 vendor/github.com/docker/swarmkit/api/health.proto | 34 +
 .../github.com/docker/swarmkit/api/logbroker.pb.go | 3416 ++++
 .../github.com/docker/swarmkit/api/logbroker.proto | 188 +
 .../docker/swarmkit/api/naming/naming.go | 49 +
 .../github.com/docker/swarmkit/api/objects.pb.go | 7787 +++++++++
 .../github.com/docker/swarmkit/api/objects.proto | 452 +
 vendor/github.com/docker/swarmkit/api/raft.pb.go | 3638 ++++
 vendor/github.com/docker/swarmkit/api/raft.proto | 132 +
 .../github.com/docker/swarmkit/api/resource.pb.go | 1091 ++
 .../github.com/docker/swarmkit/api/resource.proto | 34 +
 .../github.com/docker/swarmkit/api/snapshot.pb.go | 1342 ++
 .../github.com/docker/swarmkit/api/snapshot.proto | 44 +
 vendor/github.com/docker/swarmkit/api/specs.pb.go | 6277 +++++++
 vendor/github.com/docker/swarmkit/api/specs.proto | 413 +
 .../github.com/docker/swarmkit/api/storeobject.go | 108 +
 vendor/github.com/docker/swarmkit/api/types.pb.go | 17356 +++++++++++++++++++
 vendor/github.com/docker/swarmkit/api/types.proto | 1022 ++
 vendor/github.com/docker/swarmkit/api/watch.pb.go | 4599 +++++
 vendor/github.com/docker/swarmkit/api/watch.proto | 154 +
 vendor/github.com/docker/swarmkit/ca/auth.go | 247 +
 .../github.com/docker/swarmkit/ca/certificates.go | 1008 ++
 vendor/github.com/docker/swarmkit/ca/config.go | 665 +
 vendor/github.com/docker/swarmkit/ca/external.go | 228 +
 vendor/github.com/docker/swarmkit/ca/forward.go | 77 +
 .../github.com/docker/swarmkit/ca/keyreadwriter.go | 388 +
 vendor/github.com/docker/swarmkit/ca/reconciler.go | 259 +
 vendor/github.com/docker/swarmkit/ca/renewer.go | 168 +
 vendor/github.com/docker/swarmkit/ca/server.go | 849 +
 vendor/github.com/docker/swarmkit/ca/transport.go | 213 +
 .../docker/swarmkit/connectionbroker/broker.go | 110 +
 vendor/github.com/docker/swarmkit/identity/doc.go | 16 +
 .../docker/swarmkit/identity/randomid.go | 53 +
 .../github.com/docker/swarmkit/ioutils/ioutils.go | 40 +
 vendor/github.com/docker/swarmkit/log/context.go | 96 +
 vendor/github.com/docker/swarmkit/log/grpc.go | 13 +
 .../docker/swarmkit/manager/allocator/allocator.go | 231 +
 .../allocator/cnmallocator/drivers_darwin.go | 17 +
 .../manager/allocator/cnmallocator/drivers_ipam.go | 23 +
 .../cnmallocator/drivers_network_linux.go | 28 +
 .../cnmallocator/drivers_network_windows.go | 17 +
 .../allocator/cnmallocator/drivers_unsupported.go | 14 +
 .../allocator/cnmallocator/networkallocator.go | 961 +
 .../allocator/cnmallocator/portallocator.go | 410 +
 .../docker/swarmkit/manager/allocator/doc.go | 18 +
 .../docker/swarmkit/manager/allocator/network.go | 1193 ++
 .../allocator/networkallocator/networkallocator.go | 132 +
 .../swarmkit/manager/constraint/constraint.go | 207 +
 .../swarmkit/manager/controlapi/ca_rotation.go | 282 +
 .../docker/swarmkit/manager/controlapi/cluster.go | 287 +
 .../docker/swarmkit/manager/controlapi/common.go | 135 +
 .../docker/swarmkit/manager/controlapi/config.go | 248 +
 .../docker/swarmkit/manager/controlapi/network.go | 302 +
 .../docker/swarmkit/manager/controlapi/node.go | 322 +
 .../docker/swarmkit/manager/controlapi/secret.go | 253 +
 .../docker/swarmkit/manager/controlapi/server.go | 36 +
 .../docker/swarmkit/manager/controlapi/service.go | 933 +
 .../docker/swarmkit/manager/controlapi/task.go | 170 +
 vendor/github.com/docker/swarmkit/manager/deks.go | 269 +
 vendor/github.com/docker/swarmkit/manager/dirty.go | 57 +
 .../swarmkit/manager/dispatcher/assignments.go | 247 +
 .../swarmkit/manager/dispatcher/dispatcher.go | 1250 ++
 .../manager/dispatcher/heartbeat/heartbeat.go | 39 +
 .../docker/swarmkit/manager/dispatcher/nodes.go | 198 +
 .../docker/swarmkit/manager/dispatcher/period.go | 28 +
 vendor/github.com/docker/swarmkit/manager/doc.go | 1 +
 .../swarmkit/manager/encryption/encryption.go | 132 +
 .../docker/swarmkit/manager/encryption/nacl.go | 73 +
 .../docker/swarmkit/manager/health/health.go | 58 +
 .../swarmkit/manager/keymanager/keymanager.go | 239 +
 .../docker/swarmkit/manager/logbroker/broker.go | 427 +
 .../swarmkit/manager/logbroker/subscription.go | 248 +
 .../github.com/docker/swarmkit/manager/manager.go | 1197 ++
 .../docker/swarmkit/manager/metrics/collector.go | 104 +
 .../constraintenforcer/constraint_enforcer.go | 183 +
 .../swarmkit/manager/orchestrator/global/global.go | 592 +
 .../manager/orchestrator/replicated/replicated.go | 108 +
 .../manager/orchestrator/replicated/services.go | 229 +
 .../manager/orchestrator/replicated/slot.go | 55 +
 .../manager/orchestrator/replicated/tasks.go | 180 +
 .../manager/orchestrator/restart/restart.go | 447 +
 .../swarmkit/manager/orchestrator/service.go | 61 +
 .../docker/swarmkit/manager/orchestrator/slot.go | 46 +
 .../docker/swarmkit/manager/orchestrator/task.go | 79 +
 .../swarmkit/manager/orchestrator/taskinit/init.go | 103 +
 .../manager/orchestrator/taskreaper/task_reaper.go | 245 +
 .../manager/orchestrator/update/updater.go | 642 +
 .../swarmkit/manager/raftselector/raftselector.go | 20 +
 .../swarmkit/manager/resourceapi/allocator.go | 124 +
 .../docker/swarmkit/manager/role_manager.go | 177 +
 .../swarmkit/manager/scheduler/decision_tree.go | 55 +
 .../docker/swarmkit/manager/scheduler/filter.go | 350 +
 .../docker/swarmkit/manager/scheduler/nodeheap.go | 31 +
 .../docker/swarmkit/manager/scheduler/nodeinfo.go | 181 +
 .../docker/swarmkit/manager/scheduler/nodeset.go | 124 +
 .../docker/swarmkit/manager/scheduler/pipeline.go | 98 +
 .../docker/swarmkit/manager/scheduler/scheduler.go | 737 +
 .../docker/swarmkit/manager/state/proposer.go | 30 +
 .../manager/state/raft/membership/cluster.go | 213 +
 .../docker/swarmkit/manager/state/raft/raft.go | 1974 +++
 .../docker/swarmkit/manager/state/raft/storage.go | 248 +
 .../manager/state/raft/storage/snapwrap.go | 158 +
 .../swarmkit/manager/state/raft/storage/storage.go | 375 +
 .../swarmkit/manager/state/raft/storage/walwrap.go | 255 +
 .../swarmkit/manager/state/raft/transport/peer.go | 291 +
 .../manager/state/raft/transport/transport.go | 398 +
 .../docker/swarmkit/manager/state/raft/util.go | 86 +
 .../docker/swarmkit/manager/state/raft/wait.go | 77 +
 .../docker/swarmkit/manager/state/store/apply.go | 49 +
 .../docker/swarmkit/manager/state/store/by.go | 214 +
 .../swarmkit/manager/state/store/clusters.go | 138 +
 .../swarmkit/manager/state/store/combinators.go | 14 +
 .../docker/swarmkit/manager/state/store/configs.go | 132 +
 .../docker/swarmkit/manager/state/store/doc.go | 32 +
 .../swarmkit/manager/state/store/extensions.go | 190 +
 .../docker/swarmkit/manager/state/store/memory.go | 941 +
 .../swarmkit/manager/state/store/networks.go | 132 +
 .../docker/swarmkit/manager/state/store/nodes.go | 176 +
 .../docker/swarmkit/manager/state/store/object.go | 15 +
 .../swarmkit/manager/state/store/resources.go | 199 +
 .../docker/swarmkit/manager/state/store/secrets.go | 132 +
 .../swarmkit/manager/state/store/services.go | 248 +
 .../docker/swarmkit/manager/state/store/tasks.go | 341 +
 .../docker/swarmkit/manager/state/watch.go | 74 +
 .../docker/swarmkit/manager/watchapi/server.go | 17 +
 .../docker/swarmkit/manager/watchapi/watch.go | 56 +
 vendor/github.com/docker/swarmkit/node/node.go | 1100 ++
 .../docker/swarmkit/protobuf/plugin/gen.go | 3 +
 .../docker/swarmkit/protobuf/plugin/helpers.go | 11 +
 .../docker/swarmkit/protobuf/plugin/plugin.pb.go | 1185 ++
 .../docker/swarmkit/protobuf/plugin/plugin.proto | 53 +
 .../docker/swarmkit/protobuf/ptypes/doc.go | 3 +
 .../docker/swarmkit/protobuf/ptypes/timestamp.go | 17 +
 .../github.com/docker/swarmkit/remotes/remotes.go | 203 +
 .../github.com/docker/swarmkit/template/context.go | 192 +
 .../github.com/docker/swarmkit/template/expand.go | 161 +
 .../github.com/docker/swarmkit/template/getter.go | 98 +
 .../docker/swarmkit/template/template.go | 22 +
 vendor/github.com/docker/swarmkit/vendor.conf | 65 +
 vendor/github.com/docker/swarmkit/watch/watch.go | 107 +
 .../github.com/docker/swarmkit/xnet/xnet_unix.go | 20 +
 .../docker/swarmkit/xnet/xnet_windows.go | 31 +
 .../github.com/fluent/fluent-logger-golang/LICENSE | 202 +
 .../fluent/fluent-logger-golang/README.md | 65 +
 .../fluent/fluent-logger-golang/fluent/fluent.go | 309 +
 .../fluent/fluent-logger-golang/fluent/proto.go | 24 +
 .../fluent-logger-golang/fluent/proto_gen.go | 372 +
 .../fluent/fluent-logger-golang/fluent/version.go | 3 +
 vendor/github.com/fsnotify/fsnotify/LICENSE | 28 +
 vendor/github.com/fsnotify/fsnotify/README.md | 46 +
 vendor/github.com/fsnotify/fsnotify/fen.go | 37 +
 vendor/github.com/fsnotify/fsnotify/fsnotify.go | 62 +
 vendor/github.com/fsnotify/fsnotify/inotify.go | 324 +
 .../github.com/fsnotify/fsnotify/inotify_poller.go | 186 +
 vendor/github.com/fsnotify/fsnotify/kqueue.go | 502 +
 .../github.com/fsnotify/fsnotify/open_mode_bsd.go | 11 +
 .../fsnotify/fsnotify/open_mode_darwin.go | 12 +
 vendor/github.com/fsnotify/fsnotify/windows.go | 561 +
 vendor/github.com/go-check/check/LICENSE | 25 +
 vendor/github.com/go-check/check/README.md | 10 +
 vendor/github.com/go-check/check/benchmark.go | 187 +
 vendor/github.com/go-check/check/check.go | 939 +
 vendor/github.com/go-check/check/checkers.go | 458 +
 vendor/github.com/go-check/check/helpers.go | 231 +
 vendor/github.com/go-check/check/printer.go | 168 +
 vendor/github.com/go-check/check/reporter.go | 88 +
 vendor/github.com/go-check/check/run.go | 183 +
 vendor/github.com/go-ini/ini/LICENSE | 191 +
 vendor/github.com/go-ini/ini/README.md | 560 +
 vendor/github.com/go-ini/ini/README_ZH.md | 547 +
 vendor/github.com/go-ini/ini/ini.go | 1226 ++
 vendor/github.com/go-ini/ini/struct.go | 350 +
 vendor/github.com/godbus/dbus/LICENSE | 25 +
 vendor/github.com/godbus/dbus/README.markdown | 41 +
 vendor/github.com/godbus/dbus/auth.go | 253 +
 vendor/github.com/godbus/dbus/auth_external.go | 26 +
 vendor/github.com/godbus/dbus/auth_sha1.go | 102 +
 vendor/github.com/godbus/dbus/call.go | 36 +
 vendor/github.com/godbus/dbus/conn.go | 625 +
 vendor/github.com/godbus/dbus/conn_darwin.go | 21 +
 vendor/github.com/godbus/dbus/conn_other.go | 27 +
 vendor/github.com/godbus/dbus/dbus.go | 258 +
 vendor/github.com/godbus/dbus/decoder.go | 228 +
 vendor/github.com/godbus/dbus/doc.go | 63 +
 vendor/github.com/godbus/dbus/encoder.go | 208 +
 vendor/github.com/godbus/dbus/export.go | 411 +
 vendor/github.com/godbus/dbus/homedir.go | 28 +
 vendor/github.com/godbus/dbus/homedir_dynamic.go | 15 +
 vendor/github.com/godbus/dbus/homedir_static.go | 45 +
 vendor/github.com/godbus/dbus/message.go | 346 +
 vendor/github.com/godbus/dbus/object.go | 126 +
 vendor/github.com/godbus/dbus/sig.go | 257 +
 vendor/github.com/godbus/dbus/transport_darwin.go | 6 +
 vendor/github.com/godbus/dbus/transport_generic.go | 35 +
 vendor/github.com/godbus/dbus/transport_unix.go | 196 +
 .../godbus/dbus/transport_unixcred_dragonfly.go | 95 +
 .../godbus/dbus/transport_unixcred_linux.go | 25 +
 vendor/github.com/godbus/dbus/variant.go | 139 +
 vendor/github.com/godbus/dbus/variant_lexer.go | 284 +
 vendor/github.com/godbus/dbus/variant_parser.go | 817 +
 vendor/github.com/gogo/protobuf/LICENSE | 36 +
 vendor/github.com/gogo/protobuf/README | 258 +
 vendor/github.com/gogo/protobuf/Readme.md | 117 +
 vendor/github.com/gogo/protobuf/gogoproto/doc.go | 169 +
 .../github.com/gogo/protobuf/gogoproto/gogo.pb.go | 804 +
 .../github.com/gogo/protobuf/gogoproto/gogo.proto | 132 +
 .../github.com/gogo/protobuf/gogoproto/helper.go | 357 +
 vendor/github.com/gogo/protobuf/io/full.go | 102 +
 vendor/github.com/gogo/protobuf/io/io.go | 70 +
 vendor/github.com/gogo/protobuf/io/uint32.go | 126 +
 vendor/github.com/gogo/protobuf/io/varint.go | 134 +
 vendor/github.com/gogo/protobuf/proto/clone.go | 234 +
 vendor/github.com/gogo/protobuf/proto/decode.go | 978 ++
 .../github.com/gogo/protobuf/proto/decode_gogo.go | 172 +
 vendor/github.com/gogo/protobuf/proto/duration.go | 100 +
 .../gogo/protobuf/proto/duration_gogo.go | 203 +
 vendor/github.com/gogo/protobuf/proto/encode.go | 1362 ++
 .../github.com/gogo/protobuf/proto/encode_gogo.go | 350 +
 vendor/github.com/gogo/protobuf/proto/equal.go | 300 +
 .../github.com/gogo/protobuf/proto/extensions.go | 693 +
 .../gogo/protobuf/proto/extensions_gogo.go | 294 +
 vendor/github.com/gogo/protobuf/proto/lib.go | 898 +
 vendor/github.com/gogo/protobuf/proto/lib_gogo.go | 42 +
 .../github.com/gogo/protobuf/proto/message_set.go | 311 +
 .../gogo/protobuf/proto/pointer_reflect.go | 484 +
 .../gogo/protobuf/proto/pointer_reflect_gogo.go | 85 +
 .../gogo/protobuf/proto/pointer_unsafe.go | 270 +
 .../gogo/protobuf/proto/pointer_unsafe_gogo.go | 128 +
 .../github.com/gogo/protobuf/proto/properties.go | 968 ++
 .../gogo/protobuf/proto/properties_gogo.go | 111 +
 vendor/github.com/gogo/protobuf/proto/skip_gogo.go | 119 +
 vendor/github.com/gogo/protobuf/proto/text.go | 928 +
 vendor/github.com/gogo/protobuf/proto/text_gogo.go | 57 +
 .../github.com/gogo/protobuf/proto/text_parser.go | 1013 ++
 vendor/github.com/gogo/protobuf/proto/timestamp.go | 113 +
 .../gogo/protobuf/proto/timestamp_gogo.go | 229 +
 .../protobuf/protobuf/google/protobuf/any.proto | 140 +
 .../protobuf/google/protobuf/compiler/plugin.proto | 150 +
 .../protobuf/google/protobuf/descriptor.proto | 813 +
 .../protobuf/google/protobuf/duration.proto | 98 +
 .../protobuf/protobuf/google/protobuf/empty.proto | 53 +
 .../protobuf/google/protobuf/field_mask.proto | 246 +
 .../protobuf/protobuf/google/protobuf/struct.proto | 96 +
 .../protobuf/google/protobuf/timestamp.proto | 111 +
 .../protobuf/google/protobuf/wrappers.proto | 119 +
 .../protoc-gen-gogo/descriptor/descriptor.go | 118 +
 .../protoc-gen-gogo/descriptor/descriptor.pb.go | 2088 +++
 .../descriptor/descriptor_gostring.gen.go | 715 +
 .../protobuf/protoc-gen-gogo/descriptor/helper.go | 390 +
 .../github.com/gogo/protobuf/sortkeys/sortkeys.go | 101 +
 vendor/github.com/gogo/protobuf/types/any.go | 135 +
 vendor/github.com/gogo/protobuf/types/any.pb.go | 666 +
 vendor/github.com/gogo/protobuf/types/doc.go | 35 +
 vendor/github.com/gogo/protobuf/types/duration.go | 100 +
 .../github.com/gogo/protobuf/types/duration.pb.go | 494 +
 .../gogo/protobuf/types/duration_gogo.go | 100 +
 vendor/github.com/gogo/protobuf/types/empty.pb.go | 457 +
 .../gogo/protobuf/types/field_mask.pb.go | 738 +
 vendor/github.com/gogo/protobuf/types/struct.pb.go | 1888 ++
 vendor/github.com/gogo/protobuf/types/timestamp.go | 123 +
 .../github.com/gogo/protobuf/types/timestamp.pb.go | 506 +
 .../gogo/protobuf/types/timestamp_gogo.go | 94 +
 .../github.com/gogo/protobuf/types/wrappers.pb.go | 2259 +++
 vendor/github.com/golang/protobuf/LICENSE | 31 +
 vendor/github.com/golang/protobuf/README.md | 241 +
 vendor/github.com/golang/protobuf/proto/clone.go | 229 +
 vendor/github.com/golang/protobuf/proto/decode.go | 970 ++
 vendor/github.com/golang/protobuf/proto/encode.go | 1362 ++
 vendor/github.com/golang/protobuf/proto/equal.go | 300 +
 .../github.com/golang/protobuf/proto/extensions.go | 587 +
 vendor/github.com/golang/protobuf/proto/lib.go | 898 +
 .../golang/protobuf/proto/message_set.go | 311 +
 .../golang/protobuf/proto/pointer_reflect.go | 484 +
 .../golang/protobuf/proto/pointer_unsafe.go | 270 +
 .../github.com/golang/protobuf/proto/properties.go | 872 +
 vendor/github.com/golang/protobuf/proto/text.go | 854 +
 .../golang/protobuf/proto/text_parser.go | 895 +
 .../protoc-gen-go/descriptor/descriptor.pb.go | 2152 +++
 vendor/github.com/golang/protobuf/ptypes/any.go | 136 +
 .../golang/protobuf/ptypes/any/any.pb.go | 168 +
 .../golang/protobuf/ptypes/any/any.proto | 139 +
 vendor/github.com/golang/protobuf/ptypes/doc.go | 35 +
 .../github.com/golang/protobuf/ptypes/duration.go | 102 +
 .../golang/protobuf/ptypes/duration/duration.pb.go | 146 +
 .../golang/protobuf/ptypes/duration/duration.proto | 117 +
 .../golang/protobuf/ptypes/empty/empty.pb.go | 68 +
 .../golang/protobuf/ptypes/empty/empty.proto | 52 +
 .../golang/protobuf/ptypes/struct/struct.pb.go | 382 +
 .../golang/protobuf/ptypes/struct/struct.proto | 96 +
 .../github.com/golang/protobuf/ptypes/timestamp.go | 125 +
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 162 +
 .../protobuf/ptypes/timestamp/timestamp.proto | 133 +
 .../google/certificate-transparency/LICENSE | 202 +
 .../certificate-transparency/README-MacOS.md | 55 +
 .../google/certificate-transparency/README.md | 303 +
 .../cpp/third_party/curl/hostcheck.c | 214 +
 .../cpp/third_party/curl/hostcheck.h | 29 +
 .../isec_partners/openssl_hostname_validation.c | 180 +
 .../isec_partners/openssl_hostname_validation.h | 59 +
 .../google/certificate-transparency/cpp/version.h | 12 +
 .../google/certificate-transparency/go/README.md | 25 +
 .../certificate-transparency/go/asn1/asn1.go | 956 +
 .../certificate-transparency/go/asn1/common.go | 163 +
 .../certificate-transparency/go/asn1/marshal.go | 581 +
 .../go/client/getentries.go | 88 +
 .../go/client/logclient.go | 412 +
 .../certificate-transparency/go/serialization.go | 691 +
 .../certificate-transparency/go/signatures.go | 131 +
 .../google/certificate-transparency/go/types.go | 374 +
 .../certificate-transparency/go/x509/cert_pool.go | 116 +
 .../go/x509/pem_decrypt.go | 233 +
 .../certificate-transparency/go/x509/pkcs1.go | 124 +
 .../certificate-transparency/go/x509/pkcs8.go | 56 +
 .../certificate-transparency/go/x509/pkix/pkix.go | 173 +
 .../certificate-transparency/go/x509/root.go | 17 +
 .../go/x509/root_darwin.go | 83 +
 .../certificate-transparency/go/x509/root_plan9.go | 33 +
 .../certificate-transparency/go/x509/root_stub.go | 14 +
 .../certificate-transparency/go/x509/root_unix.go | 37 +
 .../go/x509/root_windows.go | 229 +
 .../certificate-transparency/go/x509/sec1.go | 85 +
 .../certificate-transparency/go/x509/verify.go | 476 +
 .../certificate-transparency/go/x509/x509.go | 1622 ++
 .../google/certificate-transparency/proto/ct.proto | 320 +
 vendor/github.com/googleapis/gax-go/LICENSE | 27 +
 vendor/github.com/googleapis/gax-go/README.md | 24 +
 vendor/github.com/googleapis/gax-go/call_option.go | 136 +
 vendor/github.com/googleapis/gax-go/gax.go | 40 +
 vendor/github.com/googleapis/gax-go/invoke.go | 90 +
 .../github.com/googleapis/gax-go/path_template.go | 176 +
 .../googleapis/gax-go/path_template_parser.go | 227 +
 vendor/github.com/gorilla/context/LICENSE | 27 +
 vendor/github.com/gorilla/context/README.md | 7 +
 vendor/github.com/gorilla/context/context.go | 143 +
 vendor/github.com/gorilla/context/doc.go | 82 +
 vendor/github.com/gorilla/mux/LICENSE | 27 +
 vendor/github.com/gorilla/mux/README.md | 242 +
 vendor/github.com/gorilla/mux/doc.go | 206 +
 vendor/github.com/gorilla/mux/mux.go | 481 +
 vendor/github.com/gorilla/mux/regexp.go | 312 +
 vendor/github.com/gorilla/mux/route.go | 627 +
 .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 +
 .../grpc-ecosystem/go-grpc-prometheus/README.md | 245 +
 .../grpc-ecosystem/go-grpc-prometheus/client.go | 72 +
 .../go-grpc-prometheus/client_reporter.go | 111 +
 .../grpc-ecosystem/go-grpc-prometheus/server.go | 74 +
 .../go-grpc-prometheus/server_reporter.go | 157 +
 .../grpc-ecosystem/go-grpc-prometheus/util.go | 27 +
 .../hashicorp/go-immutable-radix/LICENSE | 363 +
 .../hashicorp/go-immutable-radix/README.md | 41 +
 .../hashicorp/go-immutable-radix/edges.go | 21 +
 .../hashicorp/go-immutable-radix/iradix.go | 333 +
 .../hashicorp/go-immutable-radix/iter.go | 81 +
 .../hashicorp/go-immutable-radix/node.go | 289 +
 vendor/github.com/hashicorp/go-memdb/LICENSE | 363 +
 vendor/github.com/hashicorp/go-memdb/README.md | 93 +
 vendor/github.com/hashicorp/go-memdb/index.go | 419 +
 vendor/github.com/hashicorp/go-memdb/memdb.go | 89 +
 vendor/github.com/hashicorp/go-memdb/schema.go | 85 +
 vendor/github.com/hashicorp/go-memdb/txn.go | 521 +
 vendor/github.com/hashicorp/go-msgpack/LICENSE | 25 +
 vendor/github.com/hashicorp/go-msgpack/README.md | 14 +
 .../github.com/hashicorp/go-msgpack/codec/0doc.go | 143 +
 .../hashicorp/go-msgpack/codec/README.md | 174 +
 .../github.com/hashicorp/go-msgpack/codec/binc.go | 786 +
 .../hashicorp/go-msgpack/codec/decode.go | 1048 ++
 .../hashicorp/go-msgpack/codec/encode.go | 1001 ++
 .../hashicorp/go-msgpack/codec/helper.go | 589 +
 .../hashicorp/go-msgpack/codec/helper_internal.go | 127 +
 .../hashicorp/go-msgpack/codec/msgpack.go | 816 +
 .../github.com/hashicorp/go-msgpack/codec/rpc.go | 152 +
 .../hashicorp/go-msgpack/codec/simple.go | 461 +
 .../github.com/hashicorp/go-msgpack/codec/time.go | 193 +
 vendor/github.com/hashicorp/go-multierror/LICENSE | 353 +
 .../github.com/hashicorp/go-multierror/README.md | 91 +
 .../github.com/hashicorp/go-multierror/append.go | 30 +
 .../github.com/hashicorp/go-multierror/format.go | 23 +
 .../hashicorp/go-multierror/multierror.go | 51 +
 vendor/github.com/hashicorp/go-sockaddr/LICENSE | 373 +
 vendor/github.com/hashicorp/go-sockaddr/README.md | 118 +
 vendor/github.com/hashicorp/go-sockaddr/doc.go | 5 +
 vendor/github.com/hashicorp/go-sockaddr/ifaddr.go | 126 +
 vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go | 969 ++
 vendor/github.com/hashicorp/go-sockaddr/ifattr.go | 65 +
 vendor/github.com/hashicorp/go-sockaddr/ipaddr.go | 169 +
 vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go | 98 +
 .../github.com/hashicorp/go-sockaddr/ipv4addr.go | 515 +
 .../github.com/hashicorp/go-sockaddr/ipv6addr.go | 591 +
 vendor/github.com/hashicorp/go-sockaddr/rfc.go | 947 +
 .../github.com/hashicorp/go-sockaddr/route_info.go | 19 +
 .../hashicorp/go-sockaddr/route_info_bsd.go | 36 +
 .../hashicorp/go-sockaddr/route_info_default.go | 10 +
 .../hashicorp/go-sockaddr/route_info_linux.go | 37 +
 .../hashicorp/go-sockaddr/route_info_solaris.go | 37 +
 .../hashicorp/go-sockaddr/route_info_windows.go | 41 +
 .../github.com/hashicorp/go-sockaddr/sockaddr.go | 178 +
 .../github.com/hashicorp/go-sockaddr/sockaddrs.go | 193 +
 .../github.com/hashicorp/go-sockaddr/unixsock.go | 135 +
 vendor/github.com/hashicorp/golang-lru/LICENSE | 362 +
 vendor/github.com/hashicorp/golang-lru/README.md | 25 +
 .../hashicorp/golang-lru/simplelru/lru.go | 160 +
 vendor/github.com/hashicorp/memberlist/LICENSE | 354 +
 vendor/github.com/hashicorp/memberlist/README.md | 144 +
 .../hashicorp/memberlist/alive_delegate.go | 14 +
 .../github.com/hashicorp/memberlist/awareness.go | 69 +
 .../github.com/hashicorp/memberlist/broadcast.go | 100 +
 vendor/github.com/hashicorp/memberlist/config.go | 288 +
 .../hashicorp/memberlist/conflict_delegate.go | 10 +
 vendor/github.com/hashicorp/memberlist/delegate.go | 37 +
 .../hashicorp/memberlist/event_delegate.go | 61 +
 vendor/github.com/hashicorp/memberlist/keyring.go | 160 +
 vendor/github.com/hashicorp/memberlist/logging.go | 22 +
 .../github.com/hashicorp/memberlist/memberlist.go | 625 +
 .../hashicorp/memberlist/merge_delegate.go | 14 +
 .../hashicorp/memberlist/mock_transport.go | 121 +
 vendor/github.com/hashicorp/memberlist/net.go | 1069 ++
 .../hashicorp/memberlist/net_transport.go | 289 +
 .../hashicorp/memberlist/ping_delegate.go | 14 +
 vendor/github.com/hashicorp/memberlist/queue.go | 167 +
 vendor/github.com/hashicorp/memberlist/security.go | 198 +
 vendor/github.com/hashicorp/memberlist/state.go | 1160 ++
 .../github.com/hashicorp/memberlist/suspicion.go | 130 +
 .../github.com/hashicorp/memberlist/transport.go | 65 +
 vendor/github.com/hashicorp/memberlist/util.go | 296 +
 vendor/github.com/hashicorp/serf/LICENSE | 354 +
 vendor/github.com/hashicorp/serf/README.md | 114 +
 .../github.com/hashicorp/serf/coordinate/client.go | 180 +
 .../github.com/hashicorp/serf/coordinate/config.go | 70 +
 .../hashicorp/serf/coordinate/coordinate.go | 183 +
 .../hashicorp/serf/coordinate/phantom.go | 187 +
 vendor/github.com/hashicorp/serf/serf/broadcast.go | 27 +
 vendor/github.com/hashicorp/serf/serf/coalesce.go | 80 +
 .../hashicorp/serf/serf/coalesce_member.go | 68 +
 .../hashicorp/serf/serf/coalesce_user.go | 52 +
 vendor/github.com/hashicorp/serf/serf/config.go | 251 +
 .../hashicorp/serf/serf/conflict_delegate.go | 13 +
 vendor/github.com/hashicorp/serf/serf/delegate.go | 254 +
 vendor/github.com/hashicorp/serf/serf/event.go | 168 +
 .../hashicorp/serf/serf/event_delegate.go | 21 +
 .../hashicorp/serf/serf/internal_query.go | 312 +
 .../github.com/hashicorp/serf/serf/keymanager.go | 166 +
 vendor/github.com/hashicorp/serf/serf/lamport.go | 45 +
 .../hashicorp/serf/serf/merge_delegate.go | 44 +
 vendor/github.com/hashicorp/serf/serf/messages.go | 147 +
 .../hashicorp/serf/serf/ping_delegate.go | 89 +
 vendor/github.com/hashicorp/serf/serf/query.go | 210 +
 vendor/github.com/hashicorp/serf/serf/serf.go | 1692 ++
 vendor/github.com/hashicorp/serf/serf/snapshot.go | 560 +
 vendor/github.com/imdario/mergo/LICENSE | 28 +
 vendor/github.com/imdario/mergo/README.md | 122 +
 vendor/github.com/imdario/mergo/doc.go | 44 +
 vendor/github.com/imdario/mergo/map.go | 154 +
 vendor/github.com/imdario/mergo/merge.go | 120 +
 vendor/github.com/imdario/mergo/mergo.go | 90 +
 .../github.com/inconshreveable/mousetrap/LICENSE | 13 +
 .../github.com/inconshreveable/mousetrap/README.md | 23 +
 .../inconshreveable/mousetrap/trap_others.go | 15 +
 .../inconshreveable/mousetrap/trap_windows.go | 98 +
 .../inconshreveable/mousetrap/trap_windows_1.4.go | 46 +
 vendor/github.com/jmespath/go-jmespath/LICENSE | 13 +
 vendor/github.com/jmespath/go-jmespath/README.md | 7 +
 vendor/github.com/jmespath/go-jmespath/api.go | 49 +
 .../jmespath/go-jmespath/astnodetype_string.go | 16 +
 .../github.com/jmespath/go-jmespath/functions.go | 842 +
 .../github.com/jmespath/go-jmespath/interpreter.go | 418 +
 vendor/github.com/jmespath/go-jmespath/lexer.go | 420 +
 vendor/github.com/jmespath/go-jmespath/parser.go | 603 +
 .../jmespath/go-jmespath/toktype_string.go | 16 +
 vendor/github.com/jmespath/go-jmespath/util.go | 185 +
 vendor/github.com/kr/pty/License | 23 +
 vendor/github.com/kr/pty/README.md | 36 +
 vendor/github.com/kr/pty/doc.go | 16 +
 vendor/github.com/kr/pty/ioctl.go | 11 +
 vendor/github.com/kr/pty/ioctl_bsd.go | 39 +
 vendor/github.com/kr/pty/pty_darwin.go | 60 +
 vendor/github.com/kr/pty/pty_freebsd.go | 73 +
 vendor/github.com/kr/pty/pty_linux.go | 46 +
 vendor/github.com/kr/pty/pty_unsupported.go | 11 +
 vendor/github.com/kr/pty/run.go | 28 +
 vendor/github.com/kr/pty/util.go | 35 +
 vendor/github.com/kr/pty/ztypes_386.go | 9 +
 vendor/github.com/kr/pty/ztypes_amd64.go | 9 +
 vendor/github.com/kr/pty/ztypes_arm.go | 9 +
 vendor/github.com/kr/pty/ztypes_arm64.go | 11 +
 vendor/github.com/kr/pty/ztypes_freebsd_386.go | 13 +
 vendor/github.com/kr/pty/ztypes_freebsd_amd64.go | 14 +
 vendor/github.com/kr/pty/ztypes_freebsd_arm.go | 13 +
 vendor/github.com/kr/pty/ztypes_ppc64.go | 11 +
 vendor/github.com/kr/pty/ztypes_ppc64le.go | 11 +
 vendor/github.com/kr/pty/ztypes_s390x.go | 11 +
 vendor/github.com/mattn/go-shellwords/LICENSE | 21 +
 vendor/github.com/mattn/go-shellwords/README.md | 47 +
 .../github.com/mattn/go-shellwords/shellwords.go | 145 +
 .../github.com/mattn/go-shellwords/util_posix.go | 19 +
 .../github.com/mattn/go-shellwords/util_windows.go | 17 +
 .../matttproud/golang_protobuf_extensions/LICENSE | 201 +
 .../matttproud/golang_protobuf_extensions/NOTICE | 1 +
 .../golang_protobuf_extensions/README.md | 20 +
 .../golang_protobuf_extensions/pbutil/decode.go | 75 +
 .../golang_protobuf_extensions/pbutil/doc.go | 16 +
 .../golang_protobuf_extensions/pbutil/encode.go | 46 +
 vendor/github.com/miekg/dns/LICENSE | 32 +
 vendor/github.com/miekg/dns/README.md | 153 +
 vendor/github.com/miekg/dns/client.go | 385 +
 vendor/github.com/miekg/dns/clientconfig.go | 99 +
 vendor/github.com/miekg/dns/defaults.go | 278 +
 vendor/github.com/miekg/dns/dns.go | 100 +
 vendor/github.com/miekg/dns/dnssec.go | 664 +
 vendor/github.com/miekg/dns/dnssec_keygen.go | 156 +
 vendor/github.com/miekg/dns/dnssec_keyscan.go | 249 +
 vendor/github.com/miekg/dns/dnssec_privkey.go | 85 +
 vendor/github.com/miekg/dns/doc.go | 251 +
 vendor/github.com/miekg/dns/edns.go | 505 +
 vendor/github.com/miekg/dns/format.go | 96 +
 vendor/github.com/miekg/dns/labels.go | 162 +
 vendor/github.com/miekg/dns/msg.go | 1945 +++
 vendor/github.com/miekg/dns/nsecx.go | 112 +
 vendor/github.com/miekg/dns/privaterr.go | 117 +
 vendor/github.com/miekg/dns/rawmsg.go | 95 +
 vendor/github.com/miekg/dns/sanitize.go | 84 +
 vendor/github.com/miekg/dns/scanner.go | 43 +
 vendor/github.com/miekg/dns/server.go | 690 +
 vendor/github.com/miekg/dns/sig0.go | 216 +
 vendor/github.com/miekg/dns/singleinflight.go | 57 +
 vendor/github.com/miekg/dns/tlsa.go | 86 +
 vendor/github.com/miekg/dns/tsig.go | 320 +
 vendor/github.com/miekg/dns/types.go | 1328 ++
 vendor/github.com/miekg/dns/udp.go | 58 +
 vendor/github.com/miekg/dns/udp_linux.go | 73 +
 vendor/github.com/miekg/dns/udp_other.go | 17 +
 vendor/github.com/miekg/dns/udp_windows.go | 34 +
 vendor/github.com/miekg/dns/update.go | 94 +
 vendor/github.com/miekg/dns/xfr.go | 244 +
 vendor/github.com/miekg/dns/zgenerate.go | 158 +
 vendor/github.com/miekg/dns/zscan.go | 974 ++
 vendor/github.com/miekg/dns/zscan_rr.go | 2270 +++
 vendor/github.com/miekg/dns/ztypes.go | 842 +
 vendor/github.com/mistifyio/go-zfs/LICENSE | 201 +
 vendor/github.com/mistifyio/go-zfs/README.md | 54 +
 vendor/github.com/mistifyio/go-zfs/error.go | 18 +
 vendor/github.com/mistifyio/go-zfs/utils.go | 352 +
 .../mistifyio/go-zfs/utils_notsolaris.go | 17 +
 .../github.com/mistifyio/go-zfs/utils_solaris.go | 17 +
 vendor/github.com/mistifyio/go-zfs/zfs.go | 451 +
 vendor/github.com/mistifyio/go-zfs/zpool.go | 112 +
 vendor/github.com/mitchellh/mapstructure/LICENSE | 21 +
 vendor/github.com/mitchellh/mapstructure/README.md | 46 +
 .../mitchellh/mapstructure/decode_hooks.go | 154 +
 vendor/github.com/mitchellh/mapstructure/error.go | 50 +
 .../mitchellh/mapstructure/mapstructure.go | 782 +
 vendor/github.com/mrunalp/fileutils/LICENSE | 191 +
 vendor/github.com/mrunalp/fileutils/README.md | 5 +
 vendor/github.com/mrunalp/fileutils/fileutils.go | 161 +
 vendor/github.com/mrunalp/fileutils/idtools.go | 49 +
 .../opencontainers/go-digest/LICENSE.code | 191 +
 .../opencontainers/go-digest/LICENSE.docs | 425 +
 .../github.com/opencontainers/go-digest/README.md | 104 +
 .../opencontainers/go-digest/algorithm.go | 144 +
 .../github.com/opencontainers/go-digest/digest.go | 140 +
 .../opencontainers/go-digest/digester.go | 25 +
 vendor/github.com/opencontainers/go-digest/doc.go | 42 +
 .../opencontainers/go-digest/verifiers.go | 31 +
 .../github.com/opencontainers/image-spec/LICENSE | 191 +
 .../github.com/opencontainers/image-spec/README.md | 167 +
 .../image-spec/specs-go/v1/config.go | 103 +
 .../image-spec/specs-go/v1/descriptor.go | 68 +
 .../opencontainers/image-spec/specs-go/v1/index.go | 29 +
 .../image-spec/specs-go/v1/layout.go | 28 +
 .../image-spec/specs-go/v1/manifest.go | 32 +
 .../image-spec/specs-go/v1/mediatype.go | 48 +
 .../opencontainers/image-spec/specs-go/version.go | 32 +
 .../image-spec/specs-go/versioned.go | 23 +
 vendor/github.com/opencontainers/runc/LICENSE | 191 +
 vendor/github.com/opencontainers/runc/NOTICE | 17 +
 vendor/github.com/opencontainers/runc/README.md | 222 +
 vendor/github.com/opencontainers/runc/VERSION | 1 +
 .../github.com/opencontainers/runc/checkpoint.go | 132 +
 vendor/github.com/opencontainers/runc/create.go | 74 +
 vendor/github.com/opencontainers/runc/delete.go | 84 +
 vendor/github.com/opencontainers/runc/events.go | 260 +
 vendor/github.com/opencontainers/runc/exec.go | 211 +
 vendor/github.com/opencontainers/runc/kill.go | 115 +
 .../opencontainers/runc/libcontainer/README.md | 262 +
 .../runc/libcontainer/apparmor/apparmor.go | 39 +
 .../libcontainer/apparmor/apparmor_disabled.go | 20 +
 .../runc/libcontainer/capabilities_linux.go | 114 +
 .../runc/libcontainer/cgroups/cgroups.go | 64 +
 .../libcontainer/cgroups/cgroups_unsupported.go | 3 +
 .../runc/libcontainer/cgroups/fs/apply_raw.go | 360 +
 .../runc/libcontainer/cgroups/fs/blkio.go | 237 +
 .../runc/libcontainer/cgroups/fs/cpu.go | 125 +
 .../runc/libcontainer/cgroups/fs/cpuacct.go | 121 +
 .../runc/libcontainer/cgroups/fs/cpuset.go | 162 +
 .../runc/libcontainer/cgroups/fs/devices.go | 80 +
 .../runc/libcontainer/cgroups/fs/freezer.go | 61 +
 .../runc/libcontainer/cgroups/fs/fs_unsupported.go | 3 +
 .../runc/libcontainer/cgroups/fs/hugetlb.go | 71 +
 .../runc/libcontainer/cgroups/fs/memory.go | 305 +
 .../runc/libcontainer/cgroups/fs/name.go | 40 +
 .../runc/libcontainer/cgroups/fs/net_cls.go | 43 +
 .../runc/libcontainer/cgroups/fs/net_prio.go | 41 +
 .../runc/libcontainer/cgroups/fs/perf_event.go | 35 +
 .../runc/libcontainer/cgroups/fs/pids.go | 73 +
 .../runc/libcontainer/cgroups/fs/utils.go | 78 +
 .../runc/libcontainer/cgroups/rootless/rootless.go | 128 +
 .../runc/libcontainer/cgroups/stats.go | 106 +
 .../cgroups/systemd/apply_nosystemd.go | 55 +
 .../libcontainer/cgroups/systemd/apply_systemd.go | 555 +
 .../runc/libcontainer/cgroups/utils.go | 447 +
 .../runc/libcontainer/compat_1.5_linux.go | 10 +
 .../runc/libcontainer/configs/blkio_device.go | 61 +
 .../runc/libcontainer/configs/cgroup_unix.go | 124 +
 .../libcontainer/configs/cgroup_unsupported.go | 6 +
 .../runc/libcontainer/configs/cgroup_windows.go | 6 +
 .../runc/libcontainer/configs/config.go | 343 +
 .../runc/libcontainer/configs/config_unix.go | 63 +
 .../runc/libcontainer/configs/device.go | 57 +
 .../runc/libcontainer/configs/device_defaults.go | 111 +
 .../runc/libcontainer/configs/hugepage_limit.go | 9 +
 .../libcontainer/configs/interface_priority_map.go | 14 +
 .../runc/libcontainer/configs/mount.go | 39 +
 .../runc/libcontainer/configs/namespaces.go | 5 +
 .../libcontainer/configs/namespaces_syscall.go | 31 +
 .../configs/namespaces_syscall_unsupported.go | 13 +
 .../runc/libcontainer/configs/namespaces_unix.go | 127 +
 .../libcontainer/configs/namespaces_unsupported.go | 8 +
 .../runc/libcontainer/configs/network.go | 72 +
 .../runc/libcontainer/configs/validate/rootless.go | 117 +
 .../libcontainer/configs/validate/validator.go | 195 +
 .../opencontainers/runc/libcontainer/console.go | 17 +
 .../runc/libcontainer/console_freebsd.go | 13 +
 .../runc/libcontainer/console_linux.go | 154 +
 .../runc/libcontainer/console_solaris.go | 11 +
 .../runc/libcontainer/console_windows.go | 30 +
 .../opencontainers/runc/libcontainer/container.go | 166 +
 .../runc/libcontainer/container_linux.go | 1573 ++
 .../runc/libcontainer/container_solaris.go | 20 +
 .../runc/libcontainer/container_windows.go | 20 +
 .../runc/libcontainer/criu_opts_unix.go | 39 +
 .../runc/libcontainer/criu_opts_windows.go | 6 +
 .../runc/libcontainer/criurpc/criurpc.pb.go | 822 +
 .../runc/libcontainer/criurpc/criurpc.proto | 174 +
 .../runc/libcontainer/devices/devices_unix.go | 106 +
 .../libcontainer/devices/devices_unsupported.go | 3 +
 .../runc/libcontainer/devices/number.go | 24 +
 .../opencontainers/runc/libcontainer/error.go | 70 +
 .../opencontainers/runc/libcontainer/factory.go | 44 +
 .../runc/libcontainer/factory_linux.go | 344 +
 .../runc/libcontainer/generic_error.go | 92 +
 .../opencontainers/runc/libcontainer/init_linux.go | 500 +
 .../runc/libcontainer/keys/keyctl.go | 66 +
 .../runc/libcontainer/message_linux.go | 91 +
 .../runc/libcontainer/network_linux.go | 259 +
 .../runc/libcontainer/notify_linux.go | 89 +
 .../runc/libcontainer/nsenter/README.md | 44 +
 .../runc/libcontainer/nsenter/namespace.h | 32 +
 .../runc/libcontainer/nsenter/nsenter.go | 12 +
 .../runc/libcontainer/nsenter/nsenter_gccgo.go | 25 +
 .../libcontainer/nsenter/nsenter_unsupported.go | 5 +
 .../runc/libcontainer/nsenter/nsexec.c | 863 +
 .../opencontainers/runc/libcontainer/process.go | 106 +
 .../runc/libcontainer/process_linux.go | 483 +
 .../runc/libcontainer/restored_process.go | 122 +
 .../runc/libcontainer/rootfs_linux.go | 812 +
 .../runc/libcontainer/seccomp/config.go | 76 +
 .../runc/libcontainer/seccomp/seccomp_linux.go | 229 +
 .../libcontainer/seccomp/seccomp_unsupported.go | 24 +
 .../runc/libcontainer/setgroups_linux.go | 11 +
 .../runc/libcontainer/setns_init_linux.go | 63 +
 .../runc/libcontainer/specconv/example.go | 227 +
 .../runc/libcontainer/specconv/spec_linux.go | 805 +
 .../runc/libcontainer/stacktrace/capture.go | 27 +
 .../runc/libcontainer/stacktrace/frame.go | 38 +
 .../runc/libcontainer/stacktrace/stacktrace.go | 5 +
 .../runc/libcontainer/standard_init_linux.go | 190 +
 .../runc/libcontainer/state_linux.go | 247 +
 .../opencontainers/runc/libcontainer/stats.go | 15 +
 .../runc/libcontainer/stats_freebsd.go | 5 +
 .../runc/libcontainer/stats_linux.go | 8 +
 .../runc/libcontainer/stats_solaris.go | 7 +
 .../runc/libcontainer/stats_windows.go | 5 +
 .../opencontainers/runc/libcontainer/sync.go | 107 +
 .../runc/libcontainer/system/linux.go | 143 +
 .../runc/libcontainer/system/proc.go | 43 +
 .../runc/libcontainer/system/setns_linux.go | 40 +
 .../runc/libcontainer/system/syscall_linux_386.go | 25 +
 .../runc/libcontainer/system/syscall_linux_64.go | 25 +
 .../runc/libcontainer/system/syscall_linux_arm.go | 25 +
 .../runc/libcontainer/system/sysconfig.go | 12 +
 .../runc/libcontainer/system/sysconfig_notcgo.go | 15 +
 .../runc/libcontainer/system/unsupported.go | 9 +
 .../runc/libcontainer/system/xattrs_linux.go | 99 +
 .../runc/libcontainer/user/lookup.go | 110 +
 .../runc/libcontainer/user/lookup_unix.go | 30 +
 .../runc/libcontainer/user/lookup_unsupported.go | 21 +
 .../opencontainers/runc/libcontainer/user/user.go | 441 +
 .../opencontainers/runc/libcontainer/utils/cmsg.go | 95 +
 .../runc/libcontainer/utils/utils.go | 126 +
 .../runc/libcontainer/utils/utils_unix.go | 43 +
 vendor/github.com/opencontainers/runc/list.go | 175 +
 vendor/github.com/opencontainers/runc/main.go | 149 +
 .../github.com/opencontainers/runc/main_solaris.go | 21 +
 vendor/github.com/opencontainers/runc/main_unix.go | 33 +
 .../opencontainers/runc/main_unsupported.go | 13 +
 .../opencontainers/runc/notify_socket.go | 108 +
 vendor/github.com/opencontainers/runc/pause.go | 57 +
 vendor/github.com/opencontainers/runc/ps.go | 109 +
 vendor/github.com/opencontainers/runc/restore.go | 216 +
 .../github.com/opencontainers/runc/rlimit_linux.go | 49 +
 vendor/github.com/opencontainers/runc/run.go | 84 +
 vendor/github.com/opencontainers/runc/signals.go | 135 +
 vendor/github.com/opencontainers/runc/spec.go | 160 +
 vendor/github.com/opencontainers/runc/start.go | 43 +
 vendor/github.com/opencontainers/runc/state.go | 60 +
 vendor/github.com/opencontainers/runc/tty.go | 134 +
 vendor/github.com/opencontainers/runc/update.go | 259 +
 vendor/github.com/opencontainers/runc/utils.go | 82 +
 .../github.com/opencontainers/runc/utils_linux.go | 378 +
 vendor/github.com/opencontainers/runc/vendor.conf | 21 +
 .../github.com/opencontainers/runtime-spec/LICENSE | 191 +
 .../opencontainers/runtime-spec/README.md | 164 +
 .../opencontainers/runtime-spec/specs-go/config.go | 563 +
 .../opencontainers/runtime-spec/specs-go/state.go | 17 +
 .../runtime-spec/specs-go/version.go | 18 +
 vendor/github.com/opencontainers/selinux/LICENSE | 201 +
 vendor/github.com/opencontainers/selinux/README.md | 7 +
 .../selinux/go-selinux/label/label.go | 84 +
 .../selinux/go-selinux/label/label_selinux.go | 204 +
 .../opencontainers/selinux/go-selinux/selinux.go | 593 +
 .../opencontainers/selinux/go-selinux/xattrs.go | 78 +
 vendor/github.com/pborman/uuid/LICENSE | 27 +
 vendor/github.com/pborman/uuid/README.md | 13 +
 vendor/github.com/pborman/uuid/dce.go | 84 +
 vendor/github.com/pborman/uuid/doc.go | 8 +
 vendor/github.com/pborman/uuid/hash.go | 53 +
 vendor/github.com/pborman/uuid/json.go | 34 +
 vendor/github.com/pborman/uuid/node.go | 117 +
 vendor/github.com/pborman/uuid/sql.go | 58 +
 vendor/github.com/pborman/uuid/time.go | 132 +
 vendor/github.com/pborman/uuid/util.go | 43 +
 vendor/github.com/pborman/uuid/uuid.go | 176 +
 vendor/github.com/pborman/uuid/version1.go | 41 +
 vendor/github.com/pborman/uuid/version4.go | 25 +
 vendor/github.com/philhofer/fwd/LICENSE.md | 7 +
 vendor/github.com/philhofer/fwd/README.md | 315 +
 vendor/github.com/philhofer/fwd/reader.go | 379 +
 vendor/github.com/philhofer/fwd/writer.go | 224 +
 .../github.com/philhofer/fwd/writer_appengine.go | 5 +
 vendor/github.com/philhofer/fwd/writer_unsafe.go | 18 +
 vendor/github.com/pivotal-golang/clock/LICENSE | 202 +
 vendor/github.com/pivotal-golang/clock/README.md | 1 +
 vendor/github.com/pivotal-golang/clock/clock.go | 42 +
 vendor/github.com/pivotal-golang/clock/ticker.go | 20 +
 vendor/github.com/pivotal-golang/clock/timer.go | 25 +
 vendor/github.com/pkg/errors/LICENSE | 23 +
 vendor/github.com/pkg/errors/README.md | 52 +
 vendor/github.com/pkg/errors/errors.go | 269 +
 vendor/github.com/pkg/errors/stack.go | 178 +
 vendor/github.com/pmezard/go-difflib/LICENSE | 27 +
 vendor/github.com/pmezard/go-difflib/README.md | 50 +
 .../pmezard/go-difflib/difflib/difflib.go | 772 +
 vendor/github.com/prometheus/client_golang/LICENSE | 201 +
 vendor/github.com/prometheus/client_golang/NOTICE | 28 +
 .../github.com/prometheus/client_golang/README.md | 45 +
 .../prometheus/client_golang/prometheus/README.md | 53 +
 .../client_golang/prometheus/collector.go | 75 +
 .../prometheus/client_golang/prometheus/counter.go | 173 +
 .../prometheus/client_golang/prometheus/desc.go | 192 +
 .../prometheus/client_golang/prometheus/doc.go | 111 +
 .../prometheus/client_golang/prometheus/expvar.go | 119 +
 .../prometheus/client_golang/prometheus/fnv.go | 29 +
 .../prometheus/client_golang/prometheus/gauge.go | 144 +
 .../client_golang/prometheus/go_collector.go | 263 +
 .../client_golang/prometheus/histogram.go | 448 +
 .../prometheus/client_golang/prometheus/http.go | 381 +
 .../prometheus/client_golang/prometheus/metric.go | 166 +
 .../client_golang/prometheus/process_collector.go | 142 +
 .../prometheus/client_golang/prometheus/push.go | 65 +
 .../client_golang/prometheus/registry.go | 741 +
 .../prometheus/client_golang/prometheus/summary.go | 538 +
 .../prometheus/client_golang/prometheus/untyped.go | 142 +
 .../prometheus/client_golang/prometheus/value.go | 234 +
 .../prometheus/client_golang/prometheus/vec.go | 249 +
 vendor/github.com/prometheus/client_model/LICENSE | 201 +
 vendor/github.com/prometheus/client_model/NOTICE | 5 +
 .../github.com/prometheus/client_model/README.md | 26 +
 .../prometheus/client_model/go/metrics.pb.go | 364 +
 vendor/github.com/prometheus/common/LICENSE | 201 +
 vendor/github.com/prometheus/common/NOTICE | 5 +
 vendor/github.com/prometheus/common/README.md | 12 +
 .../github.com/prometheus/common/expfmt/decode.go | 412 +
 .../github.com/prometheus/common/expfmt/encode.go | 88 +
 .../github.com/prometheus/common/expfmt/expfmt.go | 40 +
 vendor/github.com/prometheus/common/expfmt/fuzz.go | 36 +
 .../prometheus/common/expfmt/text_create.go | 300 +
 .../prometheus/common/expfmt/text_parse.go | 753 +
 .../internal/bitbucket.org/ww/goautoneg/README.txt | 67 +
 .../internal/bitbucket.org/ww/goautoneg/autoneg.go | 162 +
 vendor/github.com/prometheus/common/model/alert.go | 136 +
 .../prometheus/common/model/fingerprinting.go | 105 +
 vendor/github.com/prometheus/common/model/fnv.go | 42 +
 .../github.com/prometheus/common/model/labels.go | 206 +
 .../github.com/prometheus/common/model/labelset.go | 169 +
 .../github.com/prometheus/common/model/metric.go | 98 +
 vendor/github.com/prometheus/common/model/model.go | 16 +
 .../prometheus/common/model/signature.go | 144 +
 .../github.com/prometheus/common/model/silence.go | 106 +
 vendor/github.com/prometheus/common/model/time.go | 249 +
 vendor/github.com/prometheus/common/model/value.go | 403 +
 vendor/github.com/prometheus/procfs/LICENSE | 201 +
 vendor/github.com/prometheus/procfs/NOTICE | 7 +
 vendor/github.com/prometheus/procfs/README.md | 10 +
 vendor/github.com/prometheus/procfs/doc.go | 45 +
 vendor/github.com/prometheus/procfs/fs.go | 33 +
 vendor/github.com/prometheus/procfs/ipvs.go | 224 +
 vendor/github.com/prometheus/procfs/mdstat.go | 138 +
 vendor/github.com/prometheus/procfs/proc.go | 212 +
 vendor/github.com/prometheus/procfs/proc_io.go | 55 +
 vendor/github.com/prometheus/procfs/proc_limits.go | 137 +
 vendor/github.com/prometheus/procfs/proc_stat.go | 175 +
 vendor/github.com/prometheus/procfs/stat.go | 56 +
 vendor/github.com/rcrowley/go-metrics/LICENSE | 29 +
 vendor/github.com/rcrowley/go-metrics/README.md | 139 +
 vendor/github.com/rcrowley/go-metrics/counter.go | 112 +
 vendor/github.com/rcrowley/go-metrics/debug.go | 76 +
 vendor/github.com/rcrowley/go-metrics/ewma.go | 118 +
 vendor/github.com/rcrowley/go-metrics/gauge.go | 84 +
 .../rcrowley/go-metrics/gauge_float64.go | 91 +
 vendor/github.com/rcrowley/go-metrics/graphite.go | 113 +
 .../github.com/rcrowley/go-metrics/healthcheck.go | 61 +
 vendor/github.com/rcrowley/go-metrics/histogram.go | 202 +
 vendor/github.com/rcrowley/go-metrics/json.go | 83 +
 vendor/github.com/rcrowley/go-metrics/log.go | 77 +
 vendor/github.com/rcrowley/go-metrics/meter.go | 233 +
 vendor/github.com/rcrowley/go-metrics/metrics.go | 13 +
 vendor/github.com/rcrowley/go-metrics/opentsdb.go | 119 +
 vendor/github.com/rcrowley/go-metrics/registry.go | 247 +
 vendor/github.com/rcrowley/go-metrics/runtime.go | 212 +
 .../github.com/rcrowley/go-metrics/runtime_cgo.go | 10 +
 .../rcrowley/go-metrics/runtime_gccpufraction.go | 9 +
 .../rcrowley/go-metrics/runtime_no_cgo.go | 7 +
 .../go-metrics/runtime_no_gccpufraction.go | 9 +
 vendor/github.com/rcrowley/go-metrics/sample.go | 609 +
 vendor/github.com/rcrowley/go-metrics/syslog.go | 78 +
 vendor/github.com/rcrowley/go-metrics/timer.go | 311 +
 vendor/github.com/rcrowley/go-metrics/writer.go | 100 +
 vendor/github.com/resin-os/circbuf/LICENSE | 20 +
 vendor/github.com/resin-os/circbuf/README.md | 28 +
 vendor/github.com/resin-os/circbuf/circbuf.go | 115 +
 vendor/github.com/resin-os/librsync-go/LICENSE | 177 +
 vendor/github.com/resin-os/librsync-go/README.md | 15 +
 vendor/github.com/resin-os/librsync-go/delta.go | 83 +
 vendor/github.com/resin-os/librsync-go/match.go | 151 +
 vendor/github.com/resin-os/librsync-go/op.go | 539 +
 vendor/github.com/resin-os/librsync-go/patch.go | 88 +
 vendor/github.com/resin-os/librsync-go/rollsum.go | 67 +
 .../github.com/resin-os/librsync-go/signature.go | 98 +
 vendor/github.com/sean-/seed/LICENSE | 54 +
 vendor/github.com/sean-/seed/README.md | 44 +
 vendor/github.com/sean-/seed/init.go | 84 +
 .../github.com/seccomp/libseccomp-golang/LICENSE | 22 +
 vendor/github.com/seccomp/libseccomp-golang/README | 26 +
 .../seccomp/libseccomp-golang/seccomp.go | 857 +
 .../seccomp/libseccomp-golang/seccomp_internal.go | 506 +
 vendor/github.com/spf13/cobra/LICENSE.txt | 174 +
 vendor/github.com/spf13/cobra/README.md | 928 +
 vendor/github.com/spf13/cobra/args.go | 98 +
 vendor/github.com/spf13/cobra/bash_completions.go | 630 +
 vendor/github.com/spf13/cobra/cobra.go | 175 +
 vendor/github.com/spf13/cobra/command.go | 1359 ++
 vendor/github.com/spf13/cobra/command_notwin.go | 5 +
 vendor/github.com/spf13/cobra/command_win.go | 26 +
 vendor/github.com/spf13/pflag/LICENSE | 28 +
 vendor/github.com/spf13/pflag/README.md | 277 +
 vendor/github.com/spf13/pflag/bool.go | 94 +
 vendor/github.com/spf13/pflag/bool_slice.go | 147 +
 vendor/github.com/spf13/pflag/count.go | 94 +
 vendor/github.com/spf13/pflag/duration.go | 86 +
 vendor/github.com/spf13/pflag/flag.go | 1063 ++
 vendor/github.com/spf13/pflag/float32.go | 88 +
 vendor/github.com/spf13/pflag/float64.go | 84 +
 vendor/github.com/spf13/pflag/golangflag.go | 101 +
 vendor/github.com/spf13/pflag/int.go | 84 +
 vendor/github.com/spf13/pflag/int32.go | 88 +
 vendor/github.com/spf13/pflag/int64.go | 84 +
 vendor/github.com/spf13/pflag/int8.go | 88 +
 vendor/github.com/spf13/pflag/int_slice.go | 128 +
 vendor/github.com/spf13/pflag/ip.go | 94 +
vendor/github.com/spf13/pflag/ip_slice.go | 148 +
vendor/github.com/spf13/pflag/ipmask.go | 122 +
vendor/github.com/spf13/pflag/ipnet.go | 98 +
vendor/github.com/spf13/pflag/string.go | 80 +
vendor/github.com/spf13/pflag/string_array.go | 103 +
vendor/github.com/spf13/pflag/string_slice.go | 129 +
vendor/github.com/spf13/pflag/uint.go | 88 +
vendor/github.com/spf13/pflag/uint16.go | 88 +
vendor/github.com/spf13/pflag/uint32.go | 88 +
vendor/github.com/spf13/pflag/uint64.go | 88 +
vendor/github.com/spf13/pflag/uint8.go | 88 +
vendor/github.com/spf13/pflag/uint_slice.go | 126 +
vendor/github.com/stevvooe/continuity/LICENSE | 202 +
vendor/github.com/stevvooe/continuity/README.md | 19 +
vendor/github.com/stevvooe/continuity/sysx/asm.s | 10 +
.../stevvooe/continuity/sysx/chmod_darwin.go | 18 +
.../stevvooe/continuity/sysx/chmod_darwin_386.go | 25 +
.../stevvooe/continuity/sysx/chmod_darwin_amd64.go | 25 +
.../stevvooe/continuity/sysx/chmod_freebsd.go | 17 +
.../continuity/sysx/chmod_freebsd_amd64.go | 25 +
.../stevvooe/continuity/sysx/chmod_linux.go | 12 +
.../stevvooe/continuity/sysx/copy_linux.go | 9 +
.../stevvooe/continuity/sysx/copy_linux_386.go | 20 +
.../stevvooe/continuity/sysx/copy_linux_amd64.go | 20 +
.../stevvooe/continuity/sysx/copy_linux_arm.go | 20 +
.../stevvooe/continuity/sysx/copy_linux_arm64.go | 20 +
.../stevvooe/continuity/sysx/nodata_linux.go | 7 +
.../stevvooe/continuity/sysx/nodata_unix.go | 9 +
vendor/github.com/stevvooe/continuity/sysx/sys.go | 37 +
.../stevvooe/continuity/sysx/sysnum_linux_386.go | 7 +
.../stevvooe/continuity/sysx/sysnum_linux_amd64.go | 7 +
.../stevvooe/continuity/sysx/sysnum_linux_arm.go | 7 +
.../stevvooe/continuity/sysx/sysnum_linux_arm64.go | 7 +
.../github.com/stevvooe/continuity/sysx/xattr.go | 67 +
.../stevvooe/continuity/sysx/xattr_darwin.go | 71 +
.../stevvooe/continuity/sysx/xattr_darwin_386.go | 111 +
.../stevvooe/continuity/sysx/xattr_darwin_amd64.go | 111 +
.../stevvooe/continuity/sysx/xattr_freebsd.go | 53 +
.../stevvooe/continuity/sysx/xattr_linux.go | 61 +
.../stevvooe/continuity/sysx/xattr_linux_386.go | 111 +
.../stevvooe/continuity/sysx/xattr_linux_amd64.go | 111 +
.../stevvooe/continuity/sysx/xattr_linux_arm.go | 111 +
.../stevvooe/continuity/sysx/xattr_linux_arm64.go | 111 +
.../stevvooe/continuity/sysx/xattr_linux_ppc64.go | 111 +
.../continuity/sysx/xattr_linux_ppc64le.go | 111 +
.../stevvooe/continuity/sysx/xattr_linux_s390x.go | 111 +
vendor/github.com/stretchr/testify/LICENSE | 22 +
vendor/github.com/stretchr/testify/README.md | 332 +
.../stretchr/testify/assert/assertion_forward.go | 352 +
.../stretchr/testify/assert/assertions.go | 1069 ++
vendor/github.com/stretchr/testify/assert/doc.go | 45 +
.../github.com/stretchr/testify/assert/errors.go | 10 +
.../stretchr/testify/assert/forward_assertions.go | 16 +
.../stretchr/testify/assert/http_assertions.go | 106 +
vendor/github.com/stretchr/testify/require/doc.go | 28 +
.../testify/require/forward_requirements.go | 16 +
.../github.com/stretchr/testify/require/require.go | 429 +
.../stretchr/testify/require/require_forward.go | 353 +
.../stretchr/testify/require/requirements.go | 9 +
vendor/github.com/syndtr/gocapability/LICENSE | 24 +
.../syndtr/gocapability/capability/capability.go | 72 +
.../gocapability/capability/capability_linux.go | 650 +
.../gocapability/capability/capability_noop.go | 19 +
.../syndtr/gocapability/capability/enum.go | 268 +
.../syndtr/gocapability/capability/enum_gen.go | 129 +
.../gocapability/capability/syscall_linux.go | 154 +
vendor/github.com/tchap/go-patricia/LICENSE | 20 +
vendor/github.com/tchap/go-patricia/README.md | 123 +
.../tchap/go-patricia/patricia/children.go | 325 +
.../tchap/go-patricia/patricia/patricia.go | 594 +
vendor/github.com/tinylib/msgp/LICENSE | 8 +
vendor/github.com/tinylib/msgp/README.md | 104 +
vendor/github.com/tinylib/msgp/msgp/circular.go | 38 +
vendor/github.com/tinylib/msgp/msgp/defs.go | 142 +
vendor/github.com/tinylib/msgp/msgp/edit.go | 241 +
vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 +
vendor/github.com/tinylib/msgp/msgp/errors.go | 142 +
vendor/github.com/tinylib/msgp/msgp/extension.go | 548 +
vendor/github.com/tinylib/msgp/msgp/integers.go | 174 +
vendor/github.com/tinylib/msgp/msgp/json.go | 542 +
vendor/github.com/tinylib/msgp/msgp/json_bytes.go | 363 +
vendor/github.com/tinylib/msgp/msgp/number.go | 140 +
.../tinylib/msgp/msgp/number_appengine.go | 101 +
.../github.com/tinylib/msgp/msgp/number_unsafe.go | 159 +
vendor/github.com/tinylib/msgp/msgp/read.go | 1118 ++
vendor/github.com/tinylib/msgp/msgp/read_bytes.go | 1073 ++
vendor/github.com/tinylib/msgp/msgp/size.go | 38 +
vendor/github.com/tinylib/msgp/msgp/write.go | 768 +
vendor/github.com/tinylib/msgp/msgp/write_bytes.go | 369 +
vendor/github.com/tonistiigi/fifo/LICENSE | 21 +
vendor/github.com/tonistiigi/fifo/fifo.go | 217 +
vendor/github.com/tonistiigi/fifo/handle_linux.go | 76 +
.../github.com/tonistiigi/fifo/handle_nolinux.go | 49 +
vendor/github.com/tonistiigi/fifo/readme.md | 30 +
vendor/github.com/tonistiigi/fsutil/LICENSE | 22 +
vendor/github.com/tonistiigi/fsutil/diff.go | 37 +
.../tonistiigi/fsutil/diff_containerd.go | 199 +
.../tonistiigi/fsutil/diff_containerd_linux.go | 37 +
vendor/github.com/tonistiigi/fsutil/diskwriter.go | 353 +
.../tonistiigi/fsutil/diskwriter_linux.go | 64 +
.../tonistiigi/fsutil/diskwriter_windows.go | 25 +
vendor/github.com/tonistiigi/fsutil/generate.go | 3 +
vendor/github.com/tonistiigi/fsutil/hardlinks.go | 46 +
vendor/github.com/tonistiigi/fsutil/readme.md | 45 +
vendor/github.com/tonistiigi/fsutil/receive.go | 210 +
.../tonistiigi/fsutil/receive_unsupported.go | 14 +
vendor/github.com/tonistiigi/fsutil/send.go | 201 +
vendor/github.com/tonistiigi/fsutil/stat.pb.go | 931 +
vendor/github.com/tonistiigi/fsutil/stat.proto | 17 +
vendor/github.com/tonistiigi/fsutil/validator.go | 88 +
vendor/github.com/tonistiigi/fsutil/walker.go | 170 +
vendor/github.com/tonistiigi/fsutil/walker_unix.go | 61 +
.../github.com/tonistiigi/fsutil/walker_windows.go | 14 +
vendor/github.com/tonistiigi/fsutil/wire.pb.go | 563 +
vendor/github.com/tonistiigi/fsutil/wire.proto | 18 +
vendor/github.com/urfave/cli/LICENSE | 21 +
vendor/github.com/urfave/cli/README.md | 1381 ++
vendor/github.com/urfave/cli/app.go | 492 +
vendor/github.com/urfave/cli/category.go | 44 +
vendor/github.com/urfave/cli/cli.go | 21 +
vendor/github.com/urfave/cli/command.go | 299 +
vendor/github.com/urfave/cli/context.go | 276 +
vendor/github.com/urfave/cli/errors.go | 115 +
vendor/github.com/urfave/cli/flag.go | 799 +
vendor/github.com/urfave/cli/flag_generated.go | 627 +
vendor/github.com/urfave/cli/funcs.go | 28 +
vendor/github.com/urfave/cli/help.go | 294 +
vendor/github.com/vbatts/tar-split/LICENSE | 28 +
vendor/github.com/vbatts/tar-split/README.md | 137 +
.../vbatts/tar-split/archive/tar/common.go | 340 +
.../vbatts/tar-split/archive/tar/reader.go | 1064 ++
.../vbatts/tar-split/archive/tar/stat_atim.go | 20 +
.../vbatts/tar-split/archive/tar/stat_atimespec.go | 20 +
.../vbatts/tar-split/archive/tar/stat_unix.go | 32 +
.../vbatts/tar-split/archive/tar/writer.go | 416 +
.../github.com/vbatts/tar-split/tar/asm/README.md | 44 +
.../vbatts/tar-split/tar/asm/assemble.go | 130 +
.../vbatts/tar-split/tar/asm/disassemble.go | 141 +
vendor/github.com/vbatts/tar-split/tar/asm/doc.go | 9 +
.../github.com/vbatts/tar-split/tar/storage/doc.go | 12 +
.../vbatts/tar-split/tar/storage/entry.go | 78 +
.../vbatts/tar-split/tar/storage/getter.go | 104 +
.../vbatts/tar-split/tar/storage/packer.go | 127 +
vendor/github.com/vdemeester/shakers/LICENSE | 191 +
vendor/github.com/vdemeester/shakers/README.md | 30 +
vendor/github.com/vdemeester/shakers/bool.go | 46 +
vendor/github.com/vdemeester/shakers/common.go | 310 +
vendor/github.com/vdemeester/shakers/string.go | 168 +
vendor/github.com/vdemeester/shakers/time.go | 234 +
vendor/github.com/vishvananda/netlink/LICENSE | 192 +
vendor/github.com/vishvananda/netlink/README.md | 89 +
vendor/github.com/vishvananda/netlink/addr.go | 56 +
.../github.com/vishvananda/netlink/addr_linux.go | 288 +
vendor/github.com/vishvananda/netlink/bpf_linux.go | 62 +
vendor/github.com/vishvananda/netlink/class.go | 78 +
.../github.com/vishvananda/netlink/class_linux.go | 254 +
.../vishvananda/netlink/conntrack_linux.go | 344 +
.../vishvananda/netlink/conntrack_unspecified.go | 53 +
vendor/github.com/vishvananda/netlink/filter.go | 283 +
.../github.com/vishvananda/netlink/filter_linux.go | 591 +
.../vishvananda/netlink/genetlink_linux.go | 167 +
.../vishvananda/netlink/genetlink_unspecified.go | 25 +
vendor/github.com/vishvananda/netlink/gtp_linux.go | 238 +
.../github.com/vishvananda/netlink/handle_linux.go | 111 +
.../vishvananda/netlink/handle_unspecified.go | 218 +
vendor/github.com/vishvananda/netlink/link.go | 773 +
.../github.com/vishvananda/netlink/link_linux.go | 1798 ++
.../vishvananda/netlink/link_tuntap_linux.go | 14 +
vendor/github.com/vishvananda/netlink/neigh.go | 22 +
.../github.com/vishvananda/netlink/neigh_linux.go | 250 +
vendor/github.com/vishvananda/netlink/netlink.go | 39 +
.../vishvananda/netlink/netlink_linux.go | 11 +
.../vishvananda/netlink/netlink_unspecified.go | 221 +
.../vishvananda/netlink/nl/addr_linux.go | 76 +
.../vishvananda/netlink/nl/conntrack_linux.go | 189 +
.../vishvananda/netlink/nl/genetlink_linux.go | 89 +
.../vishvananda/netlink/nl/link_linux.go | 524 +
.../vishvananda/netlink/nl/mpls_linux.go | 36 +
.../github.com/vishvananda/netlink/nl/nl_linux.go | 718 +
.../vishvananda/netlink/nl/nl_unspecified.go | 11 +
.../vishvananda/netlink/nl/route_linux.go | 80 +
.../github.com/vishvananda/netlink/nl/syscall.go | 68 +
.../github.com/vishvananda/netlink/nl/tc_linux.go | 675 +
.../vishvananda/netlink/nl/xfrm_linux.go | 296 +
.../vishvananda/netlink/nl/xfrm_monitor_linux.go | 32 +
.../vishvananda/netlink/nl/xfrm_policy_linux.go | 119 +
.../vishvananda/netlink/nl/xfrm_state_linux.go | 334 +
vendor/github.com/vishvananda/netlink/order.go | 32 +
vendor/github.com/vishvananda/netlink/protinfo.go | 58 +
.../vishvananda/netlink/protinfo_linux.go | 74 +
vendor/github.com/vishvananda/netlink/qdisc.go | 232 +
.../github.com/vishvananda/netlink/qdisc_linux.go | 527 +
vendor/github.com/vishvananda/netlink/route.go | 116 +
.../github.com/vishvananda/netlink/route_linux.go | 674 +
.../vishvananda/netlink/route_unspecified.go | 11 +
vendor/github.com/vishvananda/netlink/rule.go | 40 +
.../github.com/vishvananda/netlink/rule_linux.go | 221 +
vendor/github.com/vishvananda/netlink/socket.go | 27 +
.../github.com/vishvananda/netlink/socket_linux.go | 159 +
vendor/github.com/vishvananda/netlink/xfrm.go | 74 +
.../vishvananda/netlink/xfrm_monitor_linux.go | 98 +
.../github.com/vishvananda/netlink/xfrm_policy.go | 74 +
.../vishvananda/netlink/xfrm_policy_linux.go | 257 +
.../github.com/vishvananda/netlink/xfrm_state.go | 108 +
.../vishvananda/netlink/xfrm_state_linux.go | 444 +
vendor/github.com/vishvananda/netns/LICENSE | 192 +
vendor/github.com/vishvananda/netns/README.md | 49 +
vendor/github.com/vishvananda/netns/netns.go | 67 +
vendor/github.com/vishvananda/netns/netns_linux.go | 205 +
.../vishvananda/netns/netns_linux_386.go | 7 +
.../vishvananda/netns/netns_linux_amd64.go | 7 +
.../vishvananda/netns/netns_linux_arm.go | 7 +
.../vishvananda/netns/netns_linux_arm64.go | 7 +
.../vishvananda/netns/netns_linux_ppc64le.go | 7 +
.../vishvananda/netns/netns_linux_s390x.go | 7 +
.../vishvananda/netns/netns_unspecified.go | 35 +
.../xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt | 202 +
vendor/github.com/xeipuuv/gojsonpointer/README.md | 8 +
vendor/github.com/xeipuuv/gojsonpointer/pointer.go | 217 +
.../xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt | 202 +
.../github.com/xeipuuv/gojsonreference/README.md | 10 +
.../xeipuuv/gojsonreference/reference.go | 141 +
.../xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt | 202 +
vendor/github.com/xeipuuv/gojsonschema/README.md | 236 +
vendor/github.com/xeipuuv/gojsonschema/errors.go | 242 +
.../xeipuuv/gojsonschema/format_checkers.go | 178 +
.../github.com/xeipuuv/gojsonschema/internalLog.go | 37 +
.../github.com/xeipuuv/gojsonschema/jsonContext.go | 72 +
.../github.com/xeipuuv/gojsonschema/jsonLoader.go | 286 +
vendor/github.com/xeipuuv/gojsonschema/locales.go | 275 +
vendor/github.com/xeipuuv/gojsonschema/result.go | 171 +
vendor/github.com/xeipuuv/gojsonschema/schema.go | 908 +
.../github.com/xeipuuv/gojsonschema/schemaPool.go | 107 +
.../xeipuuv/gojsonschema/schemaReferencePool.go | 67 +
.../github.com/xeipuuv/gojsonschema/schemaType.go | 83 +
.../github.com/xeipuuv/gojsonschema/subSchema.go | 227 +
vendor/github.com/xeipuuv/gojsonschema/types.go | 58 +
vendor/github.com/xeipuuv/gojsonschema/utils.go | 202 +
.../github.com/xeipuuv/gojsonschema/validation.go | 829 +
vendor/golang.org/x/crypto/LICENSE | 27 +
vendor/golang.org/x/crypto/PATENTS | 22 +
vendor/golang.org/x/crypto/README | 3 +
vendor/golang.org/x/crypto/blake2b/blake2b.go | 207 +
.../x/crypto/blake2b/blake2bAVX2_amd64.go | 43 +
.../x/crypto/blake2b/blake2bAVX2_amd64.s | 762 +
.../golang.org/x/crypto/blake2b/blake2b_amd64.go | 25 +
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s | 290 +
.../golang.org/x/crypto/blake2b/blake2b_generic.go | 179 +
vendor/golang.org/x/crypto/blake2b/blake2b_ref.go | 11 +
vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 +
vendor/golang.org/x/crypto/blake2b/register.go | 32 +
.../golang.org/x/crypto/curve25519/const_amd64.h | 8 +
.../golang.org/x/crypto/curve25519/const_amd64.s | 20 +
.../golang.org/x/crypto/curve25519/cswap_amd64.s | 65 +
.../golang.org/x/crypto/curve25519/curve25519.go | 834 +
vendor/golang.org/x/crypto/curve25519/doc.go | 23 +
.../golang.org/x/crypto/curve25519/freeze_amd64.s | 73 +
.../x/crypto/curve25519/ladderstep_amd64.s | 1377 ++
.../x/crypto/curve25519/mont25519_amd64.go | 240 +
vendor/golang.org/x/crypto/curve25519/mul_amd64.s | 169 +
.../golang.org/x/crypto/curve25519/square_amd64.s | 132 +
vendor/golang.org/x/crypto/md4/md4.go | 118 +
vendor/golang.org/x/crypto/md4/md4block.go | 89 +
.../x/crypto/nacl/secretbox/secretbox.go | 149 +
vendor/golang.org/x/crypto/pkcs12/bmp-string.go | 50 +
vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 +
vendor/golang.org/x/crypto/pkcs12/errors.go | 23 +
.../golang.org/x/crypto/pkcs12/internal/rc2/rc2.go | 274 +
vendor/golang.org/x/crypto/pkcs12/mac.go | 45 +
vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 +
vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 346 +
vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 +
vendor/golang.org/x/crypto/poly1305/poly1305.go | 33 +
vendor/golang.org/x/crypto/poly1305/sum_amd64.go | 22 +
vendor/golang.org/x/crypto/poly1305/sum_amd64.s | 125 +
vendor/golang.org/x/crypto/poly1305/sum_arm.go | 22 +
vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 +
vendor/golang.org/x/crypto/poly1305/sum_ref.go | 141 +
.../golang.org/x/crypto/salsa20/salsa/hsalsa20.go | 144 +
.../x/crypto/salsa20/salsa/salsa2020_amd64.s | 889 +
.../golang.org/x/crypto/salsa20/salsa/salsa208.go | 199 +
.../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 +
.../x/crypto/salsa20/salsa/salsa20_ref.go | 234 +
vendor/golang.org/x/net/LICENSE | 27 +
vendor/golang.org/x/net/PATENTS | 22 +
vendor/golang.org/x/net/README | 3 +
vendor/golang.org/x/net/context/context.go | 156 +
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go | 74 +
.../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 +
vendor/golang.org/x/net/context/go17.go | 72 +
vendor/golang.org/x/net/context/pre_go17.go | 300 +
vendor/golang.org/x/net/http2/README | 20 +
vendor/golang.org/x/net/http2/ciphers.go | 641 +
vendor/golang.org/x/net/http2/client_conn_pool.go | 256 +
.../golang.org/x/net/http2/configure_transport.go | 80 +
vendor/golang.org/x/net/http2/databuffer.go | 146 +
vendor/golang.org/x/net/http2/errors.go | 130 +
vendor/golang.org/x/net/http2/flow.go | 50 +
vendor/golang.org/x/net/http2/frame.go | 1579 ++
vendor/golang.org/x/net/http2/go16.go | 16 +
vendor/golang.org/x/net/http2/go17.go | 106 +
vendor/golang.org/x/net/http2/go17_not18.go | 36 +
vendor/golang.org/x/net/http2/go18.go | 54 +
vendor/golang.org/x/net/http2/go19.go | 16 +
vendor/golang.org/x/net/http2/gotrack.go | 170 +
vendor/golang.org/x/net/http2/headermap.go | 78 +
vendor/golang.org/x/net/http2/hpack/encode.go | 240 +
vendor/golang.org/x/net/http2/hpack/hpack.go | 490 +
vendor/golang.org/x/net/http2/hpack/huffman.go | 212 +
vendor/golang.org/x/net/http2/hpack/tables.go | 479 +
vendor/golang.org/x/net/http2/http2.go | 387 +
vendor/golang.org/x/net/http2/not_go16.go | 21 +
vendor/golang.org/x/net/http2/not_go17.go | 87 +
vendor/golang.org/x/net/http2/not_go18.go | 27 +
vendor/golang.org/x/net/http2/not_go19.go | 16 +
vendor/golang.org/x/net/http2/pipe.go | 163 +
vendor/golang.org/x/net/http2/server.go | 2842 +++
vendor/golang.org/x/net/http2/transport.go | 2123 +++
vendor/golang.org/x/net/http2/write.go | 370 +
vendor/golang.org/x/net/http2/writesched.go | 242 +
.../golang.org/x/net/http2/writesched_priority.go | 452 +
vendor/golang.org/x/net/http2/writesched_random.go | 72 +
vendor/golang.org/x/net/idna/idna.go | 668 +
vendor/golang.org/x/net/idna/punycode.go | 203 +
vendor/golang.org/x/net/idna/tables.go | 4477 +++++
vendor/golang.org/x/net/idna/trie.go | 72 +
vendor/golang.org/x/net/idna/trieval.go | 114 +
.../x/net/internal/timeseries/timeseries.go | 525 +
vendor/golang.org/x/net/lex/httplex/httplex.go | 351 +
vendor/golang.org/x/net/proxy/direct.go | 18 +
vendor/golang.org/x/net/proxy/per_host.go | 140 +
vendor/golang.org/x/net/proxy/proxy.go | 94 +
vendor/golang.org/x/net/proxy/socks5.go | 213 +
vendor/golang.org/x/net/trace/events.go | 532 +
vendor/golang.org/x/net/trace/histogram.go | 365 +
vendor/golang.org/x/net/trace/trace.go | 1066 ++
vendor/golang.org/x/net/trace/trace_go16.go | 21 +
vendor/golang.org/x/net/trace/trace_go17.go | 21 +
vendor/golang.org/x/net/websocket/client.go | 106 +
vendor/golang.org/x/net/websocket/dial.go | 24 +
vendor/golang.org/x/net/websocket/hybi.go | 583 +
vendor/golang.org/x/net/websocket/server.go | 113 +
vendor/golang.org/x/net/websocket/websocket.go | 448 +
vendor/golang.org/x/oauth2/LICENSE | 27 +
vendor/golang.org/x/oauth2/README.md | 65 +
vendor/golang.org/x/oauth2/client_appengine.go | 25 +
vendor/golang.org/x/oauth2/google/appengine.go | 89 +
.../golang.org/x/oauth2/google/appengine_hook.go | 14 +
.../golang.org/x/oauth2/google/appenginevm_hook.go | 15 +
vendor/golang.org/x/oauth2/google/default.go | 130 +
vendor/golang.org/x/oauth2/google/google.go | 202 +
vendor/golang.org/x/oauth2/google/jwt.go | 74 +
vendor/golang.org/x/oauth2/google/sdk.go | 172 +
vendor/golang.org/x/oauth2/internal/oauth2.go | 76 +
vendor/golang.org/x/oauth2/internal/token.go | 227 +
vendor/golang.org/x/oauth2/internal/transport.go | 69 +
vendor/golang.org/x/oauth2/jws/jws.go | 182 +
vendor/golang.org/x/oauth2/jwt/jwt.go | 157 +
vendor/golang.org/x/oauth2/oauth2.go | 341 +
vendor/golang.org/x/oauth2/token.go | 158 +
vendor/golang.org/x/oauth2/transport.go | 132 +
vendor/golang.org/x/sync/LICENSE | 27 +
vendor/golang.org/x/sync/PATENTS | 22 +
vendor/golang.org/x/sync/README | 2 +
vendor/golang.org/x/sync/errgroup/errgroup.go | 67 +
.../golang.org/x/sync/singleflight/singleflight.go | 111 +
vendor/golang.org/x/sync/syncmap/map.go | 372 +
vendor/golang.org/x/sys/LICENSE | 27 +
vendor/golang.org/x/sys/PATENTS | 22 +
vendor/golang.org/x/sys/README | 3 +
vendor/golang.org/x/sys/unix/README.md | 173 +
vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 +
vendor/golang.org/x/sys/unix/asm_darwin_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 +
vendor/golang.org/x/sys/unix/asm_darwin_arm64.s | 30 +
vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_freebsd_386.s | 29 +
vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_freebsd_arm.s | 29 +
vendor/golang.org/x/sys/unix/asm_linux_386.s | 35 +
vendor/golang.org/x/sys/unix/asm_linux_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_linux_arm.s | 29 +
vendor/golang.org/x/sys/unix/asm_linux_arm64.s | 24 +
vendor/golang.org/x/sys/unix/asm_linux_mips64x.s | 28 +
vendor/golang.org/x/sys/unix/asm_linux_mipsx.s | 31 +
vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s | 28 +
vendor/golang.org/x/sys/unix/asm_linux_s390x.s | 28 +
vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 +
vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 +
vendor/golang.org/x/sys/unix/asm_openbsd_386.s | 29 +
vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_openbsd_arm.s | 29 +
vendor/golang.org/x/sys/unix/asm_solaris_amd64.s | 17 +
vendor/golang.org/x/sys/unix/bluetooth_linux.go | 35 +
vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 +
vendor/golang.org/x/sys/unix/constants.go | 13 +
vendor/golang.org/x/sys/unix/dev_linux.go | 42 +
vendor/golang.org/x/sys/unix/dirent.go | 102 +
vendor/golang.org/x/sys/unix/endian_big.go | 9 +
vendor/golang.org/x/sys/unix/endian_little.go | 9 +
vendor/golang.org/x/sys/unix/env_unix.go | 27 +
vendor/golang.org/x/sys/unix/env_unset.go | 14 +
vendor/golang.org/x/sys/unix/errors_freebsd_386.go | 227 +
.../golang.org/x/sys/unix/errors_freebsd_amd64.go | 227 +
vendor/golang.org/x/sys/unix/errors_freebsd_arm.go | 226 +
vendor/golang.org/x/sys/unix/file_unix.go | 27 +
vendor/golang.org/x/sys/unix/flock.go | 22 +
vendor/golang.org/x/sys/unix/flock_linux_32bit.go | 13 +
vendor/golang.org/x/sys/unix/gccgo.go | 46 +
vendor/golang.org/x/sys/unix/gccgo_c.c | 41 +
vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go | 20 +
.../golang.org/x/sys/unix/gccgo_linux_sparc64.go | 20 +
vendor/golang.org/x/sys/unix/openbsd_pledge.go | 38 +
vendor/golang.org/x/sys/unix/race.go | 30 +
vendor/golang.org/x/sys/unix/race0.go | 25 +
vendor/golang.org/x/sys/unix/sockcmsg_linux.go | 36 +
vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 104 +
vendor/golang.org/x/sys/unix/str.go | 26 +
vendor/golang.org/x/sys/unix/syscall.go | 69 +
vendor/golang.org/x/sys/unix/syscall_bsd.go | 635 +
vendor/golang.org/x/sys/unix/syscall_darwin.go | 536 +
vendor/golang.org/x/sys/unix/syscall_darwin_386.go | 77 +
.../golang.org/x/sys/unix/syscall_darwin_amd64.go | 77 +
vendor/golang.org/x/sys/unix/syscall_darwin_arm.go | 71 +
.../golang.org/x/sys/unix/syscall_darwin_arm64.go | 77 +
vendor/golang.org/x/sys/unix/syscall_dragonfly.go | 415 +
.../x/sys/unix/syscall_dragonfly_amd64.go | 61 +
vendor/golang.org/x/sys/unix/syscall_freebsd.go | 708 +
.../golang.org/x/sys/unix/syscall_freebsd_386.go | 61 +
.../golang.org/x/sys/unix/syscall_freebsd_amd64.go | 61 +
.../golang.org/x/sys/unix/syscall_freebsd_arm.go | 61 +
vendor/golang.org/x/sys/unix/syscall_linux.go | 1459 ++
vendor/golang.org/x/sys/unix/syscall_linux_386.go | 399 +
.../golang.org/x/sys/unix/syscall_linux_amd64.go | 152 +
.../x/sys/unix/syscall_linux_amd64_gc.go | 13 +
vendor/golang.org/x/sys/unix/syscall_linux_arm.go | 263 +
.../golang.org/x/sys/unix/syscall_linux_arm64.go | 191 +
.../golang.org/x/sys/unix/syscall_linux_mips64x.go | 209 +
.../golang.org/x/sys/unix/syscall_linux_mipsx.go | 239 +
.../golang.org/x/sys/unix/syscall_linux_ppc64x.go | 135 +
.../golang.org/x/sys/unix/syscall_linux_s390x.go | 328 +
.../golang.org/x/sys/unix/syscall_linux_sparc64.go | 169 +
vendor/golang.org/x/sys/unix/syscall_netbsd.go | 472 +
vendor/golang.org/x/sys/unix/syscall_netbsd_386.go | 42 +
.../golang.org/x/sys/unix/syscall_netbsd_amd64.go | 42 +
vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go | 42 +
vendor/golang.org/x/sys/unix/syscall_no_getwd.go | 11 +
vendor/golang.org/x/sys/unix/syscall_openbsd.go | 282 +
.../golang.org/x/sys/unix/syscall_openbsd_386.go | 42 +
.../golang.org/x/sys/unix/syscall_openbsd_amd64.go | 42 +
.../golang.org/x/sys/unix/syscall_openbsd_arm.go | 44 +
vendor/golang.org/x/sys/unix/syscall_solaris.go | 716 +
.../golang.org/x/sys/unix/syscall_solaris_amd64.go | 35 +
vendor/golang.org/x/sys/unix/syscall_unix.go | 293 +
vendor/golang.org/x/sys/unix/syscall_unix_gc.go | 15 +
vendor/golang.org/x/sys/unix/zerrors_darwin_386.go | 1673 ++
.../golang.org/x/sys/unix/zerrors_darwin_amd64.go | 1673 ++
vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go | 1673 ++
.../golang.org/x/sys/unix/zerrors_darwin_arm64.go | 1673 ++
.../x/sys/unix/zerrors_dragonfly_amd64.go | 1568 ++
.../golang.org/x/sys/unix/zerrors_freebsd_386.go | 1706 ++
.../golang.org/x/sys/unix/zerrors_freebsd_amd64.go | 1707 ++
.../golang.org/x/sys/unix/zerrors_freebsd_arm.go | 1715 ++
vendor/golang.org/x/sys/unix/zerrors_linux_386.go | 2187 +++
.../golang.org/x/sys/unix/zerrors_linux_amd64.go | 2188 +++
vendor/golang.org/x/sys/unix/zerrors_linux_arm.go | 2192 +++
.../golang.org/x/sys/unix/zerrors_linux_arm64.go | 2177 +++
vendor/golang.org/x/sys/unix/zerrors_linux_mips.go | 2196 +++
.../golang.org/x/sys/unix/zerrors_linux_mips64.go | 2196 +++
.../x/sys/unix/zerrors_linux_mips64le.go | 2196 +++
.../golang.org/x/sys/unix/zerrors_linux_mipsle.go | 2196 +++
.../golang.org/x/sys/unix/zerrors_linux_ppc64.go | 2250 +++
.../golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 2250 +++
.../golang.org/x/sys/unix/zerrors_linux_s390x.go | 2249 +++
.../golang.org/x/sys/unix/zerrors_linux_sparc64.go | 2142 +++
vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go | 1712 ++
.../golang.org/x/sys/unix/zerrors_netbsd_amd64.go | 1702 ++
vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go | 1691 ++
.../golang.org/x/sys/unix/zerrors_openbsd_386.go | 1584 ++
.../golang.org/x/sys/unix/zerrors_openbsd_amd64.go | 1583 ++
.../golang.org/x/sys/unix/zerrors_openbsd_arm.go | 1586 ++
.../golang.org/x/sys/unix/zerrors_solaris_amd64.go | 1483 ++
.../golang.org/x/sys/unix/zsyscall_darwin_386.go | 1609 ++
.../golang.org/x/sys/unix/zsyscall_darwin_amd64.go | 1609 ++
.../golang.org/x/sys/unix/zsyscall_darwin_arm.go | 1609 ++
.../golang.org/x/sys/unix/zsyscall_darwin_arm64.go | 1609 ++
.../x/sys/unix/zsyscall_dragonfly_amd64.go | 1440 ++
.../golang.org/x/sys/unix/zsyscall_freebsd_386.go | 1877 ++
.../x/sys/unix/zsyscall_freebsd_amd64.go | 1877 ++
.../golang.org/x/sys/unix/zsyscall_freebsd_arm.go | 1877 ++
vendor/golang.org/x/sys/unix/zsyscall_linux_386.go | 1953 +++
.../golang.org/x/sys/unix/zsyscall_linux_amd64.go | 2146 +++
vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go | 2055 +++
.../golang.org/x/sys/unix/zsyscall_linux_arm64.go | 2039 +++
.../golang.org/x/sys/unix/zsyscall_linux_mips.go | 2111 +++
.../golang.org/x/sys/unix/zsyscall_linux_mips64.go | 2105 +++
.../x/sys/unix/zsyscall_linux_mips64le.go | 2105 +++
.../golang.org/x/sys/unix/zsyscall_linux_mipsle.go | 2111 +++
.../golang.org/x/sys/unix/zsyscall_linux_ppc64.go | 2157 +++
.../x/sys/unix/zsyscall_linux_ppc64le.go | 2157 +++
.../golang.org/x/sys/unix/zsyscall_linux_s390x.go | 1937 +++
.../x/sys/unix/zsyscall_linux_sparc64.go | 1833 ++
.../golang.org/x/sys/unix/zsyscall_netbsd_386.go | 1346 ++
.../golang.org/x/sys/unix/zsyscall_netbsd_amd64.go | 1346 ++
.../golang.org/x/sys/unix/zsyscall_netbsd_arm.go | 1346 ++
.../golang.org/x/sys/unix/zsyscall_openbsd_386.go | 1404 ++
.../x/sys/unix/zsyscall_openbsd_amd64.go | 1404 ++
.../golang.org/x/sys/unix/zsyscall_openbsd_arm.go | 1404 ++
.../x/sys/unix/zsyscall_solaris_amd64.go | 1600 ++
vendor/golang.org/x/sys/unix/zsysctl_openbsd.go | 270 +
vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go | 398 +
.../golang.org/x/sys/unix/zsysnum_darwin_amd64.go | 398 +
vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go | 426 +
.../golang.org/x/sys/unix/zsysnum_darwin_arm64.go | 426 +
.../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 +
.../golang.org/x/sys/unix/zsysnum_freebsd_386.go | 353 +
.../golang.org/x/sys/unix/zsysnum_freebsd_amd64.go | 353 +
.../golang.org/x/sys/unix/zsysnum_freebsd_arm.go | 353 +
vendor/golang.org/x/sys/unix/zsysnum_linux_386.go | 388 +
.../golang.org/x/sys/unix/zsysnum_linux_amd64.go | 341 +
vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go | 361 +
.../golang.org/x/sys/unix/zsysnum_linux_arm64.go | 285 +
vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go | 374 +
.../golang.org/x/sys/unix/zsysnum_linux_mips64.go | 334 +
.../x/sys/unix/zsysnum_linux_mips64le.go | 334 +
.../golang.org/x/sys/unix/zsysnum_linux_mipsle.go | 374 +
.../golang.org/x/sys/unix/zsysnum_linux_ppc64.go | 369 +
.../golang.org/x/sys/unix/zsysnum_linux_ppc64le.go | 369 +
.../golang.org/x/sys/unix/zsysnum_linux_s390x.go | 331 +
.../golang.org/x/sys/unix/zsysnum_linux_sparc64.go | 348 +
vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go | 274 +
.../golang.org/x/sys/unix/zsysnum_netbsd_amd64.go | 274 +
vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go | 274 +
.../golang.org/x/sys/unix/zsysnum_openbsd_386.go | 207 +
.../golang.org/x/sys/unix/zsysnum_openbsd_amd64.go | 207 +
.../golang.org/x/sys/unix/zsysnum_openbsd_arm.go | 213 +
.../golang.org/x/sys/unix/zsysnum_solaris_amd64.go | 13 +
vendor/golang.org/x/sys/unix/ztypes_darwin_386.go | 462 +
.../golang.org/x/sys/unix/ztypes_darwin_amd64.go | 472 +
vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go | 463 +
.../golang.org/x/sys/unix/ztypes_darwin_arm64.go | 471 +
.../x/sys/unix/ztypes_dragonfly_amd64.go | 448 +
vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go | 521 +
.../golang.org/x/sys/unix/ztypes_freebsd_amd64.go | 524 +
vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go | 524 +
vendor/golang.org/x/sys/unix/ztypes_linux_386.go | 762 +
vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go | 780 +
vendor/golang.org/x/sys/unix/ztypes_linux_arm.go | 751 +
vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go | 759 +
vendor/golang.org/x/sys/unix/ztypes_linux_mips.go | 756 +
.../golang.org/x/sys/unix/ztypes_linux_mips64.go | 761 +
.../golang.org/x/sys/unix/ztypes_linux_mips64le.go | 761 +
.../golang.org/x/sys/unix/ztypes_linux_mipsle.go | 756 +
vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go | 769 +
.../golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 769 +
vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go | 786 +
.../golang.org/x/sys/unix/ztypes_linux_sparc64.go | 666 +
vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go | 401 +
.../golang.org/x/sys/unix/ztypes_netbsd_amd64.go | 408 +
vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go | 406 +
vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go | 446 +
.../golang.org/x/sys/unix/ztypes_openbsd_amd64.go | 453 +
vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go | 439 +
.../golang.org/x/sys/unix/ztypes_solaris_amd64.go | 442 +
vendor/golang.org/x/sys/windows/asm_windows_386.s | 13 +
.../golang.org/x/sys/windows/asm_windows_amd64.s | 13 +
vendor/golang.org/x/sys/windows/dll_windows.go | 377 +
vendor/golang.org/x/sys/windows/env_unset.go | 15 +
vendor/golang.org/x/sys/windows/env_windows.go | 25 +
vendor/golang.org/x/sys/windows/eventlog.go | 20 +
vendor/golang.org/x/sys/windows/exec_windows.go | 97 +
vendor/golang.org/x/sys/windows/memory_windows.go | 26 +
vendor/golang.org/x/sys/windows/mksyscall.go | 7 +
vendor/golang.org/x/sys/windows/race.go | 30 +
vendor/golang.org/x/sys/windows/race0.go | 25 +
vendor/golang.org/x/sys/windows/registry/key.go | 200 +
.../golang.org/x/sys/windows/registry/mksyscall.go | 7 +
.../golang.org/x/sys/windows/registry/syscall.go | 32 +
vendor/golang.org/x/sys/windows/registry/value.go | 384 +
.../x/sys/windows/registry/zsyscall_windows.go | 120 +
.../golang.org/x/sys/windows/security_windows.go | 435 +
vendor/golang.org/x/sys/windows/service.go | 164 +
vendor/golang.org/x/sys/windows/str.go | 22 +
vendor/golang.org/x/sys/windows/svc/debug/log.go | 56 +
.../golang.org/x/sys/windows/svc/debug/service.go | 45 +
vendor/golang.org/x/sys/windows/svc/event.go | 48 +
.../x/sys/windows/svc/eventlog/install.go | 80 +
.../golang.org/x/sys/windows/svc/eventlog/log.go | 70 +
vendor/golang.org/x/sys/windows/svc/go12.c | 24 +
vendor/golang.org/x/sys/windows/svc/go12.go | 11 +
vendor/golang.org/x/sys/windows/svc/go13.go | 31 +
vendor/golang.org/x/sys/windows/svc/mgr/config.go | 139 +
vendor/golang.org/x/sys/windows/svc/mgr/mgr.go | 162 +
vendor/golang.org/x/sys/windows/svc/mgr/service.go | 72 +
vendor/golang.org/x/sys/windows/svc/security.go | 62 +
vendor/golang.org/x/sys/windows/svc/service.go | 363 +
vendor/golang.org/x/sys/windows/svc/sys_386.s | 68 +
vendor/golang.org/x/sys/windows/svc/sys_amd64.s | 42 +
vendor/golang.org/x/sys/windows/syscall.go | 71 +
vendor/golang.org/x/sys/windows/syscall_windows.go | 1004 ++
vendor/golang.org/x/sys/windows/types_windows.go | 1282 ++
.../golang.org/x/sys/windows/types_windows_386.go | 22 +
.../x/sys/windows/types_windows_amd64.go | 22 +
.../golang.org/x/sys/windows/zsyscall_windows.go | 2428 +++
vendor/golang.org/x/text/LICENSE | 27 +
vendor/golang.org/x/text/PATENTS | 22 +
vendor/golang.org/x/text/README | 23 +
.../golang.org/x/text/secure/bidirule/bidirule.go | 342 +
vendor/golang.org/x/text/transform/transform.go | 705 +
vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 +
vendor/golang.org/x/text/unicode/bidi/bracket.go | 335 +
vendor/golang.org/x/text/unicode/bidi/core.go | 1058 ++
vendor/golang.org/x/text/unicode/bidi/prop.go | 206 +
vendor/golang.org/x/text/unicode/bidi/tables.go | 1779 ++
vendor/golang.org/x/text/unicode/bidi/trieval.go | 60 +
.../golang.org/x/text/unicode/norm/composition.go | 514 +
vendor/golang.org/x/text/unicode/norm/forminfo.go | 256 +
vendor/golang.org/x/text/unicode/norm/input.go | 105 +
vendor/golang.org/x/text/unicode/norm/iter.go | 450 +
vendor/golang.org/x/text/unicode/norm/normalize.go | 608 +
.../golang.org/x/text/unicode/norm/readwriter.go | 125 +
vendor/golang.org/x/text/unicode/norm/tables.go | 7627 ++++++++
vendor/golang.org/x/text/unicode/norm/transform.go | 88 +
vendor/golang.org/x/text/unicode/norm/trie.go | 54 +
vendor/golang.org/x/time/LICENSE | 27 +
vendor/golang.org/x/time/PATENTS | 22 +
vendor/golang.org/x/time/README | 1 +
vendor/golang.org/x/time/rate/rate.go | 368 +
vendor/google.golang.org/api/LICENSE | 27 +
vendor/google.golang.org/api/README.md | 92 +
.../api/googleapi/transport/apikey.go | 38 +
vendor/google.golang.org/api/internal/pool.go | 59 +
vendor/google.golang.org/api/internal/settings.go | 23 +
vendor/google.golang.org/api/iterator/iterator.go | 231 +
vendor/google.golang.org/api/option/option.go | 142 +
.../api/support/bundler/bundler.go | 261 +
vendor/google.golang.org/api/transport/dial.go | 134 +
.../api/transport/dial_appengine.go | 34 +
vendor/google.golang.org/genproto/LICENSE | 202 +
vendor/google.golang.org/genproto/README.md | 28 +
.../googleapis/api/annotations/annotations.pb.go | 65 +
.../genproto/googleapis/api/annotations/http.pb.go | 567 +
.../googleapis/api/distribution/distribution.pb.go | 507 +
.../genproto/googleapis/api/label/label.pb.go | 120 +
.../genproto/googleapis/api/metric/metric.pb.go | 359 +
.../api/monitoredres/monitored_resource.pb.go | 181 +
.../googleapis/logging/type/http_request.pb.go | 224 +
.../googleapis/logging/type/log_severity.pb.go | 111 +
.../genproto/googleapis/logging/v2/log_entry.pb.go | 501 +
.../genproto/googleapis/logging/v2/logging.pb.go | 762 +
.../googleapis/logging/v2/logging_config.pb.go | 735 +
.../googleapis/logging/v2/logging_metrics.pb.go | 565 +
.../genproto/googleapis/rpc/status/status.pb.go | 144 +
vendor/google.golang.org/grpc/LICENSE | 28 +
vendor/google.golang.org/grpc/PATENTS | 22 +
vendor/google.golang.org/grpc/README.md | 41 +
vendor/google.golang.org/grpc/backoff.go | 80 +
vendor/google.golang.org/grpc/balancer.go | 400 +
vendor/google.golang.org/grpc/call.go | 306 + vendor/google.golang.org/grpc/clientconn.go | 1030 ++ vendor/google.golang.org/grpc/codec.go | 118 + vendor/google.golang.org/grpc/codes/code_string.go | 16 + vendor/google.golang.org/grpc/codes/codes.go | 159 + .../grpc/credentials/credentials.go | 234 + .../grpc/credentials/credentials_util_go17.go | 75 + .../grpc/credentials/credentials_util_go18.go | 53 + .../grpc/credentials/credentials_util_pre_go17.go | 72 + .../grpc/credentials/oauth/oauth.go | 180 + vendor/google.golang.org/grpc/doc.go | 6 + vendor/google.golang.org/grpc/go16.go | 56 + vendor/google.golang.org/grpc/go17.go | 55 + vendor/google.golang.org/grpc/grpclb.go | 749 + .../grpc/grpclb/grpc_lb_v1/grpclb.pb.go | 629 + .../grpc/grpclb/grpc_lb_v1/grpclb.proto | 179 + vendor/google.golang.org/grpc/grpclog/logger.go | 93 + .../grpc/health/grpc_health_v1/health.pb.go | 176 + .../grpc/health/grpc_health_v1/health.proto | 20 + vendor/google.golang.org/grpc/health/health.go | 52 + vendor/google.golang.org/grpc/interceptor.go | 90 + vendor/google.golang.org/grpc/internal/internal.go | 49 + .../google.golang.org/grpc/keepalive/keepalive.go | 80 + vendor/google.golang.org/grpc/metadata/metadata.go | 144 + vendor/google.golang.org/grpc/naming/naming.go | 74 + vendor/google.golang.org/grpc/peer/peer.go | 65 + vendor/google.golang.org/grpc/proxy.go | 145 + vendor/google.golang.org/grpc/rpc_util.go | 501 + vendor/google.golang.org/grpc/server.go | 1108 ++ vendor/google.golang.org/grpc/stats/handlers.go | 76 + vendor/google.golang.org/grpc/stats/stats.go | 223 + vendor/google.golang.org/grpc/status/status.go | 145 + vendor/google.golang.org/grpc/stream.go | 633 + vendor/google.golang.org/grpc/tap/tap.go | 54 + vendor/google.golang.org/grpc/trace.go | 119 + vendor/google.golang.org/grpc/transport/control.go | 207 + vendor/google.golang.org/grpc/transport/go16.go | 46 + vendor/google.golang.org/grpc/transport/go17.go | 46 + .../grpc/transport/handler_server.go | 406 + .../grpc/transport/http2_client.go | 1248 ++ .../grpc/transport/http2_server.go | 1070 ++ .../google.golang.org/grpc/transport/http_util.go | 549 + .../google.golang.org/grpc/transport/transport.go | 666 + vendor/gopkg.in/yaml.v2/LICENSE | 13 + vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/README.md | 131 + vendor/gopkg.in/yaml.v2/apic.go | 742 + vendor/gopkg.in/yaml.v2/decode.go | 683 + vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++ vendor/gopkg.in/yaml.v2/encode.go | 306 + vendor/gopkg.in/yaml.v2/parserc.go | 1096 ++ vendor/gopkg.in/yaml.v2/readerc.go | 394 + vendor/gopkg.in/yaml.v2/resolve.go | 208 + vendor/gopkg.in/yaml.v2/scannerc.go | 2710 +++ vendor/gopkg.in/yaml.v2/sorter.go | 104 + vendor/gopkg.in/yaml.v2/writerc.go | 89 + vendor/gopkg.in/yaml.v2/yaml.go | 346 + vendor/gopkg.in/yaml.v2/yamlh.go | 716 + vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 + volume/drivers/adapter.go | 184 + volume/drivers/extpoint.go | 217 + volume/drivers/extpoint_test.go | 23 + volume/drivers/proxy.go | 242 + volume/drivers/proxy_test.go | 132 + volume/local/local.go | 387 + volume/local/local_test.go | 345 + volume/local/local_unix.go | 99 + volume/local/local_windows.go | 46 + volume/store/db.go | 88 + volume/store/errors.go | 76 + volume/store/restore.go | 83 + volume/store/store.go | 669 + volume/store/store_test.go | 234 + volume/store/store_unix.go | 9 + volume/store/store_windows.go | 12 + volume/testutils/testutils.go | 123 + volume/validate.go | 140 + volume/validate_test.go | 43 + volume/validate_test_unix.go | 8 
+ volume/validate_test_windows.go | 6 + volume/volume.go | 374 + volume/volume_copy.go | 23 + volume/volume_copy_unix.go | 8 + volume/volume_copy_windows.go | 6 + volume/volume_linux.go | 56 + volume/volume_linux_test.go | 51 + volume/volume_propagation_linux.go | 47 + volume/volume_propagation_linux_test.go | 65 + volume/volume_propagation_unsupported.go | 24 + volume/volume_test.go | 269 + volume/volume_unix.go | 148 + volume/volume_unsupported.go | 16 + volume/volume_windows.go | 201 + www/static/balena.svg | 65 + www/static/favicon.ico | Bin 0 -> 15086 bytes www/static/features/bandwidth.svg | 22 + www/static/features/failure-resistant.svg | 29 + www/static/features/footprint.svg | 28 + www/static/features/multiple.svg | 24 + www/static/features/storage.svg | 25 + www/static/features/undisturbed.svg | 15 + 4691 files changed, 992622 insertions(+), 94 deletions(-) create mode 100644 AUTHORS create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md create mode 100644 Dockerfile create mode 100644 Dockerfile.aarch64 create mode 100644 Dockerfile.armhf create mode 100644 Dockerfile.build.aarch64 create mode 100644 Dockerfile.build.arm create mode 100644 Dockerfile.build.i386 create mode 100644 Dockerfile.build.x86_64 create mode 100644 Dockerfile.ppc64le create mode 100644 Dockerfile.s390x create mode 100644 Dockerfile.simple create mode 100644 Dockerfile.solaris create mode 100644 Dockerfile.windows create mode 100644 FAQ.md create mode 100644 LICENSE create mode 100644 MAINTAINERS create mode 100644 Makefile create mode 100644 NOTICE create mode 100644 ROADMAP.md create mode 100644 VENDORING.md create mode 100644 VERSION create mode 100644 api/README.md create mode 100644 api/common.go create mode 100644 api/common_test.go create mode 100644 api/common_unix.go create mode 100644 api/common_windows.go create mode 100644 api/errors/errors.go create mode 100644 api/errors/errors_test.go create mode 100644 api/fixtures/keyfile create mode 100644 api/names.go create mode 100644 api/server/backend/build/backend.go create mode 100644 api/server/backend/build/tag.go create mode 100644 api/server/httputils/decoder.go create mode 100644 api/server/httputils/errors.go create mode 100644 api/server/httputils/form.go create mode 100644 api/server/httputils/form_test.go create mode 100644 api/server/httputils/httputils.go create mode 100644 api/server/httputils/httputils_test.go create mode 100644 api/server/httputils/httputils_write_json.go create mode 100644 api/server/httputils/write_log_stream.go create mode 100644 api/server/middleware.go create mode 100644 api/server/middleware/cors.go create mode 100644 api/server/middleware/debug.go create mode 100644 api/server/middleware/experimental.go create mode 100644 api/server/middleware/middleware.go create mode 100644 api/server/middleware/version.go create mode 100644 api/server/middleware/version_test.go create mode 100644 api/server/profiler.go create mode 100644 api/server/router/build/backend.go create mode 100644 api/server/router/build/build.go create mode 100644 api/server/router/build/build_routes.go create mode 100644 api/server/router/checkpoint/backend.go create mode 100644 api/server/router/checkpoint/checkpoint.go create mode 100644 api/server/router/checkpoint/checkpoint_routes.go create mode 100644 api/server/router/container/backend.go create mode 100644 api/server/router/container/container.go create mode 100644 api/server/router/container/container_routes.go create mode 100644 api/server/router/container/copy.go create 
mode 100644 api/server/router/container/exec.go create mode 100644 api/server/router/container/inspect.go create mode 100644 api/server/router/distribution/backend.go create mode 100644 api/server/router/distribution/distribution.go create mode 100644 api/server/router/distribution/distribution_routes.go create mode 100644 api/server/router/experimental.go create mode 100644 api/server/router/image/backend.go create mode 100644 api/server/router/image/image.go create mode 100644 api/server/router/image/image_routes.go create mode 100644 api/server/router/local.go create mode 100644 api/server/router/network/backend.go create mode 100644 api/server/router/network/filter.go create mode 100644 api/server/router/network/filter_test.go create mode 100644 api/server/router/network/network.go create mode 100644 api/server/router/network/network_routes.go create mode 100644 api/server/router/plugin/backend.go create mode 100644 api/server/router/plugin/plugin.go create mode 100644 api/server/router/plugin/plugin_routes.go create mode 100644 api/server/router/router.go create mode 100644 api/server/router/session/backend.go create mode 100644 api/server/router/session/session.go create mode 100644 api/server/router/session/session_routes.go create mode 100644 api/server/router/swarm/backend.go create mode 100644 api/server/router/swarm/cluster.go create mode 100644 api/server/router/swarm/cluster_routes.go create mode 100644 api/server/router/swarm/helpers.go create mode 100644 api/server/router/system/backend.go create mode 100644 api/server/router/system/system.go create mode 100644 api/server/router/system/system_routes.go create mode 100644 api/server/router/volume/backend.go create mode 100644 api/server/router/volume/volume.go create mode 100644 api/server/router/volume/volume_routes.go create mode 100644 api/server/router_swapper.go create mode 100644 api/server/server.go create mode 100644 api/server/server_test.go create mode 100644 api/swagger-gen.yaml create mode 100644 api/swagger.yaml create mode 100644 api/templates/server/operation.gotmpl create mode 100644 api/types/auth.go create mode 100644 api/types/backend/backend.go create mode 100644 api/types/backend/build.go create mode 100644 api/types/blkiodev/blkio.go create mode 100644 api/types/client.go create mode 100644 api/types/configs.go create mode 100644 api/types/container/config.go create mode 100644 api/types/container/container_changes.go create mode 100644 api/types/container/container_create.go create mode 100644 api/types/container/container_top.go create mode 100644 api/types/container/container_update.go create mode 100644 api/types/container/container_wait.go create mode 100644 api/types/container/host_config.go create mode 100644 api/types/container/hostconfig_unix.go create mode 100644 api/types/container/hostconfig_windows.go create mode 100644 api/types/container/waitcondition.go create mode 100644 api/types/error_response.go create mode 100644 api/types/events/events.go create mode 100644 api/types/filters/parse.go create mode 100644 api/types/filters/parse_test.go create mode 100644 api/types/graph_driver_data.go create mode 100644 api/types/id_response.go create mode 100644 api/types/image/image_history.go create mode 100644 api/types/image_delete_response_item.go create mode 100644 api/types/image_summary.go create mode 100644 api/types/mount/mount.go create mode 100644 api/types/network/network.go create mode 100644 api/types/plugin.go create mode 100644 api/types/plugin_device.go create mode 100644 
api/types/plugin_env.go create mode 100644 api/types/plugin_interface_type.go create mode 100644 api/types/plugin_mount.go create mode 100644 api/types/plugin_responses.go create mode 100644 api/types/plugins/logdriver/entry.pb.go create mode 100644 api/types/plugins/logdriver/entry.proto create mode 100644 api/types/plugins/logdriver/gen.go create mode 100644 api/types/plugins/logdriver/io.go create mode 100644 api/types/port.go create mode 100644 api/types/registry/authenticate.go create mode 100644 api/types/registry/registry.go create mode 100644 api/types/seccomp.go create mode 100644 api/types/service_update_response.go create mode 100644 api/types/stats.go create mode 100644 api/types/strslice/strslice.go create mode 100644 api/types/strslice/strslice_test.go create mode 100644 api/types/swarm/common.go create mode 100644 api/types/swarm/config.go create mode 100644 api/types/swarm/container.go create mode 100644 api/types/swarm/network.go create mode 100644 api/types/swarm/node.go create mode 100644 api/types/swarm/runtime.go create mode 100644 api/types/swarm/secret.go create mode 100644 api/types/swarm/service.go create mode 100644 api/types/swarm/swarm.go create mode 100644 api/types/swarm/task.go create mode 100644 api/types/time/duration_convert.go create mode 100644 api/types/time/duration_convert_test.go create mode 100644 api/types/time/timestamp.go create mode 100644 api/types/time/timestamp_test.go create mode 100644 api/types/types.go create mode 100644 api/types/versions/README.md create mode 100644 api/types/versions/compare.go create mode 100644 api/types/versions/compare_test.go create mode 100644 api/types/versions/v1p19/types.go create mode 100644 api/types/versions/v1p20/types.go create mode 100644 api/types/volume.go create mode 100644 api/types/volume/volumes_create.go create mode 100644 api/types/volume/volumes_list.go create mode 100755 build-allarch.sh create mode 100755 build-arm.sh create mode 100755 build.sh create mode 100644 builder/builder.go create mode 100644 builder/dockerfile/bflag.go create mode 100644 builder/dockerfile/bflag_test.go create mode 100644 builder/dockerfile/buildargs.go create mode 100644 builder/dockerfile/buildargs_test.go create mode 100644 builder/dockerfile/builder.go create mode 100644 builder/dockerfile/builder_test.go create mode 100644 builder/dockerfile/builder_unix.go create mode 100644 builder/dockerfile/builder_windows.go create mode 100644 builder/dockerfile/clientsession.go create mode 100644 builder/dockerfile/command/command.go create mode 100644 builder/dockerfile/containerbackend.go create mode 100644 builder/dockerfile/copy.go create mode 100644 builder/dockerfile/copy_test.go create mode 100644 builder/dockerfile/copy_unix.go create mode 100644 builder/dockerfile/copy_windows.go create mode 100644 builder/dockerfile/dispatchers.go create mode 100644 builder/dockerfile/dispatchers_test.go create mode 100644 builder/dockerfile/dispatchers_unix.go create mode 100644 builder/dockerfile/dispatchers_unix_test.go create mode 100644 builder/dockerfile/dispatchers_windows.go create mode 100644 builder/dockerfile/dispatchers_windows_test.go create mode 100644 builder/dockerfile/envVarTest create mode 100644 builder/dockerfile/evaluator.go create mode 100644 builder/dockerfile/evaluator_test.go create mode 100644 builder/dockerfile/evaluator_unix.go create mode 100644 builder/dockerfile/evaluator_windows.go create mode 100644 builder/dockerfile/imagecontext.go create mode 100644 builder/dockerfile/imageprobe.go create mode 
100644 builder/dockerfile/internals.go create mode 100644 builder/dockerfile/internals_test.go create mode 100644 builder/dockerfile/internals_unix.go create mode 100644 builder/dockerfile/internals_windows.go create mode 100644 builder/dockerfile/internals_windows_test.go create mode 100644 builder/dockerfile/metrics.go create mode 100644 builder/dockerfile/mockbackend_test.go create mode 100644 builder/dockerfile/parser/dumper/main.go create mode 100644 builder/dockerfile/parser/json_test.go create mode 100644 builder/dockerfile/parser/line_parsers.go create mode 100644 builder/dockerfile/parser/line_parsers_test.go create mode 100644 builder/dockerfile/parser/parser.go create mode 100644 builder/dockerfile/parser/parser_test.go create mode 100644 builder/dockerfile/parser/split_command.go create mode 100644 builder/dockerfile/parser/testfile-line/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result create mode 100644 builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/brimstone-consuldock/result create mode 100644 builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/brimstone-docker-consul/result create mode 100644 builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/continue-at-eof/result create mode 100644 builder/dockerfile/parser/testfiles/continueIndent/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/continueIndent/result create mode 100644 builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/cpuguy83-nagios/result create mode 100644 builder/dockerfile/parser/testfiles/docker/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/docker/result create mode 100644 builder/dockerfile/parser/testfiles/env/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/env/result create mode 100644 builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/escape-after-comment/result create mode 100644 builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/escape-nonewline/result create mode 100644 builder/dockerfile/parser/testfiles/escape/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/escape/result create mode 100644 builder/dockerfile/parser/testfiles/escapes/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/escapes/result create mode 100644 builder/dockerfile/parser/testfiles/flags/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/flags/result create mode 100644 builder/dockerfile/parser/testfiles/health/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/health/result create mode 100644 builder/dockerfile/parser/testfiles/influxdb/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/influxdb/result create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile create mode 100644 
builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result create mode 100644 builder/dockerfile/parser/testfiles/json/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/json/result create mode 100644 builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result create mode 100644 builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result create mode 100644 builder/dockerfile/parser/testfiles/mail/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/mail/result create mode 100644 builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/multiple-volumes/result create mode 100644 builder/dockerfile/parser/testfiles/mumble/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/mumble/result create mode 100644 builder/dockerfile/parser/testfiles/nginx/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/nginx/result create mode 100644 builder/dockerfile/parser/testfiles/tf2/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/tf2/result create mode 100644 builder/dockerfile/parser/testfiles/weechat/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/weechat/result create mode 100644 builder/dockerfile/parser/testfiles/znc/Dockerfile create mode 100644 builder/dockerfile/parser/testfiles/znc/result create mode 100644 builder/dockerfile/shell_parser.go create mode 100644 builder/dockerfile/shell_parser_test.go create mode 100644 builder/dockerfile/support.go create mode 100644 builder/dockerfile/support_test.go create mode 100644 builder/dockerfile/utils_test.go create mode 100644 builder/dockerfile/wordsTest create mode 100644 builder/dockerignore/dockerignore.go create mode 100644 builder/dockerignore/dockerignore_test.go create mode 100644 builder/fscache/fscache.go create mode 100644 builder/fscache/fscache_test.go create mode 100644 builder/fscache/naivedriver.go create mode 100644 builder/remotecontext/archive.go create mode 100644 builder/remotecontext/detect.go create mode 100644 builder/remotecontext/detect_test.go create mode 100644 builder/remotecontext/filehash.go create mode 100644 builder/remotecontext/generate.go create mode 100644 builder/remotecontext/git.go create mode 100644 builder/remotecontext/git/gitutils.go create mode 100644 builder/remotecontext/git/gitutils_test.go create mode 100644 builder/remotecontext/lazycontext.go create mode 100644 builder/remotecontext/mimetype.go create mode 100644 
builder/remotecontext/mimetype_test.go create mode 100644 builder/remotecontext/remote.go create mode 100644 builder/remotecontext/remote_test.go create mode 100644 builder/remotecontext/tarsum.go create mode 100644 builder/remotecontext/tarsum.pb.go create mode 100644 builder/remotecontext/tarsum.proto create mode 100644 builder/remotecontext/tarsum_test.go create mode 100644 builder/remotecontext/utils_test.go create mode 100644 cli/cobra.go create mode 100644 cli/config/configdir.go create mode 100644 cli/debug/debug.go create mode 100644 cli/debug/debug_test.go create mode 100644 cli/error.go create mode 100644 cli/required.go create mode 100644 client/README.md create mode 100644 client/build_prune.go create mode 100644 client/checkpoint_create.go create mode 100644 client/checkpoint_create_test.go create mode 100644 client/checkpoint_delete.go create mode 100644 client/checkpoint_delete_test.go create mode 100644 client/checkpoint_list.go create mode 100644 client/checkpoint_list_test.go create mode 100644 client/client.go create mode 100644 client/client_mock_test.go create mode 100644 client/client_test.go create mode 100644 client/client_unix.go create mode 100644 client/client_windows.go create mode 100644 client/config_create.go create mode 100644 client/config_create_test.go create mode 100644 client/config_inspect.go create mode 100644 client/config_inspect_test.go create mode 100644 client/config_list.go create mode 100644 client/config_list_test.go create mode 100644 client/config_remove.go create mode 100644 client/config_remove_test.go create mode 100644 client/config_update.go create mode 100644 client/config_update_test.go create mode 100644 client/container_attach.go create mode 100644 client/container_commit.go create mode 100644 client/container_commit_test.go create mode 100644 client/container_copy.go create mode 100644 client/container_copy_test.go create mode 100644 client/container_create.go create mode 100644 client/container_create_test.go create mode 100644 client/container_diff.go create mode 100644 client/container_diff_test.go create mode 100644 client/container_exec.go create mode 100644 client/container_exec_test.go create mode 100644 client/container_export.go create mode 100644 client/container_export_test.go create mode 100644 client/container_inspect.go create mode 100644 client/container_inspect_test.go create mode 100644 client/container_kill.go create mode 100644 client/container_kill_test.go create mode 100644 client/container_list.go create mode 100644 client/container_list_test.go create mode 100644 client/container_logs.go create mode 100644 client/container_logs_test.go create mode 100644 client/container_pause.go create mode 100644 client/container_pause_test.go create mode 100644 client/container_prune.go create mode 100644 client/container_prune_test.go create mode 100644 client/container_remove.go create mode 100644 client/container_remove_test.go create mode 100644 client/container_rename.go create mode 100644 client/container_rename_test.go create mode 100644 client/container_resize.go create mode 100644 client/container_resize_test.go create mode 100644 client/container_restart.go create mode 100644 client/container_restart_test.go create mode 100644 client/container_start.go create mode 100644 client/container_start_test.go create mode 100644 client/container_stats.go create mode 100644 client/container_stats_test.go create mode 100644 client/container_stop.go create mode 100644 client/container_stop_test.go create mode 100644 
create mode 100644 client/container_top_test.go
create mode 100644 client/container_unpause.go
create mode 100644 client/container_unpause_test.go
create mode 100644 client/container_update.go
create mode 100644 client/container_update_test.go
create mode 100644 client/container_wait.go
create mode 100644 client/container_wait_test.go
create mode 100644 client/disk_usage.go
create mode 100644 client/disk_usage_test.go
create mode 100644 client/distribution_inspect.go
create mode 100644 client/distribution_inspect_test.go
create mode 100644 client/errors.go
create mode 100644 client/events.go
create mode 100644 client/events_test.go
create mode 100644 client/hijack.go
create mode 100644 client/image_build.go
create mode 100644 client/image_build_test.go
create mode 100644 client/image_create.go
create mode 100644 client/image_create_test.go
create mode 100644 client/image_delta.go
create mode 100644 client/image_history.go
create mode 100644 client/image_history_test.go
create mode 100644 client/image_import.go
create mode 100644 client/image_import_test.go
create mode 100644 client/image_inspect.go
create mode 100644 client/image_inspect_test.go
create mode 100644 client/image_list.go
create mode 100644 client/image_list_test.go
create mode 100644 client/image_load.go
create mode 100644 client/image_load_test.go
create mode 100644 client/image_prune.go
create mode 100644 client/image_prune_test.go
create mode 100644 client/image_pull.go
create mode 100644 client/image_pull_test.go
create mode 100644 client/image_push.go
create mode 100644 client/image_push_test.go
create mode 100644 client/image_remove.go
create mode 100644 client/image_remove_test.go
create mode 100644 client/image_save.go
create mode 100644 client/image_save_test.go
create mode 100644 client/image_search.go
create mode 100644 client/image_search_test.go
create mode 100644 client/image_tag.go
create mode 100644 client/image_tag_test.go
create mode 100644 client/info.go
create mode 100644 client/info_test.go
create mode 100644 client/interface.go
create mode 100644 client/interface_experimental.go
create mode 100644 client/interface_stable.go
create mode 100644 client/login.go
create mode 100644 client/network_connect.go
create mode 100644 client/network_connect_test.go
create mode 100644 client/network_create.go
create mode 100644 client/network_create_test.go
create mode 100644 client/network_disconnect.go
create mode 100644 client/network_disconnect_test.go
create mode 100644 client/network_inspect.go
create mode 100644 client/network_inspect_test.go
create mode 100644 client/network_list.go
create mode 100644 client/network_list_test.go
create mode 100644 client/network_prune.go
create mode 100644 client/network_prune_test.go
create mode 100644 client/network_remove.go
create mode 100644 client/network_remove_test.go
create mode 100644 client/node_inspect.go
create mode 100644 client/node_inspect_test.go
create mode 100644 client/node_list.go
create mode 100644 client/node_list_test.go
create mode 100644 client/node_remove.go
create mode 100644 client/node_remove_test.go
create mode 100644 client/node_update.go
create mode 100644 client/node_update_test.go
create mode 100644 client/parse_logs.go
create mode 100644 client/parse_logs_test.go
create mode 100644 client/ping.go
create mode 100644 client/ping_test.go
create mode 100644 client/plugin_create.go
create mode 100644 client/plugin_disable.go
create mode 100644 client/plugin_disable_test.go
create mode 100644 client/plugin_enable.go
create mode 100644 client/plugin_enable_test.go
create mode 100644 client/plugin_inspect.go
create mode 100644 client/plugin_inspect_test.go
create mode 100644 client/plugin_install.go
create mode 100644 client/plugin_list.go
create mode 100644 client/plugin_list_test.go
create mode 100644 client/plugin_push.go
create mode 100644 client/plugin_push_test.go
create mode 100644 client/plugin_remove.go
create mode 100644 client/plugin_remove_test.go
create mode 100644 client/plugin_set.go
create mode 100644 client/plugin_set_test.go
create mode 100644 client/plugin_upgrade.go
create mode 100644 client/request.go
create mode 100644 client/request_test.go
create mode 100644 client/secret_create.go
create mode 100644 client/secret_create_test.go
create mode 100644 client/secret_inspect.go
create mode 100644 client/secret_inspect_test.go
create mode 100644 client/secret_list.go
create mode 100644 client/secret_list_test.go
create mode 100644 client/secret_remove.go
create mode 100644 client/secret_remove_test.go
create mode 100644 client/secret_update.go
create mode 100644 client/secret_update_test.go
create mode 100644 client/service_create.go
create mode 100644 client/service_create_test.go
create mode 100644 client/service_inspect.go
create mode 100644 client/service_inspect_test.go
create mode 100644 client/service_list.go
create mode 100644 client/service_list_test.go
create mode 100644 client/service_logs.go
create mode 100644 client/service_logs_test.go
create mode 100644 client/service_remove.go
create mode 100644 client/service_remove_test.go
create mode 100644 client/service_update.go
create mode 100644 client/service_update_test.go
create mode 100644 client/session.go
create mode 100644 client/session/filesync/diffcopy.go
create mode 100644 client/session/filesync/filesync.go
create mode 100644 client/session/filesync/filesync.pb.go
create mode 100644 client/session/filesync/filesync.proto
create mode 100644 client/session/filesync/generate.go
create mode 100644 client/session/filesync/tarstream.go
create mode 100644 client/session/grpc.go
create mode 100644 client/session/manager.go
create mode 100644 client/session/session.go
create mode 100644 client/swarm_get_unlock_key.go
create mode 100644 client/swarm_get_unlock_key_test.go
create mode 100644 client/swarm_init.go
create mode 100644 client/swarm_init_test.go
create mode 100644 client/swarm_inspect.go
create mode 100644 client/swarm_inspect_test.go
create mode 100644 client/swarm_join.go
create mode 100644 client/swarm_join_test.go
create mode 100644 client/swarm_leave.go
create mode 100644 client/swarm_leave_test.go
create mode 100644 client/swarm_unlock.go
create mode 100644 client/swarm_unlock_test.go
create mode 100644 client/swarm_update.go
create mode 100644 client/swarm_update_test.go
create mode 100644 client/task_inspect.go
create mode 100644 client/task_inspect_test.go
create mode 100644 client/task_list.go
create mode 100644 client/task_list_test.go
create mode 100644 client/task_logs.go
create mode 100644 client/testdata/ca.pem
create mode 100644 client/testdata/cert.pem
create mode 100644 client/testdata/key.pem
create mode 100644 client/transport.go
create mode 100644 client/utils.go
create mode 100644 client/version.go
create mode 100644 client/volume_create.go
create mode 100644 client/volume_create_test.go
create mode 100644 client/volume_inspect.go
create mode 100644 client/volume_inspect_test.go
create mode 100644 client/volume_list.go
create mode 100644 client/volume_list_test.go
create mode 100644 client/volume_prune.go
create mode 100644 client/volume_remove.go
create mode 100644 client/volume_remove_test.go
create mode 100644 cmd/docker/main.go
create mode 100644 cmd/dockerd/README.md
create mode 100644 cmd/dockerd/config.go
create mode 100644 cmd/dockerd/config_common_unix.go
create mode 100644 cmd/dockerd/config_experimental.go
create mode 100644 cmd/dockerd/config_solaris.go
create mode 100644 cmd/dockerd/config_unix.go
create mode 100644 cmd/dockerd/config_unix_test.go
create mode 100644 cmd/dockerd/config_windows.go
create mode 100644 cmd/dockerd/daemon.go
create mode 100644 cmd/dockerd/daemon_freebsd.go
create mode 100644 cmd/dockerd/daemon_linux.go
create mode 100644 cmd/dockerd/daemon_solaris.go
create mode 100644 cmd/dockerd/daemon_test.go
create mode 100644 cmd/dockerd/daemon_unix.go
create mode 100644 cmd/dockerd/daemon_unix_test.go
create mode 100644 cmd/dockerd/daemon_windows.go
create mode 100644 cmd/dockerd/docker.go
create mode 100644 cmd/dockerd/docker_windows.go
create mode 100644 cmd/dockerd/hack/malformed_host_override.go
create mode 100644 cmd/dockerd/hack/malformed_host_override_test.go
create mode 100644 cmd/dockerd/metrics.go
create mode 100644 cmd/dockerd/options.go
create mode 100644 cmd/dockerd/options_test.go
create mode 100644 cmd/dockerd/service_unsupported.go
create mode 100644 cmd/dockerd/service_windows.go
create mode 100644 cmd/mobynit/main.go
create mode 100644 container/archive.go
create mode 100644 container/container.go
create mode 100644 container/container_linux.go
create mode 100644 container/container_notlinux.go
create mode 100644 container/container_unit_test.go
create mode 100644 container/container_unix.go
create mode 100644 container/container_windows.go
create mode 100644 container/env.go
create mode 100644 container/env_test.go
create mode 100644 container/health.go
create mode 100644 container/history.go
create mode 100644 container/memory_store.go
create mode 100644 container/memory_store_test.go
create mode 100644 container/monitor.go
create mode 100644 container/mounts_unix.go
create mode 100644 container/mounts_windows.go
create mode 100644 container/state.go
create mode 100644 container/state_solaris.go
create mode 100644 container/state_test.go
create mode 100644 container/state_unix.go
create mode 100644 container/state_windows.go
create mode 100644 container/store.go
create mode 100644 container/stream/attach.go
create mode 100644 container/stream/streams.go
create mode 100644 container/view.go
create mode 100644 container/view_test.go
create mode 100644 contrib/README.md
create mode 100644 contrib/REVIEWERS
create mode 100644 contrib/apparmor/main.go
create mode 100644 contrib/apparmor/template.go
create mode 100755 contrib/builder/deb/aarch64/build.sh
create mode 100644 contrib/builder/deb/aarch64/debian-jessie/Dockerfile
create mode 100644 contrib/builder/deb/aarch64/debian-stretch/Dockerfile
create mode 100755 contrib/builder/deb/aarch64/generate.sh
create mode 100644 contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
create mode 100644 contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
create mode 100644 contrib/builder/deb/amd64/README.md
create mode 100755 contrib/builder/deb/amd64/build.sh
create mode 100644 contrib/builder/deb/amd64/debian-jessie/Dockerfile
create mode 100644 contrib/builder/deb/amd64/debian-stretch/Dockerfile
create mode 100644 contrib/builder/deb/amd64/debian-wheezy/Dockerfile
create mode 100755 contrib/builder/deb/amd64/generate.sh
create mode 100644 contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
create mode 100644 contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
create mode 100644 contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
create mode 100644 contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
create mode 100644 contrib/builder/deb/armhf/debian-jessie/Dockerfile
create mode 100755 contrib/builder/deb/armhf/generate.sh
create mode 100644 contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
create mode 100644 contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
create mode 100644 contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
create mode 100644 contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
create mode 100755 contrib/builder/deb/ppc64le/build.sh
create mode 100755 contrib/builder/deb/ppc64le/generate.sh
create mode 100644 contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
create mode 100644 contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
create mode 100644 contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
create mode 100755 contrib/builder/deb/s390x/build.sh
create mode 100755 contrib/builder/deb/s390x/generate.sh
create mode 100644 contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
create mode 100644 contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
create mode 100644 contrib/builder/rpm/amd64/README.md
create mode 100644 contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
create mode 100755 contrib/builder/rpm/amd64/build.sh
create mode 100644 contrib/builder/rpm/amd64/centos-7/Dockerfile
create mode 100644 contrib/builder/rpm/amd64/fedora-24/Dockerfile
create mode 100644 contrib/builder/rpm/amd64/fedora-25/Dockerfile
create mode 100755 contrib/builder/rpm/amd64/generate.sh
create mode 100644 contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
create mode 100644 contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
create mode 100644 contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
create mode 100644 contrib/builder/rpm/amd64/photon-1.0/Dockerfile
create mode 100755 contrib/builder/rpm/armhf/build.sh
create mode 100644 contrib/builder/rpm/armhf/centos-7/Dockerfile
create mode 100755 contrib/builder/rpm/armhf/generate.sh
create mode 100755 contrib/builder/rpm/ppc64le/build.sh
create mode 100644 contrib/builder/rpm/ppc64le/centos-7/Dockerfile
create mode 100644 contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
create mode 100755 contrib/builder/rpm/ppc64le/generate.sh
create mode 100644 contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
create mode 100755 contrib/builder/rpm/s390x/build.sh
create mode 100644 contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
create mode 100755 contrib/builder/rpm/s390x/generate.sh
create mode 100644 contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
create mode 100755 contrib/check-config.sh
create mode 100644 contrib/desktop-integration/README.md
create mode 100644 contrib/desktop-integration/chromium/Dockerfile
create mode 100644 contrib/desktop-integration/gparted/Dockerfile
create mode 100644 contrib/docker-device-tool/README.md
create mode 100644 contrib/docker-device-tool/device_tool.go
create mode 100644 contrib/docker-device-tool/device_tool_windows.go
create mode 100755 contrib/docker-machine-install-bundle.sh
create mode 100755 contrib/dockerize-disk.sh
create mode 100755 contrib/download-frozen-image-v1.sh
create mode 100755 contrib/download-frozen-image-v2.sh
create mode 100644 contrib/editorconfig
create mode 100644 contrib/gitdm/aliases
create mode 100644 contrib/gitdm/domain-map
create mode 100755 contrib/gitdm/generate_aliases.sh
create mode 100644 contrib/gitdm/gitdm.config
create mode 100644 contrib/httpserver/Dockerfile
create mode 100644 contrib/httpserver/Dockerfile.solaris
create mode 100644 contrib/httpserver/server.go
create mode 100644 contrib/init/openrc/docker.confd
create mode 100644 contrib/init/openrc/docker.initd
create mode 100644 contrib/init/systemd/REVIEWERS
create mode 100644 contrib/init/systemd/docker.service
create mode 100644 contrib/init/systemd/docker.service.rpm
create mode 100644 contrib/init/systemd/docker.socket
create mode 100755 contrib/init/sysvinit-debian/docker
create mode 100644 contrib/init/sysvinit-debian/docker.default
create mode 100755 contrib/init/sysvinit-redhat/docker
create mode 100644 contrib/init/sysvinit-redhat/docker.sysconfig
create mode 100644 contrib/init/upstart/REVIEWERS
create mode 100644 contrib/init/upstart/docker.conf
create mode 100755 contrib/install.sh
create mode 100755 contrib/mac-install-bundle.sh
create mode 100755 contrib/mkimage-alpine.sh
create mode 100644 contrib/mkimage-arch-pacman.conf
create mode 100755 contrib/mkimage-arch.sh
create mode 100644 contrib/mkimage-archarm-pacman.conf
create mode 100755 contrib/mkimage-crux.sh
create mode 100755 contrib/mkimage-pld.sh
create mode 100755 contrib/mkimage-yum.sh
create mode 100755 contrib/mkimage.sh
create mode 100755 contrib/mkimage/.febootstrap-minimize
create mode 100755 contrib/mkimage/busybox-static
create mode 100755 contrib/mkimage/debootstrap
create mode 100755 contrib/mkimage/mageia-urpmi
create mode 100755 contrib/mkimage/rinse
create mode 100755 contrib/mkimage/solaris
create mode 100644 contrib/nnp-test/Dockerfile
create mode 100644 contrib/nnp-test/nnp-test.c
create mode 100755 contrib/nuke-graph-directory.sh
create mode 100755 contrib/project-stats.sh
create mode 100755 contrib/report-issue.sh
create mode 100755 contrib/reprepro/suites.sh
create mode 100644 contrib/syntax/nano/Dockerfile.nanorc
create mode 100644 contrib/syntax/nano/README.md
create mode 100644 contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
create mode 100644 contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
create mode 100644 contrib/syntax/textmate/Docker.tmbundle/info.plist
create mode 100644 contrib/syntax/textmate/README.md
create mode 100644 contrib/syntax/textmate/REVIEWERS
create mode 100644 contrib/syntax/vim/LICENSE
create mode 100644 contrib/syntax/vim/README.md
create mode 100644 contrib/syntax/vim/doc/dockerfile.txt
create mode 100644 contrib/syntax/vim/ftdetect/dockerfile.vim
create mode 100644 contrib/syntax/vim/syntax/dockerfile.vim
create mode 100644 contrib/syscall-test/Dockerfile
create mode 100644 contrib/syscall-test/acct.c
create mode 100644 contrib/syscall-test/exit32.s
create mode 100644 contrib/syscall-test/ns.c
create mode 100644 contrib/syscall-test/raw.c
create mode 100644 contrib/syscall-test/setgid.c
create mode 100644 contrib/syscall-test/setuid.c
create mode 100644 contrib/syscall-test/socket.c
create mode 100644 contrib/syscall-test/userns.c
create mode 100644 contrib/udev/80-docker.rules
create mode 100644 contrib/vagrant-docker/README.md
create mode 100644 daemon/apparmor_default.go
create mode 100644 daemon/apparmor_default_unsupported.go
create mode 100644 daemon/archive.go
create mode 100644 daemon/archive_tarcopyoptions.go
create mode 100644 daemon/archive_tarcopyoptions_unix.go
create mode 100644 daemon/archive_tarcopyoptions_windows.go
create mode 100644 daemon/archive_unix.go
create mode 100644 daemon/archive_windows.go
create mode 100644 daemon/attach.go
create mode 100644 daemon/auth.go
create mode 100644 daemon/bindmount_solaris.go
create mode 100644 daemon/bindmount_unix.go
create mode 100644 daemon/build.go
create mode 100644 daemon/cache.go
create mode 100644 daemon/caps/utils_unix.go
create mode 100644 daemon/changes.go
create mode 100644 daemon/checkpoint.go
create mode 100644 daemon/cluster.go
create mode 100644 daemon/cluster/cluster.go
create mode 100644 daemon/cluster/configs.go
create mode 100644 daemon/cluster/controllers/plugin/controller.go
create mode 100644 daemon/cluster/convert/config.go
create mode 100644 daemon/cluster/convert/container.go
create mode 100644 daemon/cluster/convert/network.go
create mode 100644 daemon/cluster/convert/node.go
create mode 100644 daemon/cluster/convert/secret.go
create mode 100644 daemon/cluster/convert/service.go
create mode 100644 daemon/cluster/convert/service_test.go
create mode 100644 daemon/cluster/convert/swarm.go
create mode 100644 daemon/cluster/convert/task.go
create mode 100644 daemon/cluster/executor/backend.go
create mode 100644 daemon/cluster/executor/container/adapter.go
create mode 100644 daemon/cluster/executor/container/attachment.go
create mode 100644 daemon/cluster/executor/container/container.go
create mode 100644 daemon/cluster/executor/container/controller.go
create mode 100644 daemon/cluster/executor/container/errors.go
create mode 100644 daemon/cluster/executor/container/executor.go
create mode 100644 daemon/cluster/executor/container/health_test.go
create mode 100644 daemon/cluster/executor/container/validate.go
create mode 100644 daemon/cluster/executor/container/validate_test.go
create mode 100644 daemon/cluster/executor/container/validate_unix_test.go
create mode 100644 daemon/cluster/executor/container/validate_windows_test.go
create mode 100644 daemon/cluster/filters.go
create mode 100644 daemon/cluster/filters_test.go
create mode 100644 daemon/cluster/helpers.go
create mode 100644 daemon/cluster/listen_addr.go
create mode 100644 daemon/cluster/listen_addr_linux.go
create mode 100644 daemon/cluster/listen_addr_others.go
create mode 100644 daemon/cluster/listen_addr_solaris.go
create mode 100644 daemon/cluster/networks.go
create mode 100644 daemon/cluster/noderunner.go
create mode 100644 daemon/cluster/nodes.go
create mode 100644 daemon/cluster/provider/network.go
create mode 100644 daemon/cluster/secrets.go
create mode 100644 daemon/cluster/services.go
create mode 100644 daemon/cluster/swarm.go
create mode 100644 daemon/cluster/tasks.go
create mode 100644 daemon/cluster/utils.go
create mode 100644 daemon/commit.go
create mode 100644 daemon/config/config.go
create mode 100644 daemon/config/config_common_unix.go
create mode 100644 daemon/config/config_common_unix_test.go
create mode 100644 daemon/config/config_solaris.go
create mode 100644 daemon/config/config_test.go
create mode 100644 daemon/config/config_unix.go
create mode 100644 daemon/config/config_unix_test.go
create mode 100644 daemon/config/config_windows.go
create mode 100644 daemon/config/config_windows_test.go
create mode 100644 daemon/configs.go
create mode 100644 daemon/configs_linux.go
create mode 100644 daemon/configs_unsupported.go
create mode 100644 daemon/configs_windows.go
create mode 100644 daemon/container.go
create mode 100644 daemon/container_linux.go
create mode 100644 daemon/container_operations.go
create mode 100644 daemon/container_operations_solaris.go
create mode 100644 daemon/container_operations_unix.go
create mode 100644 daemon/container_operations_windows.go
create mode 100644 daemon/container_windows.go
create mode 100644 daemon/create.go
create mode 100644 daemon/create_unix.go
create mode 100644 daemon/create_windows.go
create mode 100644 daemon/daemon.go
create mode 100644 daemon/daemon_experimental.go
create mode 100644 daemon/daemon_linux.go
create mode 100644 daemon/daemon_linux_test.go
create mode 100644 daemon/daemon_solaris.go
create mode 100644 daemon/daemon_test.go
create mode 100644 daemon/daemon_unix.go
create mode 100644 daemon/daemon_unix_test.go
create mode 100644 daemon/daemon_unsupported.go
create mode 100644 daemon/daemon_windows.go
create mode 100644 daemon/debugtrap_unix.go
create mode 100644 daemon/debugtrap_unsupported.go
create mode 100644 daemon/debugtrap_windows.go
create mode 100644 daemon/delete.go
create mode 100644 daemon/delete_test.go
create mode 100644 daemon/dependency.go
create mode 100644 daemon/discovery/discovery.go
create mode 100644 daemon/discovery/discovery_test.go
create mode 100644 daemon/disk_usage.go
create mode 100644 daemon/errors.go
create mode 100644 daemon/events.go
create mode 100644 daemon/events/events.go
create mode 100644 daemon/events/events_test.go
create mode 100644 daemon/events/filter.go
create mode 100644 daemon/events/metrics.go
create mode 100644 daemon/events/testutils/testutils.go
create mode 100644 daemon/events_test.go
create mode 100644 daemon/exec.go
create mode 100644 daemon/exec/exec.go
create mode 100644 daemon/exec_linux.go
create mode 100644 daemon/exec_solaris.go
create mode 100644 daemon/exec_windows.go
create mode 100644 daemon/export.go
create mode 100644 daemon/getsize_unix.go
create mode 100644 daemon/graphdriver/aufs/aufs.go
create mode 100644 daemon/graphdriver/aufs/aufs_test.go
create mode 100644 daemon/graphdriver/aufs/dirs.go
create mode 100644 daemon/graphdriver/aufs/mount.go
create mode 100644 daemon/graphdriver/aufs/mount_linux.go
create mode 100644 daemon/graphdriver/aufs/mount_unsupported.go
create mode 100644 daemon/graphdriver/btrfs/btrfs.go
create mode 100644 daemon/graphdriver/btrfs/btrfs_test.go
create mode 100644 daemon/graphdriver/btrfs/dummy_unsupported.go
create mode 100644 daemon/graphdriver/btrfs/version.go
create mode 100644 daemon/graphdriver/btrfs/version_none.go
create mode 100644 daemon/graphdriver/btrfs/version_test.go
create mode 100644 daemon/graphdriver/counter.go
create mode 100644 daemon/graphdriver/devmapper/README.md
create mode 100644 daemon/graphdriver/devmapper/device_setup.go
create mode 100644 daemon/graphdriver/devmapper/deviceset.go
create mode 100644 daemon/graphdriver/devmapper/devmapper_doc.go
create mode 100644 daemon/graphdriver/devmapper/devmapper_test.go
create mode 100644 daemon/graphdriver/devmapper/driver.go
create mode 100644 daemon/graphdriver/devmapper/mount.go
create mode 100644 daemon/graphdriver/driver.go
create mode 100644 daemon/graphdriver/driver_freebsd.go
create mode 100644 daemon/graphdriver/driver_linux.go
create mode 100644 daemon/graphdriver/driver_solaris.go
create mode 100644 daemon/graphdriver/driver_unsupported.go
create mode 100644 daemon/graphdriver/driver_windows.go
create mode 100644 daemon/graphdriver/fsdiff.go
create mode 100644 daemon/graphdriver/graphtest/graphbench_unix.go
create mode 100644 daemon/graphdriver/graphtest/graphtest_unix.go
create mode 100644 daemon/graphdriver/graphtest/graphtest_windows.go
create mode 100644 daemon/graphdriver/graphtest/testutil.go
create mode 100644 daemon/graphdriver/graphtest/testutil_unix.go
create mode 100644 daemon/graphdriver/lcow/lcow.go
create mode 100644 daemon/graphdriver/overlay/copy.go
create mode 100644 daemon/graphdriver/overlay/overlay.go
create mode 100644 daemon/graphdriver/overlay/overlay_test.go
create mode 100644 daemon/graphdriver/overlay/overlay_unsupported.go
create mode 100644 daemon/graphdriver/overlay2/check.go
create mode 100644 daemon/graphdriver/overlay2/mount.go
create mode 100644 daemon/graphdriver/overlay2/overlay.go
create mode 100644 daemon/graphdriver/overlay2/overlay_test.go
create mode 100644 daemon/graphdriver/overlay2/overlay_unsupported.go
create mode 100644 daemon/graphdriver/overlay2/randomid.go
create mode 100644 daemon/graphdriver/overlayutils/overlayutils.go
create mode 100644 daemon/graphdriver/plugin.go
create mode 100644 daemon/graphdriver/proxy.go
create mode 100644 daemon/graphdriver/quota/projectquota.go
create mode 100644 daemon/graphdriver/register/register_aufs.go
create mode 100644 daemon/graphdriver/register/register_btrfs.go
create mode 100644 daemon/graphdriver/register/register_devicemapper.go
create mode 100644 daemon/graphdriver/register/register_overlay.go
create mode 100644 daemon/graphdriver/register/register_vfs.go
create mode 100644 daemon/graphdriver/register/register_windows.go
create mode 100644 daemon/graphdriver/register/register_zfs.go
create mode 100644 daemon/graphdriver/vfs/driver.go
create mode 100644 daemon/graphdriver/vfs/vfs_test.go
create mode 100644 daemon/graphdriver/windows/windows.go
create mode 100644 daemon/graphdriver/zfs/MAINTAINERS
create mode 100644 daemon/graphdriver/zfs/zfs.go
create mode 100644 daemon/graphdriver/zfs/zfs_freebsd.go
create mode 100644 daemon/graphdriver/zfs/zfs_linux.go
create mode 100644 daemon/graphdriver/zfs/zfs_solaris.go
create mode 100644 daemon/graphdriver/zfs/zfs_test.go
create mode 100644 daemon/graphdriver/zfs/zfs_unsupported.go
create mode 100644 daemon/health.go
create mode 100644 daemon/health_test.go
create mode 100644 daemon/image.go
create mode 100644 daemon/image_delete.go
create mode 100644 daemon/image_exporter.go
create mode 100644 daemon/image_history.go
create mode 100644 daemon/image_inspect.go
create mode 100644 daemon/image_pull.go
create mode 100644 daemon/image_push.go
create mode 100644 daemon/image_tag.go
create mode 100644 daemon/images.go
create mode 100644 daemon/import.go
create mode 100644 daemon/info.go
create mode 100644 daemon/info_unix.go
create mode 100644 daemon/info_unix_test.go
create mode 100644 daemon/info_windows.go
create mode 100644 daemon/initlayer/setup_solaris.go
create mode 100644 daemon/initlayer/setup_unix.go
create mode 100644 daemon/initlayer/setup_windows.go
create mode 100644 daemon/inspect.go
create mode 100644 daemon/inspect_solaris.go
create mode 100644 daemon/inspect_unix.go
create mode 100644 daemon/inspect_windows.go
create mode 100644 daemon/keys.go
create mode 100644 daemon/keys_unsupported.go
create mode 100644 daemon/kill.go
create mode 100644 daemon/links.go
create mode 100644 daemon/links/links.go
create mode 100644 daemon/links/links_test.go
create mode 100644 daemon/list.go
create mode 100644 daemon/list_unix.go
create mode 100644 daemon/list_windows.go
create mode 100644 daemon/logdrivers_linux.go
create mode 100644 daemon/logdrivers_windows.go
create mode 100644 daemon/logger/adapter.go
create mode 100644 daemon/logger/adapter_test.go
create mode 100644 daemon/logger/awslogs/cloudwatchlogs.go
create mode 100644 daemon/logger/awslogs/cloudwatchlogs_test.go
create mode 100644 daemon/logger/awslogs/cwlogsiface_mock_test.go
create mode 100644 daemon/logger/copier.go
create mode 100644 daemon/logger/copier_test.go
create mode 100644 daemon/logger/etwlogs/etwlogs_windows.go
create mode 100644 daemon/logger/factory.go
create mode 100644 daemon/logger/fluentd/fluentd.go
create mode 100644 daemon/logger/gcplogs/gcplogging.go
create mode 100644 daemon/logger/gcplogs/gcplogging_linux.go
create mode 100644 daemon/logger/gcplogs/gcplogging_others.go
create mode 100644 daemon/logger/gelf/gelf.go
create mode 100644 daemon/logger/gelf/gelf_unsupported.go
create mode 100644 daemon/logger/journald/journald.go
create mode 100644 daemon/logger/journald/journald_test.go
create mode 100644 daemon/logger/journald/journald_unsupported.go
create mode 100644 daemon/logger/journald/read.go
create mode 100644 daemon/logger/journald/read_native.go
create mode 100644 daemon/logger/journald/read_native_compat.go
create mode 100644 daemon/logger/journald/read_unsupported.go
create mode 100644 daemon/logger/jsonfilelog/jsonfilelog.go
create mode 100644 daemon/logger/jsonfilelog/jsonfilelog_test.go
create mode 100644 daemon/logger/jsonfilelog/multireader/multireader.go
create mode 100644 daemon/logger/jsonfilelog/multireader/multireader_test.go
create mode 100644 daemon/logger/jsonfilelog/read.go
create mode 100644 daemon/logger/logentries/logentries.go
create mode 100644 daemon/logger/logger.go
create mode 100644 daemon/logger/logger_test.go
create mode 100644 daemon/logger/loggerutils/log_tag.go
create mode 100644 daemon/logger/loggerutils/log_tag_test.go
create mode 100644 daemon/logger/loggerutils/rotatefilewriter.go
create mode 100644 daemon/logger/loginfo.go
create mode 100644 daemon/logger/plugin.go
create mode 100644 daemon/logger/plugin_unix.go
create mode 100644 daemon/logger/plugin_unsupported.go
create mode 100644 daemon/logger/proxy.go
create mode 100644 daemon/logger/ring.go
create mode 100644 daemon/logger/ring_test.go
create mode 100644 daemon/logger/splunk/splunk.go
create mode 100644 daemon/logger/splunk/splunk_test.go
create mode 100644 daemon/logger/splunk/splunkhecmock_test.go
create mode 100644 daemon/logger/syslog/syslog.go
create mode 100644 daemon/logger/syslog/syslog_test.go
create mode 100644 daemon/logs.go
create mode 100644 daemon/logs_test.go
create mode 100644 daemon/metrics.go
create mode 100644 daemon/metrics_unix.go
create mode 100644 daemon/metrics_unsupported.go
create mode 100644 daemon/monitor.go
create mode 100644 daemon/monitor_linux.go
create mode 100644 daemon/monitor_solaris.go
create mode 100644 daemon/monitor_windows.go
create mode 100644 daemon/mounts.go
create mode 100644 daemon/names.go
create mode 100644 daemon/network.go
create mode 100644 daemon/network/settings.go
create mode 100644 daemon/oci_linux.go
create mode 100644 daemon/oci_solaris.go
create mode 100644 daemon/oci_windows.go
create mode 100644 daemon/pause.go
create mode 100644 daemon/prune.go
create mode 100644 daemon/reload.go
create mode 100644 daemon/reload_test.go
create mode 100644 daemon/rename.go
create mode 100644 daemon/resize.go
create mode 100644 daemon/restart.go
create mode 100644 daemon/search.go
create mode 100644 daemon/search_test.go
create mode 100644 daemon/seccomp_disabled.go
create mode 100644 daemon/seccomp_linux.go
create mode 100644 daemon/seccomp_unsupported.go
create mode 100644 daemon/secrets.go
create mode 100644 daemon/secrets_linux.go
create mode 100644 daemon/secrets_unsupported.go
create mode 100644 daemon/secrets_windows.go
create mode 100644 daemon/selinux_linux.go
create mode 100644 daemon/selinux_unsupported.go
create mode 100644 daemon/start.go
create mode 100644 daemon/start_unix.go
create mode 100644 daemon/start_windows.go
create mode 100644 daemon/stats.go
create mode 100644 daemon/stats/collector.go
create mode 100644 daemon/stats/collector_solaris.go
create mode 100644 daemon/stats/collector_unix.go
create mode 100644 daemon/stats/collector_windows.go
create mode 100644 daemon/stats/types.go
create mode 100644 daemon/stats_collector.go
create mode 100644 daemon/stats_unix.go
create mode 100644 daemon/stats_windows.go
create mode 100644 daemon/stop.go
create mode 100644 daemon/top_unix.go
create mode 100644 daemon/top_unix_test.go
create mode 100644 daemon/top_windows.go
create mode 100644 daemon/unpause.go
create mode 100644 daemon/update.go
create mode 100644 daemon/update_linux.go
create mode 100644 daemon/update_solaris.go
create mode 100644 daemon/update_windows.go
create mode 100644 daemon/volumes.go
create mode 100644 daemon/volumes_unit_test.go
create mode 100644 daemon/volumes_unix.go
create mode 100644 daemon/volumes_unix_test.go
create mode 100644 daemon/volumes_windows.go
create mode 100644 daemon/wait.go
create mode 100644 daemon/workdir.go
create mode 100644 distribution/config.go
create mode 100644 distribution/errors.go
create mode 100644 distribution/fixtures/validate_manifest/bad_manifest
create mode 100644 distribution/fixtures/validate_manifest/extra_data_manifest
create mode 100644 distribution/fixtures/validate_manifest/good_manifest
create mode 100644 distribution/metadata/metadata.go
create mode 100644 distribution/metadata/v1_id_service.go
create mode 100644 distribution/metadata/v1_id_service_test.go
create mode 100644 distribution/metadata/v2_metadata_service.go
create mode 100644 distribution/metadata/v2_metadata_service_test.go
create mode 100644 distribution/pull.go
create mode 100644 distribution/pull_v1.go
create mode 100644 distribution/pull_v2.go
create mode 100644 distribution/pull_v2_test.go
create mode 100644 distribution/pull_v2_unix.go
create mode 100644 distribution/pull_v2_windows.go
create mode 100644 distribution/push.go
create mode 100644 distribution/push_v1.go
create mode 100644 distribution/push_v2.go
create mode 100644 distribution/push_v2_test.go
create mode 100644 distribution/registry.go
create mode 100644 distribution/registry_unit_test.go
create mode 100644 distribution/utils/progress.go
create mode 100644 distribution/xfer/download.go
create mode 100644 distribution/xfer/download_test.go
create mode 100644 distribution/xfer/transfer.go
create mode 100644 distribution/xfer/transfer_test.go
create mode 100644 distribution/xfer/upload.go
create mode 100644 distribution/xfer/upload_test.go
create mode 100644 dockerversion/useragent.go
create mode 100644 dockerversion/version_lib.go
create mode 100644 docs/api/v1.18.md
create mode 100644 docs/api/v1.19.md
create mode 100644 docs/api/v1.20.md
create mode 100644 docs/api/v1.21.md
create mode 100644 docs/api/v1.22.md
create mode 100644 docs/api/v1.23.md
create mode 100644 docs/api/v1.24.md
create mode 100644 docs/api/version-history.md
create mode 100644 docs/getting-started.md
create mode 100644 docs/static_files/balena-logo-black.svg
create mode 100644 docs/static_files/contributors.png
create mode 100644 docs/static_files/docker-logo-compressed.png
create mode 100644 docs/static_files/moby-project-logo.png
create mode 100644 hack/Jenkins/W2L/postbuild.sh
create mode 100644 hack/Jenkins/W2L/setup.sh
create mode 100644 hack/Jenkins/readme.md
create mode 100644 hack/README.md
create mode 100755 hack/dind
create mode 100644 hack/dockerfile/binaries-commits
create mode 100755 hack/dockerfile/install-binaries.sh
create mode 100755 hack/generate-authors.sh
create mode 100755 hack/generate-swagger-api.sh
create mode 100644 hack/install.sh
create mode 100644 hack/integration-cli-on-swarm/README.md
create mode 100644 hack/integration-cli-on-swarm/agent/Dockerfile
create mode 100644 hack/integration-cli-on-swarm/agent/master/call.go
create mode 100644 hack/integration-cli-on-swarm/agent/master/master.go
create mode 100644 hack/integration-cli-on-swarm/agent/master/set.go
create mode 100644 hack/integration-cli-on-swarm/agent/master/set_test.go
create mode 100644 hack/integration-cli-on-swarm/agent/types/types.go
create mode 100644 hack/integration-cli-on-swarm/agent/vendor.conf
create mode 100644 hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE
create mode 100644 hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go
create mode 100644 hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go
create mode 100644 hack/integration-cli-on-swarm/agent/worker/executor.go
create mode 100644 hack/integration-cli-on-swarm/agent/worker/worker.go
create mode 100644 hack/integration-cli-on-swarm/host/compose.go
create mode 100644 hack/integration-cli-on-swarm/host/dockercmd.go
create mode 100644 hack/integration-cli-on-swarm/host/enumerate.go
create mode 100644 hack/integration-cli-on-swarm/host/enumerate_test.go
create mode 100644 hack/integration-cli-on-swarm/host/host.go
create mode 100644 hack/integration-cli-on-swarm/host/volume.go
create mode 100644 hack/make.ps1
create mode 100755 hack/make.sh
create mode 100644 hack/make/.binary
create mode 100644 hack/make/.binary-setup
create mode 100644 hack/make/.build-deb/compat
create mode 100644 hack/make/.build-deb/control
create mode 100644 hack/make/.build-deb/docker-engine.bash-completion
create mode 120000 hack/make/.build-deb/docker-engine.docker.default
create mode 120000 hack/make/.build-deb/docker-engine.docker.init
create mode 120000 hack/make/.build-deb/docker-engine.docker.upstart
create mode 100644 hack/make/.build-deb/docker-engine.install
create mode 100644 hack/make/.build-deb/docker-engine.manpages
create mode 100644 hack/make/.build-deb/docker-engine.postinst
create mode 120000 hack/make/.build-deb/docker-engine.udev
create mode 100644 hack/make/.build-deb/docs
create mode 100755 hack/make/.build-deb/rules
create mode 100644 hack/make/.build-rpm/docker-engine.spec
create mode 100644 hack/make/.detect-daemon-osarch
create mode 100644 hack/make/.ensure-emptyfs
create mode 100644 hack/make/.go-autogen
create mode 100644 hack/make/.go-autogen.ps1
create mode 100644 hack/make/.integration-daemon-setup
create mode 100644 hack/make/.integration-daemon-start
create mode 100644 hack/make/.integration-daemon-stop
create mode 100644 hack/make/.integration-test-helpers
create mode 100644 hack/make/.resources-windows/common.rc
create mode 100644 hack/make/.resources-windows/docker.exe.manifest
create mode 100644 hack/make/.resources-windows/docker.ico
create mode 100644 hack/make/.resources-windows/docker.png
create mode 100644 hack/make/.resources-windows/docker.rc
create mode 100644 hack/make/.resources-windows/dockerd.rc
create mode 100644 hack/make/.resources-windows/event_messages.mc
create mode 100644 hack/make/.resources-windows/resources.go
create mode 100644 hack/make/README.md
create mode 100644 hack/make/binary
create mode 100644 hack/make/binary-daemon
create mode 100644 hack/make/binary-docker
create mode 100644 hack/make/build-deb
create mode 100644 hack/make/build-integration-test-binary
create mode 100644 hack/make/build-rpm
create mode 100755 hack/make/clean-apt-repo
create mode 100755 hack/make/clean-yum-repo
create mode 100644 hack/make/cover
create mode 100644 hack/make/cross
create mode 100644 hack/make/dynbinary
create mode 100644 hack/make/dynbinary-daemon
create mode 100644 hack/make/dynbinary-docker
create mode 100755 hack/make/generate-index-listing
create mode 100755 hack/make/install-binary
create mode 100644 hack/make/install-binary-daemon
create mode 100644 hack/make/install-script
create mode 100755 hack/make/release-deb
create mode 100755 hack/make/release-rpm
create mode 100644 hack/make/run
create mode 100755 hack/make/sign-repos
create mode 100755 hack/make/test-deb-install
create mode 100644 hack/make/test-docker-py
create mode 100755 hack/make/test-install-script
create mode 100755 hack/make/test-integration-cli
create mode 100644 hack/make/test-integration-shell
create mode 100755 hack/make/test-old-apt-repo
create mode 100644 hack/make/test-unit
create mode 100644 hack/make/tgz
create mode 100644 hack/make/ubuntu
create mode 100755 hack/make/update-apt-repo
create mode 100644 hack/make/win
create mode 100755 hack/release.sh
create mode 100644 hack/validate/.swagger-yamllint
create mode 100644 hack/validate/.validate
create mode 100755 hack/validate/all
create mode 100755 hack/validate/changelog-date-descending
create mode 100755 hack/validate/changelog-well-formed
create mode 100755 hack/validate/dco
create mode 100755 hack/validate/default
create mode 100755 hack/validate/default-seccomp
create mode 100755 hack/validate/gofmt
create mode 100755 hack/validate/lint
create mode 100755 hack/validate/pkg-imports
create mode 100755 hack/validate/swagger
create mode 100755 hack/validate/swagger-gen
create mode 100755 hack/validate/test-imports
create mode 100755 hack/validate/toml
create mode 100755 hack/validate/vendor
create mode 100755 hack/validate/vet
create mode 100755 hack/vendor.sh
create mode 100644 image/cache/cache.go
create mode 100644 image/cache/compare.go
create mode 100644 image/cache/compare_test.go
create mode 100644 image/fs.go
create mode 100644 image/fs_test.go
create mode 100644 image/image.go
create mode 100644 image/image_test.go
create mode 100644 image/rootfs.go
create mode 100644 image/spec/v1.1.md
create mode 100644 image/spec/v1.2.md
create mode 100644 image/spec/v1.md
create mode 100644 image/store.go
create mode 100644 image/store_test.go
create mode 100644 image/tarexport/load.go
create mode 100644 image/tarexport/save.go
create mode 100644 image/tarexport/tarexport.go
create mode 100644 image/v1/imagev1.go
create mode 100644 image/v1/imagev1_test.go
create mode 100644 integration-cli/benchmark_test.go
create mode 100644 integration-cli/check_test.go
create mode 100644 integration-cli/checker/checker.go
create mode 100644 integration-cli/cli/build/build.go
create mode 100644 integration-cli/cli/build/fakecontext/context.go
create mode 100644 integration-cli/cli/build/fakegit/fakegit.go
create mode 100644 integration-cli/cli/build/fakestorage/fixtures.go
create mode 100644 integration-cli/cli/build/fakestorage/storage.go
create mode 100644 integration-cli/cli/cli.go
create mode 100644 integration-cli/daemon/daemon.go
create mode 100644 integration-cli/daemon/daemon_swarm.go
create mode 100644 integration-cli/daemon/daemon_unix.go
create mode 100644 integration-cli/daemon/daemon_windows.go
create mode 100644 integration-cli/daemon_swarm_hack_test.go
create mode 100644 integration-cli/docker_api_attach_test.go
create mode 100644 integration-cli/docker_api_auth_test.go
create mode 100644 integration-cli/docker_api_build_test.go
create mode 100644 integration-cli/docker_api_containers_test.go
create mode 100644 integration-cli/docker_api_create_test.go
create mode 100644 integration-cli/docker_api_events_test.go
create mode 100644 integration-cli/docker_api_exec_resize_test.go
create mode 100644 integration-cli/docker_api_exec_test.go
create mode 100644 integration-cli/docker_api_images_test.go
create mode 100644 integration-cli/docker_api_info_test.go
create mode 100644 integration-cli/docker_api_inspect_test.go
create mode 100644 integration-cli/docker_api_inspect_unix_test.go
create mode 100644 integration-cli/docker_api_logs_test.go
create mode 100644 integration-cli/docker_api_network_test.go
create mode 100644 integration-cli/docker_api_resize_test.go
create mode 100644 integration-cli/docker_api_session_test.go
create mode 100644 integration-cli/docker_api_stats_test.go
create mode 100644 integration-cli/docker_api_stats_unix_test.go
create mode 100644 integration-cli/docker_api_swarm_config_test.go
create mode 100644 integration-cli/docker_api_swarm_node_test.go
create mode 100644 integration-cli/docker_api_swarm_secret_test.go
create mode 100644 integration-cli/docker_api_swarm_service_test.go
create mode 100644 integration-cli/docker_api_swarm_test.go
create mode 100644 integration-cli/docker_api_test.go
create mode 100644 integration-cli/docker_api_update_unix_test.go
create mode 100644 integration-cli/docker_api_version_test.go
create mode 100644 integration-cli/docker_api_volumes_test.go
create mode 100644 integration-cli/docker_cli_attach_test.go
create mode 100644 integration-cli/docker_cli_attach_unix_test.go
create mode 100644 integration-cli/docker_cli_authz_plugin_v2_test.go
create mode 100644 integration-cli/docker_cli_authz_unix_test.go
create mode 100644 integration-cli/docker_cli_build_test.go
create mode 100644 integration-cli/docker_cli_build_unix_test.go
create mode 100644 integration-cli/docker_cli_by_digest_test.go
create mode 100644 integration-cli/docker_cli_commit_test.go
create mode 100644 integration-cli/docker_cli_config_create_test.go
create mode 100644 integration-cli/docker_cli_config_inspect_test.go
create mode 100644 integration-cli/docker_cli_config_ls_test.go
create mode 100644 integration-cli/docker_cli_config_test.go
create mode 100644 integration-cli/docker_cli_cp_from_container_test.go
create mode 100644 integration-cli/docker_cli_cp_test.go
create mode 100644 integration-cli/docker_cli_cp_to_container_test.go
create mode 100644 integration-cli/docker_cli_cp_to_container_unix_test.go
create mode 100644 integration-cli/docker_cli_cp_utils_test.go
create mode 100644 integration-cli/docker_cli_create_test.go
create mode 100644 integration-cli/docker_cli_create_unix_test.go
create mode 100644 integration-cli/docker_cli_daemon_plugins_test.go
create mode 100644 integration-cli/docker_cli_daemon_test.go
create mode 100644 integration-cli/docker_cli_diff_test.go
create mode 100644 integration-cli/docker_cli_events_test.go
create mode 100644 integration-cli/docker_cli_events_unix_test.go
create mode 100644 integration-cli/docker_cli_exec_test.go
create mode 100644 integration-cli/docker_cli_exec_unix_test.go
create mode 100644 integration-cli/docker_cli_experimental_test.go
create mode 100644 integration-cli/docker_cli_export_import_test.go
create mode 100644 integration-cli/docker_cli_external_graphdriver_unix_test.go
create mode 100644 integration-cli/docker_cli_external_volume_driver_unix_test.go
create mode 100644 integration-cli/docker_cli_health_test.go
create mode 100644 integration-cli/docker_cli_help_test.go
create mode 100644 integration-cli/docker_cli_history_test.go
create mode 100644 integration-cli/docker_cli_images_test.go
create mode 100644 integration-cli/docker_cli_import_test.go
create mode 100644 integration-cli/docker_cli_info_test.go
create mode 100644 integration-cli/docker_cli_info_unix_test.go
create mode 100644 integration-cli/docker_cli_inspect_test.go
create mode 100644 integration-cli/docker_cli_kill_test.go
create mode 100644 integration-cli/docker_cli_links_test.go
create mode 100644 integration-cli/docker_cli_links_unix_test.go
create mode 100644 integration-cli/docker_cli_login_test.go
create mode 100644 integration-cli/docker_cli_logout_test.go
create mode 100644 integration-cli/docker_cli_logs_bench_test.go
create mode 100644 integration-cli/docker_cli_logs_test.go
create mode 100644 integration-cli/docker_cli_nat_test.go
create mode 100644 integration-cli/docker_cli_netmode_test.go
create mode 100644 integration-cli/docker_cli_network_unix_test.go
create mode 100644 integration-cli/docker_cli_oom_killed_test.go
create mode 100644 integration-cli/docker_cli_pause_test.go
create mode 100644 integration-cli/docker_cli_plugins_logdriver_test.go
create mode 100644 integration-cli/docker_cli_plugins_test.go
create mode 100644 integration-cli/docker_cli_port_test.go
create mode 100644 integration-cli/docker_cli_proxy_test.go
create mode 100644 integration-cli/docker_cli_prune_unix_test.go
create mode 100644 integration-cli/docker_cli_ps_test.go
create mode 100644 integration-cli/docker_cli_pull_local_test.go
create mode 100644 integration-cli/docker_cli_pull_test.go
create mode 100644 integration-cli/docker_cli_pull_trusted_test.go
create mode 100644 integration-cli/docker_cli_push_test.go
create mode 100644 integration-cli/docker_cli_registry_user_agent_test.go
create mode 100644 integration-cli/docker_cli_rename_test.go
create mode 100644 integration-cli/docker_cli_restart_test.go
create mode 100644 integration-cli/docker_cli_rm_test.go
create mode 100644 integration-cli/docker_cli_rmi_test.go
create mode 100644 integration-cli/docker_cli_run_test.go
create mode 100644 integration-cli/docker_cli_run_unix_test.go
create mode 100644 integration-cli/docker_cli_save_load_test.go
create mode 100644 integration-cli/docker_cli_save_load_unix_test.go
create mode 100644 integration-cli/docker_cli_search_test.go
create mode 100644 integration-cli/docker_cli_secret_create_test.go
create mode 100644 integration-cli/docker_cli_secret_inspect_test.go
create mode 100644 integration-cli/docker_cli_secret_ls_test.go
create mode 100644 integration-cli/docker_cli_service_create_test.go
create mode 100644 integration-cli/docker_cli_service_health_test.go
create mode 100644 integration-cli/docker_cli_service_logs_test.go
create mode 100644 integration-cli/docker_cli_service_scale_test.go
create mode 100644 integration-cli/docker_cli_service_update_test.go
create mode 100644 integration-cli/docker_cli_sni_test.go
create mode 100644 integration-cli/docker_cli_stack_test.go
create mode 100644 integration-cli/docker_cli_start_test.go
create mode 100644 integration-cli/docker_cli_stats_test.go
create mode 100644 integration-cli/docker_cli_stop_test.go
create mode 100644 integration-cli/docker_cli_swarm_test.go
create mode 100644 integration-cli/docker_cli_swarm_unix_test.go
create mode 100644 integration-cli/docker_cli_tag_test.go
create mode 100644 integration-cli/docker_cli_top_test.go
create mode 100644 integration-cli/docker_cli_update_test.go
create mode 100644 integration-cli/docker_cli_update_unix_test.go
create mode 100644 integration-cli/docker_cli_userns_test.go
create mode 100644 integration-cli/docker_cli_v2_only_test.go
create mode 100644 integration-cli/docker_cli_version_test.go
create mode 100644 integration-cli/docker_cli_volume_test.go
create mode 100644 integration-cli/docker_cli_wait_test.go
create mode 100644 integration-cli/docker_deprecated_api_v124_test.go
create mode 100644 integration-cli/docker_deprecated_api_v124_unix_test.go
create mode 100644 integration-cli/docker_experimental_network_test.go
create mode 100644 integration-cli/docker_hub_pull_suite_test.go
create mode 100644 integration-cli/docker_utils_test.go
create mode 100644 integration-cli/environment/clean.go
create mode 100644 integration-cli/environment/environment.go
create mode 100644 integration-cli/environment/protect.go
create mode 100644 integration-cli/events_utils_test.go
create mode 100755 integration-cli/fixtures/auth/docker-credential-shell-test
create mode 100644 integration-cli/fixtures/credentialspecs/valid.json
create mode 100644 integration-cli/fixtures/deploy/default.yaml
create mode 100644 integration-cli/fixtures/deploy/remove.yaml
create mode 100644 integration-cli/fixtures/deploy/secrets.yaml
create mode 100644 integration-cli/fixtures/https/ca.pem
create mode 100644 integration-cli/fixtures/https/client-cert.pem
create mode 100644 integration-cli/fixtures/https/client-key.pem
create mode 100644 integration-cli/fixtures/https/client-rogue-cert.pem
create mode 100644 integration-cli/fixtures/https/client-rogue-key.pem
create mode 100644 integration-cli/fixtures/https/server-cert.pem
create mode 100644 integration-cli/fixtures/https/server-key.pem
create mode 100644 integration-cli/fixtures/https/server-rogue-cert.pem
create mode 100644 integration-cli/fixtures/https/server-rogue-key.pem
create mode 100644 integration-cli/fixtures/load/emptyLayer.tar
create mode 100644 integration-cli/fixtures/load/frozen.go
create mode 100644 integration-cli/fixtures/notary/delgkey1.crt
create mode 100644 integration-cli/fixtures/notary/delgkey1.key
create mode 100644 integration-cli/fixtures/notary/delgkey2.crt
create mode 100644 integration-cli/fixtures/notary/delgkey2.key
create mode 100644 integration-cli/fixtures/notary/delgkey3.crt
create mode 100644 integration-cli/fixtures/notary/delgkey3.key
create mode 100644 integration-cli/fixtures/notary/delgkey4.crt
create mode 100644 integration-cli/fixtures/notary/delgkey4.key
create mode 100755 integration-cli/fixtures/notary/gen.sh
create mode 100644 integration-cli/fixtures/notary/localhost.cert
create mode 100644 integration-cli/fixtures/notary/localhost.key
create mode 100644 integration-cli/fixtures/registry/cert.pem
create mode 100644 integration-cli/fixtures/secrets/default
create mode 100644 integration-cli/fixtures_linux_daemon_test.go
create mode 100644 integration-cli/registry/registry.go
create mode 100644 integration-cli/registry/registry_mock.go
create mode 100644 integration-cli/registry/requirement.go
create mode 100644 integration-cli/request/npipe.go
create mode 100644 integration-cli/request/npipe_windows.go
create mode 100644 integration-cli/request/request.go
create mode 100644 integration-cli/requirement/requirement.go
create mode 100644 integration-cli/requirements_test.go
create mode 100644 integration-cli/requirements_unix_test.go
create mode 100644 integration-cli/test_vars_exec_test.go
create mode 100644 integration-cli/test_vars_noexec_test.go
create mode 100644 integration-cli/test_vars_noseccomp_test.go
create mode 100644 integration-cli/test_vars_seccomp_test.go
create mode 100644 integration-cli/test_vars_test.go
create mode 100644 integration-cli/test_vars_unix_test.go
create mode 100644 integration-cli/test_vars_windows_test.go
create mode 100644 integration-cli/trust_server_test.go
create mode 100644 integration-cli/utils_test.go
create mode 100644 landr.conf.js
create mode 100644 layer/empty.go
create mode 100644 layer/empty_test.go
create mode 100644 layer/filestore.go
create mode 100644 layer/filestore_test.go
create mode 100644 layer/filestore_unix.go
create mode 100644 layer/filestore_windows.go
create mode 100644 layer/layer.go
create mode 100644 layer/layer_store.go
create mode 100644 layer/layer_store_windows.go
create mode 100644 layer/layer_test.go
create mode 100644 layer/layer_unix.go
create mode 100644 layer/layer_unix_test.go
create mode 100644 layer/layer_windows.go
create mode 100644 layer/migration.go
create mode 100644 layer/migration_test.go
create mode 100644 layer/mount_test.go
create mode 100644 layer/mounted_layer.go
create mode 100644 layer/ro_layer.go
create mode 100644 layer/ro_layer_unix.go
create mode 100644 layer/ro_layer_windows.go
create mode 100644 libcontainerd/client.go
create mode 100644 libcontainerd/client_linux.go
create mode 100644 libcontainerd/client_solaris.go
create mode 100644 libcontainerd/client_unix.go
create mode 100644 libcontainerd/client_windows.go
create mode 100644 libcontainerd/container.go
create mode 100644 libcontainerd/container_unix.go
create mode 100644 libcontainerd/container_windows.go
create mode 100644 libcontainerd/oom_linux.go
create mode 100644 libcontainerd/oom_solaris.go
create mode 100644 libcontainerd/pausemonitor_unix.go
create mode 100644 libcontainerd/process.go
create mode 100644 libcontainerd/process_unix.go
create mode 100644 libcontainerd/process_windows.go
create mode 100644 libcontainerd/queue_unix.go
create mode 100644 libcontainerd/queue_unix_test.go
create mode 100644 libcontainerd/remote.go
create mode 100644 libcontainerd/remote_unix.go
create mode 100644 libcontainerd/remote_windows.go
create mode 100644 libcontainerd/types.go
create mode 100644 libcontainerd/types_linux.go
create mode 100644 libcontainerd/types_solaris.go
create mode 100644 libcontainerd/types_windows.go
create mode 100644 libcontainerd/utils_linux.go
create mode 100644 libcontainerd/utils_solaris.go
create mode 100644 libcontainerd/utils_windows.go
create mode 100644 libcontainerd/utils_windows_test.go
create mode 100644 migrate/v1/migratev1.go
create mode 100644 migrate/v1/migratev1_test.go
create mode 100644 oci/defaults.go
create mode 100644 oci/devices_linux.go
create mode 100644 oci/devices_unsupported.go
create mode 100644 oci/namespaces.go
create mode 100644 opts/env.go
create mode 100644 opts/env_test.go
create mode 100644 opts/hosts.go
create mode 100644 opts/hosts_test.go
create mode 100644 opts/hosts_unix.go
create mode 100644 opts/hosts_windows.go
create mode 100644 opts/ip.go
create mode 100644 opts/ip_test.go
create mode 100644 opts/opts.go
create mode 100644 opts/opts_test.go
create mode 100644 opts/opts_unix.go
create mode 100644 opts/opts_windows.go
create mode 100644 opts/quotedstring.go
create mode 100644 opts/quotedstring_test.go
create mode 100644 opts/runtime.go
create mode 100644 opts/ulimit.go
create mode 100644 opts/ulimit_test.go
create mode 100644 packaging/docker-engine.service
create mode 100644 packaging/docker-engine.socket
create mode 100644 pkg/README.md
create mode 100644 pkg/aaparser/aaparser.go
create mode 100644 pkg/aaparser/aaparser_test.go
create mode 100644 pkg/archive/README.md
create mode 100644 pkg/archive/archive.go
create mode 100644 pkg/archive/archive_linux.go
create mode 100644 pkg/archive/archive_linux_test.go
create mode 100644 pkg/archive/archive_other.go
create mode 100644 pkg/archive/archive_test.go
create mode 100644 pkg/archive/archive_unix.go
create mode 100644 pkg/archive/archive_unix_test.go
create mode 100644 pkg/archive/archive_windows.go
create mode 100644 pkg/archive/archive_windows_test.go
create mode 100644 pkg/archive/changes.go
create mode 100644 pkg/archive/changes_linux.go
create mode 100644 pkg/archive/changes_other.go
create mode 100644 pkg/archive/changes_posix_test.go
create mode 100644 pkg/archive/changes_test.go
create mode 100644 pkg/archive/changes_unix.go
create mode 100644 pkg/archive/changes_windows.go
create mode 100644 pkg/archive/copy.go
create mode 100644 pkg/archive/copy_unix.go
create mode 100644 pkg/archive/copy_unix_test.go
create mode 100644 pkg/archive/copy_windows.go
create mode 100644 pkg/archive/diff.go
create mode 100644 pkg/archive/diff_test.go
create mode 100644 pkg/archive/example_changes.go
create mode 100644 pkg/archive/testdata/broken.tar
create mode 100644 pkg/archive/time_linux.go
create mode 100644 pkg/archive/time_unsupported.go
create mode 100644 pkg/archive/utils_test.go
create mode 100644 pkg/archive/whiteouts.go
create mode 100644 pkg/archive/wrap.go
create mode 100644 pkg/archive/wrap_test.go
create mode 100644 pkg/authorization/api.go
create mode 100644 pkg/authorization/api_test.go
create mode 100644 pkg/authorization/authz.go
create mode 100644 pkg/authorization/authz_unix_test.go
create mode 100644 pkg/authorization/middleware.go
create mode 100644 pkg/authorization/middleware_test.go
create mode 100644 pkg/authorization/middleware_unix_test.go
create mode 100644 pkg/authorization/plugin.go
create mode 100644 pkg/authorization/response.go
create mode 100644 pkg/broadcaster/unbuffered.go
create mode 100644 pkg/broadcaster/unbuffered_test.go
create mode 100644 pkg/chrootarchive/archive.go
create mode 100644 pkg/chrootarchive/archive_test.go
create mode 100644 pkg/chrootarchive/archive_unix.go
create mode 100644 pkg/chrootarchive/archive_windows.go
create mode 100644 pkg/chrootarchive/chroot_linux.go
create mode 100644 pkg/chrootarchive/chroot_unix.go
create mode 100644 pkg/chrootarchive/diff.go
create mode 100644 pkg/chrootarchive/diff_unix.go
create mode 100644 pkg/chrootarchive/diff_windows.go
create mode 100644 pkg/chrootarchive/init_unix.go
create mode 100644 pkg/chrootarchive/init_windows.go
create mode 100644 pkg/devicemapper/devmapper.go
create mode 100644 pkg/devicemapper/devmapper_log.go
create mode 100644 pkg/devicemapper/devmapper_wrapper.go
create mode 100644 pkg/devicemapper/devmapper_wrapper_deferred_remove.go
create mode 100644 pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
create mode 100644 pkg/devicemapper/ioctl.go
create mode 100644 pkg/devicemapper/log.go
create mode 100644 pkg/directory/directory.go
create mode 100644 pkg/directory/directory_test.go
create mode 100644 pkg/directory/directory_unix.go
create mode 100644 pkg/directory/directory_windows.go
pkg/directory/directory_windows.go create mode 100644 pkg/discovery/README.md create mode 100644 pkg/discovery/backends.go create mode 100644 pkg/discovery/discovery.go create mode 100644 pkg/discovery/discovery_test.go create mode 100644 pkg/discovery/entry.go create mode 100644 pkg/discovery/file/file.go create mode 100644 pkg/discovery/file/file_test.go create mode 100644 pkg/discovery/generator.go create mode 100644 pkg/discovery/generator_test.go create mode 100644 pkg/discovery/kv/kv.go create mode 100644 pkg/discovery/kv/kv_test.go create mode 100644 pkg/discovery/memory/memory.go create mode 100644 pkg/discovery/memory/memory_test.go create mode 100644 pkg/discovery/nodes/nodes.go create mode 100644 pkg/discovery/nodes/nodes_test.go create mode 100644 pkg/filenotify/filenotify.go create mode 100644 pkg/filenotify/fsnotify.go create mode 100644 pkg/filenotify/poller.go create mode 100644 pkg/filenotify/poller_test.go create mode 100644 pkg/fileutils/fileutils.go create mode 100644 pkg/fileutils/fileutils_darwin.go create mode 100644 pkg/fileutils/fileutils_solaris.go create mode 100644 pkg/fileutils/fileutils_test.go create mode 100644 pkg/fileutils/fileutils_unix.go create mode 100644 pkg/fileutils/fileutils_windows.go create mode 100644 pkg/fsutils/fsutils_linux.go create mode 100644 pkg/fsutils/fsutils_linux_test.go create mode 100644 pkg/homedir/homedir.go create mode 100644 pkg/homedir/homedir_linux.go create mode 100644 pkg/homedir/homedir_others.go create mode 100644 pkg/homedir/homedir_test.go create mode 100644 pkg/idtools/idtools.go create mode 100644 pkg/idtools/idtools_unix.go create mode 100644 pkg/idtools/idtools_unix_test.go create mode 100644 pkg/idtools/idtools_windows.go create mode 100644 pkg/idtools/usergroupadd_linux.go create mode 100644 pkg/idtools/usergroupadd_unsupported.go create mode 100644 pkg/idtools/utils_unix.go create mode 100644 pkg/ioutils/buffer.go create mode 100644 pkg/ioutils/buffer_test.go create mode 100644 pkg/ioutils/bytespipe.go create mode 100644 pkg/ioutils/bytespipe_test.go create mode 100644 pkg/ioutils/concat.go create mode 100644 pkg/ioutils/fswriters.go create mode 100644 pkg/ioutils/fswriters_test.go create mode 100644 pkg/ioutils/readers.go create mode 100644 pkg/ioutils/readers_test.go create mode 100644 pkg/ioutils/temp_unix.go create mode 100644 pkg/ioutils/temp_windows.go create mode 100644 pkg/ioutils/writeflusher.go create mode 100644 pkg/ioutils/writers.go create mode 100644 pkg/ioutils/writers_test.go create mode 100644 pkg/jsonlog/jsonlog.go create mode 100644 pkg/jsonlog/jsonlog_marshalling.go create mode 100644 pkg/jsonlog/jsonlog_marshalling_test.go create mode 100644 pkg/jsonlog/jsonlogbytes.go create mode 100644 pkg/jsonlog/jsonlogbytes_test.go create mode 100644 pkg/jsonlog/time_marshalling.go create mode 100644 pkg/jsonlog/time_marshalling_test.go create mode 100644 pkg/jsonmessage/jsonmessage.go create mode 100644 pkg/jsonmessage/jsonmessage_test.go create mode 100644 pkg/listeners/group_unix.go create mode 100644 pkg/listeners/listeners_solaris.go create mode 100644 pkg/listeners/listeners_unix.go create mode 100644 pkg/listeners/listeners_windows.go create mode 100644 pkg/locker/README.md create mode 100644 pkg/locker/locker.go create mode 100644 pkg/locker/locker_test.go create mode 100644 pkg/longpath/longpath.go create mode 100644 pkg/longpath/longpath_test.go create mode 100644 pkg/loopback/attach_loopback.go create mode 100644 pkg/loopback/ioctl.go create mode 100644 pkg/loopback/loop_wrapper.go create mode 
100644 pkg/loopback/loopback.go create mode 100644 pkg/mount/flags.go create mode 100644 pkg/mount/flags_freebsd.go create mode 100644 pkg/mount/flags_linux.go create mode 100644 pkg/mount/flags_unsupported.go create mode 100644 pkg/mount/mount.go create mode 100644 pkg/mount/mount_unix_test.go create mode 100644 pkg/mount/mounter_freebsd.go create mode 100644 pkg/mount/mounter_linux.go create mode 100644 pkg/mount/mounter_linux_test.go create mode 100644 pkg/mount/mounter_solaris.go create mode 100644 pkg/mount/mounter_unsupported.go create mode 100644 pkg/mount/mountinfo.go create mode 100644 pkg/mount/mountinfo_freebsd.go create mode 100644 pkg/mount/mountinfo_linux.go create mode 100644 pkg/mount/mountinfo_linux_test.go create mode 100644 pkg/mount/mountinfo_solaris.go create mode 100644 pkg/mount/mountinfo_unsupported.go create mode 100644 pkg/mount/mountinfo_windows.go create mode 100644 pkg/mount/sharedsubtree_linux.go create mode 100644 pkg/mount/sharedsubtree_linux_test.go create mode 100644 pkg/mount/sharedsubtree_solaris.go create mode 100644 pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 pkg/namesgenerator/names-generator.go create mode 100644 pkg/namesgenerator/names-generator_test.go create mode 100644 pkg/parsers/kernel/kernel.go create mode 100644 pkg/parsers/kernel/kernel_darwin.go create mode 100644 pkg/parsers/kernel/kernel_unix.go create mode 100644 pkg/parsers/kernel/kernel_unix_test.go create mode 100644 pkg/parsers/kernel/kernel_windows.go create mode 100644 pkg/parsers/kernel/uname_linux.go create mode 100644 pkg/parsers/kernel/uname_solaris.go create mode 100644 pkg/parsers/kernel/uname_unsupported.go create mode 100644 pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 pkg/parsers/operatingsystem/operatingsystem_solaris.go create mode 100644 pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 pkg/parsers/parsers.go create mode 100644 pkg/parsers/parsers_test.go create mode 100644 pkg/pidfile/pidfile.go create mode 100644 pkg/pidfile/pidfile_darwin.go create mode 100644 pkg/pidfile/pidfile_test.go create mode 100644 pkg/pidfile/pidfile_unix.go create mode 100644 pkg/pidfile/pidfile_windows.go create mode 100644 pkg/platform/architecture_linux.go create mode 100644 pkg/platform/architecture_unix.go create mode 100644 pkg/platform/architecture_windows.go create mode 100644 pkg/platform/platform.go create mode 100644 pkg/platform/utsname_int8.go create mode 100644 pkg/platform/utsname_int8_test.go create mode 100644 pkg/platform/utsname_uint8.go create mode 100644 pkg/platform/utsname_uint8_test.go create mode 100644 pkg/plugingetter/getter.go create mode 100644 pkg/plugins/client.go create mode 100644 pkg/plugins/client_test.go create mode 100644 pkg/plugins/discovery.go create mode 100644 pkg/plugins/discovery_test.go create mode 100644 pkg/plugins/discovery_unix.go create mode 100644 pkg/plugins/discovery_unix_test.go create mode 100644 pkg/plugins/discovery_windows.go create mode 100644 pkg/plugins/errors.go create mode 100644 pkg/plugins/plugin_test.go create mode 100644 pkg/plugins/pluginrpc-gen/README.md create mode 100644 pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 pkg/plugins/pluginrpc-gen/main.go create mode 100644 pkg/plugins/pluginrpc-gen/parser.go create 
mode 100644 pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 pkg/plugins/pluginrpc-gen/template.go create mode 100644 pkg/plugins/plugins.go create mode 100644 pkg/plugins/plugins_unix.go create mode 100644 pkg/plugins/plugins_windows.go create mode 100644 pkg/plugins/transport/http.go create mode 100644 pkg/plugins/transport/http_test.go create mode 100644 pkg/plugins/transport/transport.go create mode 100644 pkg/pools/pools.go create mode 100644 pkg/pools/pools_test.go create mode 100644 pkg/progress/progress.go create mode 100644 pkg/progress/progressreader.go create mode 100644 pkg/progress/progressreader_test.go create mode 100644 pkg/progress/progresssink.go create mode 100644 pkg/promise/promise.go create mode 100644 pkg/promise/promise_test.go create mode 100644 pkg/pubsub/publisher.go create mode 100644 pkg/pubsub/publisher_test.go create mode 100644 pkg/reexec/README.md create mode 100644 pkg/reexec/command_linux.go create mode 100644 pkg/reexec/command_unix.go create mode 100644 pkg/reexec/command_unsupported.go create mode 100644 pkg/reexec/command_windows.go create mode 100644 pkg/reexec/reexec.go create mode 100644 pkg/reexec/reexec_test.go create mode 100644 pkg/registrar/registrar.go create mode 100644 pkg/registrar/registrar_test.go create mode 100644 pkg/signal/README.md create mode 100644 pkg/signal/signal.go create mode 100644 pkg/signal/signal_darwin.go create mode 100644 pkg/signal/signal_freebsd.go create mode 100644 pkg/signal/signal_linux.go create mode 100644 pkg/signal/signal_linux_test.go create mode 100644 pkg/signal/signal_solaris.go create mode 100644 pkg/signal/signal_test.go create mode 100644 pkg/signal/signal_unix.go create mode 100644 pkg/signal/signal_unsupported.go create mode 100644 pkg/signal/signal_windows.go create mode 100644 pkg/signal/trap.go create mode 100644 pkg/stdcopy/stdcopy.go create mode 100644 pkg/stdcopy/stdcopy_test.go create mode 100644 pkg/streamformatter/streamformatter.go create mode 100644 pkg/streamformatter/streamformatter_test.go create mode 100644 pkg/streamformatter/streamwriter.go create mode 100644 pkg/streamformatter/streamwriter_test.go create mode 100644 pkg/stringid/README.md create mode 100644 pkg/stringid/stringid.go create mode 100644 pkg/stringid/stringid_test.go create mode 100644 pkg/stringutils/README.md create mode 100644 pkg/stringutils/stringutils.go create mode 100644 pkg/stringutils/stringutils_test.go create mode 100644 pkg/symlink/LICENSE.APACHE create mode 100644 pkg/symlink/LICENSE.BSD create mode 100644 pkg/symlink/README.md create mode 100644 pkg/symlink/fs.go create mode 100644 pkg/symlink/fs_unix.go create mode 100644 pkg/symlink/fs_unix_test.go create mode 100644 pkg/symlink/fs_windows.go create mode 100644 pkg/sysinfo/README.md create mode 100644 pkg/sysinfo/numcpu.go create mode 100644 pkg/sysinfo/numcpu_linux.go create mode 100644 pkg/sysinfo/numcpu_windows.go create mode 100644 pkg/sysinfo/sysinfo.go create mode 100644 pkg/sysinfo/sysinfo_linux.go create mode 100644 pkg/sysinfo/sysinfo_linux_test.go create mode 100644 pkg/sysinfo/sysinfo_solaris.go create mode 100644 pkg/sysinfo/sysinfo_test.go create mode 100644 pkg/sysinfo/sysinfo_unix.go create mode 100644 pkg/sysinfo/sysinfo_windows.go create mode 100644 pkg/system/chtimes.go create mode 100644 pkg/system/chtimes_test.go create mode 100644 pkg/system/chtimes_unix.go create mode 100644 pkg/system/chtimes_unix_test.go create mode 100644 pkg/system/chtimes_windows.go create mode 100644 pkg/system/chtimes_windows_test.go create mode 
100644 pkg/system/errors.go create mode 100644 pkg/system/events_windows.go create mode 100644 pkg/system/exitcode.go create mode 100644 pkg/system/filesys.go create mode 100644 pkg/system/filesys_windows.go create mode 100644 pkg/system/init.go create mode 100644 pkg/system/init_windows.go create mode 100644 pkg/system/lcow_unix.go create mode 100644 pkg/system/lcow_windows.go create mode 100644 pkg/system/lstat_unix.go create mode 100644 pkg/system/lstat_unix_test.go create mode 100644 pkg/system/lstat_windows.go create mode 100644 pkg/system/meminfo.go create mode 100644 pkg/system/meminfo_linux.go create mode 100644 pkg/system/meminfo_solaris.go create mode 100644 pkg/system/meminfo_unix_test.go create mode 100644 pkg/system/meminfo_unsupported.go create mode 100644 pkg/system/meminfo_windows.go create mode 100644 pkg/system/mknod.go create mode 100644 pkg/system/mknod_windows.go create mode 100644 pkg/system/path.go create mode 100644 pkg/system/path_unix.go create mode 100644 pkg/system/path_windows.go create mode 100644 pkg/system/path_windows_test.go create mode 100644 pkg/system/process_unix.go create mode 100644 pkg/system/rm.go create mode 100644 pkg/system/rm_test.go create mode 100644 pkg/system/stat_darwin.go create mode 100644 pkg/system/stat_freebsd.go create mode 100644 pkg/system/stat_linux.go create mode 100644 pkg/system/stat_openbsd.go create mode 100644 pkg/system/stat_solaris.go create mode 100644 pkg/system/stat_unix.go create mode 100644 pkg/system/stat_unix_test.go create mode 100644 pkg/system/stat_windows.go create mode 100644 pkg/system/syscall_unix.go create mode 100644 pkg/system/syscall_windows.go create mode 100644 pkg/system/syscall_windows_test.go create mode 100644 pkg/system/umask.go create mode 100644 pkg/system/umask_windows.go create mode 100644 pkg/system/utimes_freebsd.go create mode 100644 pkg/system/utimes_linux.go create mode 100644 pkg/system/utimes_unix_test.go create mode 100644 pkg/system/utimes_unsupported.go create mode 100644 pkg/system/xattrs_linux.go create mode 100644 pkg/system/xattrs_unsupported.go create mode 100644 pkg/tailfile/tailfile.go create mode 100644 pkg/tailfile/tailfile_test.go create mode 100644 pkg/tarsplitutils/tarstream.go create mode 100644 pkg/tarsum/builder_context.go create mode 100644 pkg/tarsum/builder_context_test.go create mode 100644 pkg/tarsum/fileinfosums.go create mode 100644 pkg/tarsum/fileinfosums_test.go create mode 100644 pkg/tarsum/tarsum.go create mode 100644 pkg/tarsum/tarsum_spec.md create mode 100644 pkg/tarsum/tarsum_test.go create mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 pkg/tarsum/testdata/xattr/json create mode 100644 pkg/tarsum/testdata/xattr/layer.tar create mode 100644 pkg/tarsum/versioning.go create mode 100644 pkg/tarsum/versioning_test.go create mode 100644 pkg/tarsum/writercloser.go create mode 100644 
pkg/templates/templates.go create mode 100644 pkg/templates/templates_test.go create mode 100644 pkg/term/ascii.go create mode 100644 pkg/term/ascii_test.go create mode 100644 pkg/term/proxy.go create mode 100644 pkg/term/proxy_test.go create mode 100644 pkg/term/tc.go create mode 100644 pkg/term/tc_solaris_cgo.go create mode 100644 pkg/term/term.go create mode 100644 pkg/term/term_linux_test.go create mode 100644 pkg/term/term_windows.go create mode 100644 pkg/term/termios_bsd.go create mode 100644 pkg/term/termios_linux.go create mode 100644 pkg/term/windows/ansi_reader.go create mode 100644 pkg/term/windows/ansi_writer.go create mode 100644 pkg/term/windows/console.go create mode 100644 pkg/term/windows/windows.go create mode 100644 pkg/term/windows/windows_test.go create mode 100644 pkg/term/winsize.go create mode 100644 pkg/term/winsize_solaris_cgo.go create mode 100644 pkg/testutil/cmd/command.go create mode 100644 pkg/testutil/cmd/command_test.go create mode 100644 pkg/testutil/golden/golden.go create mode 100644 pkg/testutil/helpers.go create mode 100644 pkg/testutil/pkg.go create mode 100644 pkg/testutil/tempfile/tempfile.go create mode 100644 pkg/testutil/utils.go create mode 100644 pkg/testutil/utils_test.go create mode 100644 pkg/tlsconfig/tlsconfig_clone.go create mode 100644 pkg/tlsconfig/tlsconfig_clone_go17.go create mode 100644 pkg/truncindex/truncindex.go create mode 100644 pkg/truncindex/truncindex_test.go create mode 100644 pkg/urlutil/urlutil.go create mode 100644 pkg/urlutil/urlutil_test.go create mode 100644 pkg/useragent/README.md create mode 100644 pkg/useragent/useragent.go create mode 100644 pkg/useragent/useragent_test.go create mode 100644 plugin/backend_linux.go create mode 100644 plugin/backend_unsupported.go create mode 100644 plugin/blobstore.go create mode 100644 plugin/defs.go create mode 100644 plugin/manager.go create mode 100644 plugin/manager_linux.go create mode 100644 plugin/manager_solaris.go create mode 100644 plugin/manager_test.go create mode 100644 plugin/manager_windows.go create mode 100644 plugin/store.go create mode 100644 plugin/store_test.go create mode 100644 plugin/v2/plugin.go create mode 100644 plugin/v2/plugin_linux.go create mode 100644 plugin/v2/plugin_unsupported.go create mode 100644 plugin/v2/settable.go create mode 100644 plugin/v2/settable_test.go create mode 100644 poule.yml create mode 100644 profiles/apparmor/apparmor.go create mode 100644 profiles/apparmor/template.go create mode 100755 profiles/seccomp/default.json create mode 100755 profiles/seccomp/fixtures/example.json create mode 100644 profiles/seccomp/generate.go create mode 100644 profiles/seccomp/seccomp.go create mode 100644 profiles/seccomp/seccomp_default.go create mode 100644 profiles/seccomp/seccomp_test.go create mode 100644 profiles/seccomp/seccomp_unsupported.go create mode 100644 project/ARM.md create mode 100644 project/BRANCHES-AND-TAGS.md create mode 120000 project/CONTRIBUTING.md create mode 100644 project/GOVERNANCE.md create mode 100644 project/IRC-ADMINISTRATION.md create mode 100644 project/ISSUE-TRIAGE.md create mode 100644 project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 project/PACKAGERS.md create mode 100644 project/PATCH-RELEASES.md create mode 100644 project/PRINCIPLES.md create mode 100644 project/README.md create mode 100644 project/RELEASE-CHECKLIST.md create mode 100644 project/RELEASE-PROCESS.md create mode 100644 project/REVIEWING.md create mode 100644 project/TOOLS.md create mode 100644 reference/store.go create mode 100644 
reference/store_test.go create mode 100644 registry/auth.go create mode 100644 registry/auth_test.go create mode 100644 registry/config.go create mode 100644 registry/config_test.go create mode 100644 registry/config_unix.go create mode 100644 registry/config_windows.go create mode 100644 registry/endpoint_test.go create mode 100644 registry/endpoint_v1.go create mode 100644 registry/registry.go create mode 100644 registry/registry_mock_test.go create mode 100644 registry/registry_test.go create mode 100644 registry/resumable/resumablerequestreader.go create mode 100644 registry/resumable/resumablerequestreader_test.go create mode 100644 registry/service.go create mode 100644 registry/service_v1.go create mode 100644 registry/service_v1_test.go create mode 100644 registry/service_v2.go create mode 100644 registry/session.go create mode 100644 registry/types.go create mode 100644 reports/2017-05-01.md create mode 100644 reports/2017-05-08.md create mode 100644 reports/2017-05-15.md create mode 100644 reports/2017-06-05.md create mode 100644 reports/2017-06-12.md create mode 100644 reports/builder/2017-05-01.md create mode 100644 reports/builder/2017-05-08.md create mode 100644 reports/builder/2017-05-15.md create mode 100644 reports/builder/2017-05-22.md create mode 100644 reports/builder/2017-05-29.md create mode 100644 reports/builder/2017-06-05.md create mode 100644 reports/builder/2017-06-12.md create mode 100644 restartmanager/restartmanager.go create mode 100644 restartmanager/restartmanager_test.go create mode 100644 runconfig/config.go create mode 100644 runconfig/config_test.go create mode 100644 runconfig/config_unix.go create mode 100644 runconfig/config_windows.go create mode 100644 runconfig/errors.go create mode 100644 runconfig/fixtures/unix/container_config_1_14.json create mode 100644 runconfig/fixtures/unix/container_config_1_17.json create mode 100644 runconfig/fixtures/unix/container_config_1_19.json create mode 100644 runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 runconfig/fixtures/windows/container_config_1_19.json create mode 100644 runconfig/hostconfig.go create mode 100644 runconfig/hostconfig_solaris.go create mode 100644 runconfig/hostconfig_test.go create mode 100644 runconfig/hostconfig_unix.go create mode 100644 runconfig/hostconfig_windows.go create mode 100644 runconfig/hostconfig_windows_test.go create mode 100644 runconfig/opts/parse.go create mode 100644 vendor.conf create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/README.md create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 vendor/cloud.google.com/go/internal/cloud.go create mode 100644 vendor/cloud.google.com/go/internal/retry.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/README.md create mode 100644 vendor/cloud.google.com/go/logging/apiv2/config_client.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/logging_client.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/metrics_client.go create mode 100644 vendor/cloud.google.com/go/logging/doc.go create mode 100644 vendor/cloud.google.com/go/logging/internal/common.go create mode 100644 vendor/cloud.google.com/go/logging/logging.go create mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md 
create mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go create mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/type_check.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 vendor/github.com/Graylog2/go-gelf/LICENSE create mode 100644 vendor/github.com/Graylog2/go-gelf/README.md create mode 100644 vendor/github.com/Graylog2/go-gelf/gelf/reader.go create mode 100644 vendor/github.com/Graylog2/go-gelf/gelf/writer.go create mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/README.md create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/common.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/reader.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/writer.go create mode 100644 vendor/github.com/Microsoft/go-winio/backup.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/noop.go create mode 100644 
vendor/github.com/Microsoft/go-winio/backuptar/tar.go create mode 100644 vendor/github.com/Microsoft/go-winio/file.go create mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go create mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go create mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go create mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go create mode 100644 vendor/github.com/Microsoft/go-winio/sd.go create mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go create mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE create mode 100644 vendor/github.com/Microsoft/hcsshim/README.md create mode 100644 vendor/github.com/Microsoft/hcsshim/activatelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/baselayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/callback.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cgo.go create mode 100644 vendor/github.com/Microsoft/hcsshim/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/createlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/deactivatelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/destroylayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/errors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go create mode 100644 vendor/github.com/Microsoft/hcsshim/exportlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/getlayermountpath.go create mode 100644 vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go create mode 100644 vendor/github.com/Microsoft/hcsshim/guid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcsshim.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsfuncs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/importlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/interface.go create mode 100644 vendor/github.com/Microsoft/hcsshim/layerexists.go create mode 100644 vendor/github.com/Microsoft/hcsshim/layerutils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/legacy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/nametoguid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/preparelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/processimage.go create mode 100644 vendor/github.com/Microsoft/hcsshim/unpreparelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/utils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/version.go create mode 100644 vendor/github.com/Microsoft/hcsshim/waithelper.go create mode 100644 vendor/github.com/Microsoft/hcsshim/zhcsshim.go create mode 100644 vendor/github.com/Nvveen/Gotty/LICENSE create mode 100644 vendor/github.com/Nvveen/Gotty/README create mode 100644 vendor/github.com/Nvveen/Gotty/attributes.go create mode 100644 vendor/github.com/Nvveen/Gotty/gotty.go create mode 100644 vendor/github.com/Nvveen/Gotty/parser.go create mode 100644 vendor/github.com/Nvveen/Gotty/types.go create mode 100644 vendor/github.com/RackSec/srslog/LICENSE create mode 100644 vendor/github.com/RackSec/srslog/README.md create mode 100644 vendor/github.com/RackSec/srslog/constants.go create mode 100644 vendor/github.com/RackSec/srslog/dialer.go create mode 100644 vendor/github.com/RackSec/srslog/formatter.go create mode 
100644 vendor/github.com/RackSec/srslog/framer.go create mode 100644 vendor/github.com/RackSec/srslog/net_conn.go create mode 100644 vendor/github.com/RackSec/srslog/srslog.go create mode 100644 vendor/github.com/RackSec/srslog/srslog_unix.go create mode 100644 vendor/github.com/RackSec/srslog/writer.go create mode 100644 vendor/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_appengine.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/agl/ed25519/LICENSE create mode 100644 vendor/github.com/agl/ed25519/ed25519.go create mode 100644 vendor/github.com/agl/ed25519/edwards25519/const.go create mode 100644 vendor/github.com/agl/ed25519/edwards25519/edwards25519.go create mode 100644 vendor/github.com/armon/go-metrics/LICENSE create mode 100644 vendor/github.com/armon/go-metrics/README.md create mode 100644 vendor/github.com/armon/go-metrics/const_unix.go create mode 100644 vendor/github.com/armon/go-metrics/const_windows.go create mode 100644 vendor/github.com/armon/go-metrics/inmem.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_signal.go create mode 100755 vendor/github.com/armon/go-metrics/metrics.go create mode 100755 vendor/github.com/armon/go-metrics/sink.go create mode 100755 vendor/github.com/armon/go-metrics/start.go create mode 100644 vendor/github.com/armon/go-metrics/statsd.go create mode 100755 vendor/github.com/armon/go-metrics/statsite.go create mode 100644 vendor/github.com/armon/go-radix/LICENSE create mode 100644 vendor/github.com/armon/go-radix/README.md create mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 
100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/README.md create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/boltdb/bolt/LICENSE create mode 100644 vendor/github.com/boltdb/bolt/README.md create mode 100644 vendor/github.com/boltdb/bolt/bolt_386.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_amd64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_linux.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_openbsd.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64le.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_s390x.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_windows.go create mode 100644 vendor/github.com/boltdb/bolt/boltsync_unix.go create mode 100644 vendor/github.com/boltdb/bolt/bucket.go create mode 100644 vendor/github.com/boltdb/bolt/cursor.go create mode 100644 vendor/github.com/boltdb/bolt/db.go create mode 100644 vendor/github.com/boltdb/bolt/doc.go create mode 100644 vendor/github.com/boltdb/bolt/errors.go create mode 100644 vendor/github.com/boltdb/bolt/freelist.go create mode 100644 vendor/github.com/boltdb/bolt/node.go create mode 100644 vendor/github.com/boltdb/bolt/page.go create mode 100644 vendor/github.com/boltdb/bolt/tx.go create mode 100644 vendor/github.com/bsphere/le_go/LICENSE create mode 100644 vendor/github.com/bsphere/le_go/README.md create mode 100644 vendor/github.com/bsphere/le_go/le.go create mode 100644 vendor/github.com/cloudflare/cfssl/LICENSE create mode 100644 vendor/github.com/cloudflare/cfssl/README.md create mode 100644 vendor/github.com/cloudflare/cfssl/api/api.go create mode 100644 vendor/github.com/cloudflare/cfssl/auth/auth.go create mode 100644 vendor/github.com/cloudflare/cfssl/certdb/README.md create mode 100644 
vendor/github.com/cloudflare/cfssl/certdb/certdb.go create mode 100644 vendor/github.com/cloudflare/cfssl/config/config.go create mode 100644 vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go create mode 100644 vendor/github.com/cloudflare/cfssl/csr/csr.go create mode 100644 vendor/github.com/cloudflare/cfssl/errors/doc.go create mode 100644 vendor/github.com/cloudflare/cfssl/errors/error.go create mode 100644 vendor/github.com/cloudflare/cfssl/errors/http.go create mode 100644 vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go create mode 100644 vendor/github.com/cloudflare/cfssl/helpers/helpers.go create mode 100644 vendor/github.com/cloudflare/cfssl/info/info.go create mode 100644 vendor/github.com/cloudflare/cfssl/initca/initca.go create mode 100644 vendor/github.com/cloudflare/cfssl/log/log.go create mode 100644 vendor/github.com/cloudflare/cfssl/ocsp/config/config.go create mode 100644 vendor/github.com/cloudflare/cfssl/signer/local/local.go create mode 100644 vendor/github.com/cloudflare/cfssl/signer/signer.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/LICENSE create mode 100644 vendor/github.com/cloudfoundry/gosigar/NOTICE create mode 100644 vendor/github.com/cloudfoundry/gosigar/README.md create mode 100644 vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_format.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_interface.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_linux.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_unix.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_util.go create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_windows.go create mode 100644 vendor/github.com/containerd/console/LICENSE create mode 100644 vendor/github.com/containerd/console/README.md create mode 100644 vendor/github.com/containerd/console/console.go create mode 100644 vendor/github.com/containerd/console/console_unix.go create mode 100644 vendor/github.com/containerd/console/console_windows.go create mode 100644 vendor/github.com/containerd/console/tc_darwin.go create mode 100644 vendor/github.com/containerd/console/tc_freebsd.go create mode 100644 vendor/github.com/containerd/console/tc_linux.go create mode 100644 vendor/github.com/containerd/containerd/LICENSE.code create mode 100644 vendor/github.com/containerd/containerd/LICENSE.docs create mode 100644 vendor/github.com/containerd/containerd/NOTICE create mode 100644 vendor/github.com/containerd/containerd/README.md create mode 100644 vendor/github.com/containerd/containerd/api/grpc/server/server.go create mode 100644 vendor/github.com/containerd/containerd/api/grpc/server/server_linux.go create mode 100644 vendor/github.com/containerd/containerd/api/grpc/server/server_solaris.go create mode 100644 vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/grpc/types/api.proto create mode 100644 vendor/github.com/containerd/containerd/api/http/pprof/pprof.go create mode 100644 vendor/github.com/containerd/containerd/archutils/epoll.go create mode 100644 vendor/github.com/containerd/containerd/archutils/epoll_arm64.go create mode 100644 vendor/github.com/containerd/containerd/containerd-shim/console.go create mode 100644 vendor/github.com/containerd/containerd/containerd-shim/console_solaris.go create mode 
100644 vendor/github.com/containerd/containerd/containerd-shim/main.go create mode 100644 vendor/github.com/containerd/containerd/containerd-shim/process.go create mode 100644 vendor/github.com/containerd/containerd/containerd-shim/process_linux.go create mode 100644 vendor/github.com/containerd/containerd/containerd-shim/process_solaris.go create mode 100644 vendor/github.com/containerd/containerd/containerd/main.go create mode 100644 vendor/github.com/containerd/containerd/containerd/main_linux.go create mode 100644 vendor/github.com/containerd/containerd/containerd/main_solaris.go create mode 100644 vendor/github.com/containerd/containerd/ctr/checkpoint_linux.go create mode 100644 vendor/github.com/containerd/containerd/ctr/checkpoint_solaris.go create mode 100644 vendor/github.com/containerd/containerd/ctr/const.go create mode 100644 vendor/github.com/containerd/containerd/ctr/container.go create mode 100644 vendor/github.com/containerd/containerd/ctr/events.go create mode 100644 vendor/github.com/containerd/containerd/ctr/main.go create mode 100644 vendor/github.com/containerd/containerd/ctr/sort.go create mode 100644 vendor/github.com/containerd/containerd/osutils/fds.go create mode 100644 vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go create mode 100644 vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go create mode 100644 vendor/github.com/containerd/containerd/osutils/prctl.go create mode 100644 vendor/github.com/containerd/containerd/osutils/prctl_solaris.go create mode 100644 vendor/github.com/containerd/containerd/osutils/reaper.go create mode 100644 vendor/github.com/containerd/containerd/runtime/container.go create mode 100644 vendor/github.com/containerd/containerd/runtime/container_linux.go create mode 100644 vendor/github.com/containerd/containerd/runtime/container_solaris.go create mode 100644 vendor/github.com/containerd/containerd/runtime/process.go create mode 100644 vendor/github.com/containerd/containerd/runtime/process_linux.go create mode 100644 vendor/github.com/containerd/containerd/runtime/process_solaris.go create mode 100644 vendor/github.com/containerd/containerd/runtime/runtime.go create mode 100644 vendor/github.com/containerd/containerd/runtime/stats.go create mode 100644 vendor/github.com/containerd/containerd/specs/spec_linux.go create mode 100644 vendor/github.com/containerd/containerd/specs/spec_solaris.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/add_process.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/checkpoint.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/create.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/create_solaris.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/delete.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/errors.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/exit.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/get_containers.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/machine.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/machine_solaris.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/metrics.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/monitor_linux.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/monitor_solaris.go create mode 100644 
vendor/github.com/containerd/containerd/supervisor/oom.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/signal.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/sort.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/stats.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/supervisor.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/task.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/types.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/update.go create mode 100644 vendor/github.com/containerd/containerd/supervisor/worker.go create mode 100644 vendor/github.com/containerd/containerd/version.go create mode 100644 vendor/github.com/containerd/go-runc/LICENSE create mode 100644 vendor/github.com/containerd/go-runc/README.md create mode 100644 vendor/github.com/containerd/go-runc/console.go create mode 100644 vendor/github.com/containerd/go-runc/container.go create mode 100644 vendor/github.com/containerd/go-runc/events.go create mode 100644 vendor/github.com/containerd/go-runc/io.go create mode 100644 vendor/github.com/containerd/go-runc/monitor.go create mode 100644 vendor/github.com/containerd/go-runc/runc.go create mode 100644 vendor/github.com/containerd/go-runc/utils.go create mode 100644 vendor/github.com/coreos/etcd/LICENSE create mode 100644 vendor/github.com/coreos/etcd/NOTICE create mode 100644 vendor/github.com/coreos/etcd/README.md create mode 100644 vendor/github.com/coreos/etcd/pkg/README.md create mode 100644 vendor/github.com/coreos/etcd/pkg/crc/crc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/purge.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go create mode 100644 vendor/github.com/coreos/etcd/pkg/idutil/id.go create mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go create mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go create mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/reader.go create mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/util.go create mode 100644 vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go create mode 100644 vendor/github.com/coreos/etcd/raft/README.md create mode 100644 
vendor/github.com/coreos/etcd/raft/doc.go create mode 100644 vendor/github.com/coreos/etcd/raft/log.go create mode 100644 vendor/github.com/coreos/etcd/raft/log_unstable.go create mode 100644 vendor/github.com/coreos/etcd/raft/logger.go create mode 100644 vendor/github.com/coreos/etcd/raft/node.go create mode 100644 vendor/github.com/coreos/etcd/raft/progress.go create mode 100644 vendor/github.com/coreos/etcd/raft/raft.go create mode 100644 vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go create mode 100644 vendor/github.com/coreos/etcd/raft/raftpb/raft.proto create mode 100644 vendor/github.com/coreos/etcd/raft/rawnode.go create mode 100644 vendor/github.com/coreos/etcd/raft/read_only.go create mode 100644 vendor/github.com/coreos/etcd/raft/status.go create mode 100644 vendor/github.com/coreos/etcd/raft/storage.go create mode 100644 vendor/github.com/coreos/etcd/raft/util.go create mode 100644 vendor/github.com/coreos/etcd/snap/db.go create mode 100644 vendor/github.com/coreos/etcd/snap/message.go create mode 100644 vendor/github.com/coreos/etcd/snap/metrics.go create mode 100644 vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go create mode 100644 vendor/github.com/coreos/etcd/snap/snappb/snap.proto create mode 100644 vendor/github.com/coreos/etcd/snap/snapshotter.go create mode 100644 vendor/github.com/coreos/etcd/wal/decoder.go create mode 100644 vendor/github.com/coreos/etcd/wal/doc.go create mode 100644 vendor/github.com/coreos/etcd/wal/encoder.go create mode 100644 vendor/github.com/coreos/etcd/wal/file_pipeline.go create mode 100644 vendor/github.com/coreos/etcd/wal/metrics.go create mode 100644 vendor/github.com/coreos/etcd/wal/repair.go create mode 100644 vendor/github.com/coreos/etcd/wal/util.go create mode 100644 vendor/github.com/coreos/etcd/wal/wal.go create mode 100644 vendor/github.com/coreos/etcd/wal/wal_unix.go create mode 100644 vendor/github.com/coreos/etcd/wal/wal_windows.go create mode 100644 vendor/github.com/coreos/etcd/wal/walpb/record.go create mode 100644 vendor/github.com/coreos/etcd/wal/walpb/record.pb.go create mode 100644 vendor/github.com/coreos/etcd/wal/walpb/record.proto create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/README.md create mode 100644 vendor/github.com/coreos/go-systemd/activation/files.go create mode 100644 vendor/github.com/coreos/go-systemd/activation/listeners.go create mode 100644 vendor/github.com/coreos/go-systemd/activation/packetconns.go create mode 100644 vendor/github.com/coreos/go-systemd/daemon/sdnotify.go create mode 100644 vendor/github.com/coreos/go-systemd/daemon/watchdog.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/dbus.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/methods.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/properties.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/set.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription_set.go create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/go-systemd/util/util.go create mode 100644 vendor/github.com/coreos/go-systemd/util/util_cgo.go create mode 100644 vendor/github.com/coreos/go-systemd/util/util_stub.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/README.md 
create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen.go create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen_example.go create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/README.md create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/README.md create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/deckarep/golang-set/LICENSE create mode 100644 vendor/github.com/deckarep/golang-set/README.md create mode 100644 vendor/github.com/deckarep/golang-set/set.go create mode 100644 vendor/github.com/deckarep/golang-set/threadsafe.go create mode 100644 vendor/github.com/deckarep/golang-set/threadunsafe.go create mode 100644 vendor/github.com/docker/cli/LICENSE create mode 100644 vendor/github.com/docker/cli/NOTICE create mode 100644 vendor/github.com/docker/cli/README.md create mode 100644 vendor/github.com/docker/cli/VERSION create mode 100644 vendor/github.com/docker/cli/cli/cobra.go create mode 100644 vendor/github.com/docker/cli/cli/command/cli.go create mode 100644 vendor/github.com/docker/cli/cli/command/commands/commands.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/attach.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/cmd.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/commit.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/cp.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/create.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/diff.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/exec.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/export.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/hijack.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/inspect.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/kill.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/list.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/logs.go 
create mode 100644 vendor/github.com/docker/cli/cli/command/container/opts.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/pause.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/port.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/prune.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/rename.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/restart.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/rm.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/run.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/start.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/stats.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/stats_helpers.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/stop.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/top.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/tty.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/unpause.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/update.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/utils.go create mode 100644 vendor/github.com/docker/cli/cli/command/container/wait.go create mode 100644 vendor/github.com/docker/cli/cli/command/events_utils.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/checkpoint.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/config.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/container.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/custom.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/diff.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/formatter.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/history.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/image.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/network.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/node.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/plugin.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/reflect.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/secret.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/service.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/stack.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/stats.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/task.go create mode 100644 vendor/github.com/docker/cli/cli/command/formatter/volume.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/build.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/build/context.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/build/context_unix.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/build/context_windows.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/build/dockerignore.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/cmd.go create mode 100644 
create mode 100644 vendor/github.com/docker/cli/cli/command/image/history.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/import.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/inspect.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/list.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/load.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/prune.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/pull.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/push.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/remove.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/save.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/tag.go
create mode 100644 vendor/github.com/docker/cli/cli/command/image/trust.go
create mode 100644 vendor/github.com/docker/cli/cli/command/in.go
create mode 100644 vendor/github.com/docker/cli/cli/command/inspect/inspector.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/cmd.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/connect.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/create.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/disconnect.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/inspect.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/list.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/prune.go
create mode 100644 vendor/github.com/docker/cli/cli/command/network/remove.go
create mode 100644 vendor/github.com/docker/cli/cli/command/out.go
create mode 100644 vendor/github.com/docker/cli/cli/command/prune/prune.go
create mode 100644 vendor/github.com/docker/cli/cli/command/registry.go
create mode 100644 vendor/github.com/docker/cli/cli/command/registry/login.go
create mode 100644 vendor/github.com/docker/cli/cli/command/registry/logout.go
create mode 100644 vendor/github.com/docker/cli/cli/command/registry/search.go
create mode 100644 vendor/github.com/docker/cli/cli/command/stream.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/cmd.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/df.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/events.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/info.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/inspect.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/prune.go
create mode 100644 vendor/github.com/docker/cli/cli/command/system/version.go
create mode 100644 vendor/github.com/docker/cli/cli/command/trust.go
create mode 100644 vendor/github.com/docker/cli/cli/command/utils.go
create mode 100644 vendor/github.com/docker/cli/cli/command/volume/cmd.go
create mode 100644 vendor/github.com/docker/cli/cli/command/volume/create.go
create mode 100644 vendor/github.com/docker/cli/cli/command/volume/inspect.go
create mode 100644 vendor/github.com/docker/cli/cli/command/volume/list.go
create mode 100644 vendor/github.com/docker/cli/cli/command/volume/prune.go
create mode 100644 vendor/github.com/docker/cli/cli/command/volume/remove.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/interpolation/interpolation.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/loader/loader.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/loader/volume.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/schema/bindata.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/schema/schema.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/template/template.go
create mode 100644 vendor/github.com/docker/cli/cli/compose/types/types.go
create mode 100644 vendor/github.com/docker/cli/cli/config/config.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/credentials.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/file_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/native_store.go
create mode 100644 vendor/github.com/docker/cli/cli/debug/debug.go
create mode 100644 vendor/github.com/docker/cli/cli/error.go
create mode 100644 vendor/github.com/docker/cli/cli/flags/client.go
create mode 100644 vendor/github.com/docker/cli/cli/flags/common.go
create mode 100644 vendor/github.com/docker/cli/cli/required.go
create mode 100644 vendor/github.com/docker/cli/cli/trust/trust.go
create mode 100644 vendor/github.com/docker/cli/cli/version.go
create mode 100644 vendor/github.com/docker/cli/cli/winresources/res_windows.go
create mode 100644 vendor/github.com/docker/cli/cmd/docker/daemon_none.go
create mode 100644 vendor/github.com/docker/cli/cmd/docker/daemon_unix.go
create mode 100644 vendor/github.com/docker/cli/cmd/docker/docker.go
create mode 100644 vendor/github.com/docker/cli/cmd/docker/docker_windows.go
create mode 100644 vendor/github.com/docker/cli/opts/config.go
create mode 100644 vendor/github.com/docker/cli/opts/duration.go
create mode 100644 vendor/github.com/docker/cli/opts/env.go
create mode 100644 vendor/github.com/docker/cli/opts/envfile.go
create mode 100644 vendor/github.com/docker/cli/opts/hosts.go
create mode 100644 vendor/github.com/docker/cli/opts/hosts_unix.go
create mode 100644 vendor/github.com/docker/cli/opts/hosts_windows.go
create mode 100644 vendor/github.com/docker/cli/opts/ip.go
create mode 100644 vendor/github.com/docker/cli/opts/mount.go
create mode 100644 vendor/github.com/docker/cli/opts/network.go
create mode 100644 vendor/github.com/docker/cli/opts/opts.go
create mode 100644 vendor/github.com/docker/cli/opts/opts_unix.go
create mode 100644 vendor/github.com/docker/cli/opts/opts_windows.go
create mode 100644 vendor/github.com/docker/cli/opts/parse.go
create mode 100644 vendor/github.com/docker/cli/opts/port.go
create mode 100644 vendor/github.com/docker/cli/opts/quotedstring.go
create mode 100644 vendor/github.com/docker/cli/opts/runtime.go
create mode 100644 vendor/github.com/docker/cli/opts/secret.go
create mode 100644 vendor/github.com/docker/cli/opts/throttledevice.go
create mode 100644 vendor/github.com/docker/cli/opts/ulimit.go
create mode 100644 vendor/github.com/docker/cli/opts/weightdevice.go
create mode 100755 vendor/github.com/docker/cli/vendor.conf
create mode 100644 vendor/github.com/docker/distribution/LICENSE
create mode 100644 vendor/github.com/docker/distribution/README.md
create mode 100644 vendor/github.com/docker/distribution/blobs.go
create mode 100644 vendor/github.com/docker/distribution/context/context.go
create mode 100644 vendor/github.com/docker/distribution/context/doc.go
create mode 100644 vendor/github.com/docker/distribution/context/http.go
create mode 100644 vendor/github.com/docker/distribution/context/logger.go
create mode 100644 vendor/github.com/docker/distribution/context/trace.go
create mode 100644 vendor/github.com/docker/distribution/context/util.go
create mode 100644 vendor/github.com/docker/distribution/context/version.go
create mode 100644 vendor/github.com/docker/distribution/digestset/set.go
create mode 100644 vendor/github.com/docker/distribution/doc.go
create mode 100644 vendor/github.com/docker/distribution/errors.go
create mode 100644 vendor/github.com/docker/distribution/manifest/doc.go
create mode 100644 vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/config_builder.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/manifest.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/sign.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/verify.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/builder.go
create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/manifest.go
create mode 100644 vendor/github.com/docker/distribution/manifest/versioned.go
create mode 100644 vendor/github.com/docker/distribution/manifests.go
create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go
create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go
create mode 100644 vendor/github.com/docker/distribution/reference/reference.go
create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go
create mode 100644 vendor/github.com/docker/distribution/registry.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/doc.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/errors.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/api_version.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/session.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/blob_writer.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/errors.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/repository.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/transport/transport.go
create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cache.go
create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
create mode 100644 vendor/github.com/docker/distribution/tags.go
create mode 100644 vendor/github.com/docker/distribution/uuid/uuid.go
create mode 100644 vendor/github.com/docker/distribution/vendor.conf
create mode 100644 vendor/github.com/docker/docker-credential-helpers/LICENSE
create mode 100644 vendor/github.com/docker/docker-credential-helpers/README.md
create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
create mode 100644 vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
create mode 100644 vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
create mode 100644 vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
create mode 100644 vendor/github.com/docker/go-connections/LICENSE
create mode 100644 vendor/github.com/docker/go-connections/README.md
create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go
create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go
create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/README.md
create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go
create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
create mode 100644 vendor/github.com/docker/go-events/LICENSE
create mode 100644 vendor/github.com/docker/go-events/README.md
create mode 100644 vendor/github.com/docker/go-events/broadcast.go
create mode 100644 vendor/github.com/docker/go-events/channel.go
create mode 100644 vendor/github.com/docker/go-events/errors.go
create mode 100644 vendor/github.com/docker/go-events/event.go
create mode 100644 vendor/github.com/docker/go-events/filter.go
create mode 100644 vendor/github.com/docker/go-events/queue.go
create mode 100644 vendor/github.com/docker/go-events/retry.go
create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.code
create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.docs
create mode 100644 vendor/github.com/docker/go-metrics/NOTICE
create mode 100644 vendor/github.com/docker/go-metrics/README.md
create mode 100644 vendor/github.com/docker/go-metrics/counter.go
create mode 100644 vendor/github.com/docker/go-metrics/docs.go
create mode 100644 vendor/github.com/docker/go-metrics/gauge.go
create mode 100644 vendor/github.com/docker/go-metrics/handler.go
create mode 100644 vendor/github.com/docker/go-metrics/helpers.go
create mode 100644 vendor/github.com/docker/go-metrics/namespace.go
create mode 100644 vendor/github.com/docker/go-metrics/register.go
create mode 100644 vendor/github.com/docker/go-metrics/timer.go
create mode 100644 vendor/github.com/docker/go-metrics/unit.go
create mode 100644 vendor/github.com/docker/go-units/LICENSE
create mode 100644 vendor/github.com/docker/go-units/README.md
create mode 100644 vendor/github.com/docker/go-units/duration.go
create mode 100644 vendor/github.com/docker/go-units/size.go
create mode 100644 vendor/github.com/docker/go-units/ulimit.go
create mode 100644 vendor/github.com/docker/go/LICENSE
create mode 100644 vendor/github.com/docker/go/README.md
create mode 100644 vendor/github.com/docker/go/canonical/json/decode.go
create mode 100644 vendor/github.com/docker/go/canonical/json/encode.go
create mode 100644 vendor/github.com/docker/go/canonical/json/fold.go
create mode 100644 vendor/github.com/docker/go/canonical/json/indent.go
create mode 100644 vendor/github.com/docker/go/canonical/json/scanner.go
create mode 100644 vendor/github.com/docker/go/canonical/json/stream.go
create mode 100644 vendor/github.com/docker/go/canonical/json/tags.go
create mode 100644 vendor/github.com/docker/libkv/LICENSE.code
create mode 100644 vendor/github.com/docker/libkv/LICENSE.docs
create mode 100644 vendor/github.com/docker/libkv/README.md
create mode 100644 vendor/github.com/docker/libkv/libkv.go
create mode 100644 vendor/github.com/docker/libkv/store/boltdb/boltdb.go
create mode 100644 vendor/github.com/docker/libkv/store/helpers.go
create mode 100644 vendor/github.com/docker/libkv/store/store.go
create mode 100644 vendor/github.com/docker/libnetwork/LICENSE
create mode 100644 vendor/github.com/docker/libnetwork/README.md
create mode 100644 vendor/github.com/docker/libnetwork/agent.go
create mode 100644 vendor/github.com/docker/libnetwork/agent.pb.go
create mode 100644 vendor/github.com/docker/libnetwork/agent.proto
create mode 100644 vendor/github.com/docker/libnetwork/bitseq/sequence.go
create mode 100644 vendor/github.com/docker/libnetwork/bitseq/store.go
create mode 100644 vendor/github.com/docker/libnetwork/cluster/provider.go
create mode 100644 vendor/github.com/docker/libnetwork/cmd/proxy/main.go
create mode 100644 vendor/github.com/docker/libnetwork/cmd/proxy/proxy.go
create mode 100644 vendor/github.com/docker/libnetwork/cmd/proxy/stub_proxy.go
create mode 100644 vendor/github.com/docker/libnetwork/cmd/proxy/tcp_proxy.go
create mode 100644 vendor/github.com/docker/libnetwork/cmd/proxy/udp_proxy.go
create mode 100644 vendor/github.com/docker/libnetwork/common/setmatrix.go
create mode 100644 vendor/github.com/docker/libnetwork/config/config.go
create mode 100644 vendor/github.com/docker/libnetwork/controller.go
create mode 100644 vendor/github.com/docker/libnetwork/datastore/cache.go
create mode 100644 vendor/github.com/docker/libnetwork/datastore/datastore.go
create mode 100644 vendor/github.com/docker/libnetwork/datastore/mock_store.go
create mode 100644 vendor/github.com/docker/libnetwork/default_gateway.go
create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_freebsd.go
create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go
create mode 100644 vendor/github.com/docker/libnetwork/driverapi/driverapi.go
create mode 100644 vendor/github.com/docker/libnetwork/driverapi/errors.go
create mode 100644 vendor/github.com/docker/libnetwork/driverapi/ipamdata.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/bridge_store.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/brmanager/brmanager.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/errors.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/interface.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/labels.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/link.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_armppc64.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_notarm.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_unsupported.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/port_mapping.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_bridgenetfiltering.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_device.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/host/host.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_network.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_state.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_store.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/ipvlan/ivmanager/ivmanager.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/macvlan/mvmanager/mvmanager.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/null/null.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/remote/api/api.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/remote/driver.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge_store.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/bridge/errors.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/bridge/port_mapping.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/encryption.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/joinleave.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_serf.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_utils.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.pb.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.proto
create mode 100644 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/peerdb.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/labels.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.pb.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.proto
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers_experimental_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers_freebsd.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers_ipam.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/drivers_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/drvregistry/drvregistry.go
create mode 100644 vendor/github.com/docker/libnetwork/endpoint.go
create mode 100644 vendor/github.com/docker/libnetwork/endpoint_cnt.go
create mode 100644 vendor/github.com/docker/libnetwork/endpoint_info.go
create mode 100644 vendor/github.com/docker/libnetwork/endpoint_info_unix.go
create mode 100644 vendor/github.com/docker/libnetwork/endpoint_info_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/error.go
create mode 100644 vendor/github.com/docker/libnetwork/etchosts/etchosts.go
create mode 100644 vendor/github.com/docker/libnetwork/firewall_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/firewall_others.go
create mode 100644 vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go
create mode 100644 vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery_api.go
create mode 100644 vendor/github.com/docker/libnetwork/idm/idm.go
create mode 100644 vendor/github.com/docker/libnetwork/ipam/allocator.go
create mode 100644 vendor/github.com/docker/libnetwork/ipam/store.go
create mode 100644 vendor/github.com/docker/libnetwork/ipam/structures.go
create mode 100644 vendor/github.com/docker/libnetwork/ipam/utils.go
create mode 100644 vendor/github.com/docker/libnetwork/ipamapi/contract.go
create mode 100644 vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
create mode 100644 vendor/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/ipams/null/null.go
create mode 100644 vendor/github.com/docker/libnetwork/ipams/remote/api/api.go
create mode 100644 vendor/github.com/docker/libnetwork/ipams/remote/remote.go
create mode 100644 vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
create mode 100644 vendor/github.com/docker/libnetwork/ipamutils/utils.go
create mode 100644 vendor/github.com/docker/libnetwork/iptables/conntrack.go
create mode 100644 vendor/github.com/docker/libnetwork/iptables/firewalld.go
create mode 100644 vendor/github.com/docker/libnetwork/iptables/iptables.go
create mode 100644 vendor/github.com/docker/libnetwork/ipvs/constants.go
create mode 100644 vendor/github.com/docker/libnetwork/ipvs/ipvs.go
create mode 100644 vendor/github.com/docker/libnetwork/ipvs/netlink.go
create mode 100644 vendor/github.com/docker/libnetwork/netlabel/labels.go
create mode 100644 vendor/github.com/docker/libnetwork/netutils/utils.go
create mode 100644 vendor/github.com/docker/libnetwork/netutils/utils_freebsd.go
create mode 100644 vendor/github.com/docker/libnetwork/netutils/utils_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/netutils/utils_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/netutils/utils_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/network.go
create mode 100644 vendor/github.com/docker/libnetwork/network_unix.go
create mode 100644 vendor/github.com/docker/libnetwork/network_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/broadcast.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/cluster.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/delegate.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/message.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/networkdb.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/networkdb.proto
create mode 100644 vendor/github.com/docker/libnetwork/networkdb/watch.go
create mode 100644 vendor/github.com/docker/libnetwork/ns/init_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/options/options.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/interface_freebsd.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/interface_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/interface_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/interface_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/namespace_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/namespace_unsupported.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/namespace_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/neigh_freebsd.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/neigh_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/neigh_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/neigh_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/options_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/route_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/sandbox.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/sandbox_freebsd.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/sandbox_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go
create mode 100644 vendor/github.com/docker/libnetwork/portallocator/portallocator.go
create mode 100644 vendor/github.com/docker/libnetwork/portallocator/portallocator_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/portallocator/portallocator_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/portmapper/mapper.go
create mode 100644 vendor/github.com/docker/libnetwork/portmapper/mock_proxy.go
create mode 100644 vendor/github.com/docker/libnetwork/portmapper/proxy.go
create mode 100644 vendor/github.com/docker/libnetwork/portmapper/proxy_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/portmapper/proxy_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/README.md
create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
create mode 100644 vendor/github.com/docker/libnetwork/resolver.go
create mode 100644 vendor/github.com/docker/libnetwork/resolver_unix.go
create mode 100644 vendor/github.com/docker/libnetwork/resolver_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_dns_unix.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_dns_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey_solaris.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/sandbox_store.go
create mode 100644 vendor/github.com/docker/libnetwork/service.go
create mode 100644 vendor/github.com/docker/libnetwork/service_common.go
create mode 100644 vendor/github.com/docker/libnetwork/service_linux.go
create mode 100644 vendor/github.com/docker/libnetwork/service_unsupported.go
create mode 100644 vendor/github.com/docker/libnetwork/service_windows.go
create mode 100644 vendor/github.com/docker/libnetwork/store.go
create mode 100644 vendor/github.com/docker/libnetwork/types/types.go
create mode 100644 vendor/github.com/docker/libnetwork/vendor.conf
create mode 100644 vendor/github.com/docker/libtrust/LICENSE
create mode 100644 vendor/github.com/docker/libtrust/README.md
create mode 100644 vendor/github.com/docker/libtrust/certificates.go
create mode 100644 vendor/github.com/docker/libtrust/doc.go
create mode 100644 vendor/github.com/docker/libtrust/ec_key.go
create mode 100644 vendor/github.com/docker/libtrust/filter.go
create mode 100644 vendor/github.com/docker/libtrust/hash.go
create mode 100644 vendor/github.com/docker/libtrust/jsonsign.go
create mode 100644 vendor/github.com/docker/libtrust/key.go
create mode 100644 vendor/github.com/docker/libtrust/key_files.go
create mode 100644 vendor/github.com/docker/libtrust/key_manager.go
create mode 100644 vendor/github.com/docker/libtrust/rsa_key.go
create mode 100644 vendor/github.com/docker/libtrust/util.go
create mode 100644 vendor/github.com/docker/notary/LICENSE
create mode 100644 vendor/github.com/docker/notary/README.md
create mode 100644 vendor/github.com/docker/notary/client/changelist/change.go
create mode 100644 vendor/github.com/docker/notary/client/changelist/changelist.go
create mode 100644 vendor/github.com/docker/notary/client/changelist/file_changelist.go
create mode 100644 vendor/github.com/docker/notary/client/changelist/interface.go
create mode 100644 vendor/github.com/docker/notary/client/client.go
create mode 100644 vendor/github.com/docker/notary/client/delegations.go
create mode 100644 vendor/github.com/docker/notary/client/helpers.go
create mode 100644 vendor/github.com/docker/notary/client/repo.go
create mode 100644 vendor/github.com/docker/notary/client/repo_pkcs11.go
create mode 100644 vendor/github.com/docker/notary/client/tufclient.go
create mode 100644 vendor/github.com/docker/notary/client/witness.go
create mode 100644 vendor/github.com/docker/notary/const.go
create mode 100644 vendor/github.com/docker/notary/const_nowindows.go
create mode 100644 vendor/github.com/docker/notary/const_windows.go
create mode 100644 vendor/github.com/docker/notary/cryptoservice/certificate.go
create mode 100644 vendor/github.com/docker/notary/cryptoservice/crypto_service.go
create mode 100644 vendor/github.com/docker/notary/notary.go
create mode 100644 vendor/github.com/docker/notary/passphrase/passphrase.go
create mode 100644 vendor/github.com/docker/notary/storage/errors.go
create mode 100644 vendor/github.com/docker/notary/storage/filestore.go
create mode 100644 vendor/github.com/docker/notary/storage/httpstore.go
create mode 100644 vendor/github.com/docker/notary/storage/interfaces.go
create mode 100644 vendor/github.com/docker/notary/storage/memorystore.go
create mode 100644 vendor/github.com/docker/notary/storage/offlinestore.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/interfaces.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/keystore.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/yubikey/import.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/yubikey/non_pkcs11.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_darwin.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_interface.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/yubikey/pkcs11_linux.go
create mode 100644 vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go
create mode 100644 vendor/github.com/docker/notary/trustpinning/certs.go
create mode 100644 vendor/github.com/docker/notary/trustpinning/trustpin.go
create mode 100644 vendor/github.com/docker/notary/tuf/LICENSE
create mode 100644 vendor/github.com/docker/notary/tuf/README.md
create mode 100644 vendor/github.com/docker/notary/tuf/builder.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/errors.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/keys.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/roles.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/root.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/serializer.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/snapshot.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/targets.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/timestamp.go
create mode 100644 vendor/github.com/docker/notary/tuf/data/types.go
create mode 100644 vendor/github.com/docker/notary/tuf/signed/ed25519.go
create mode 100644 vendor/github.com/docker/notary/tuf/signed/errors.go
create mode 100644 vendor/github.com/docker/notary/tuf/signed/interface.go
create mode 100644 vendor/github.com/docker/notary/tuf/signed/sign.go
create mode 100644 vendor/github.com/docker/notary/tuf/signed/verifiers.go
create mode 100644 vendor/github.com/docker/notary/tuf/signed/verify.go
create mode 100644 vendor/github.com/docker/notary/tuf/tuf.go
create mode 100644 vendor/github.com/docker/notary/tuf/utils/role_sort.go
create mode 100644 vendor/github.com/docker/notary/tuf/utils/stack.go
create mode 100644 vendor/github.com/docker/notary/tuf/utils/utils.go
create mode 100644 vendor/github.com/docker/notary/tuf/utils/x509.go
create mode 100644 vendor/github.com/docker/notary/tuf/validation/errors.go
create mode 100644 vendor/github.com/docker/swarmkit/LICENSE
create mode 100644 vendor/github.com/docker/swarmkit/README.md
create mode 100644 vendor/github.com/docker/swarmkit/agent/agent.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/config.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/configs/configs.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/dependency.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/errors.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/exec/controller.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/exec/errors.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/exec/executor.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/helpers.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/reporter.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/resource.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/secrets/secrets.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/session.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/storage.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/task.go
create mode 100644 vendor/github.com/docker/swarmkit/agent/worker.go
create mode 100644 vendor/github.com/docker/swarmkit/api/README.md
create mode 100644 vendor/github.com/docker/swarmkit/api/ca.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/ca.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/control.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/control.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/deepcopy/copy.go
create mode 100644 vendor/github.com/docker/swarmkit/api/defaults/service.go
create mode 100644 vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/dispatcher.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/equality/equality.go
create mode 100644 vendor/github.com/docker/swarmkit/api/gen.go
create mode 100644 vendor/github.com/docker/swarmkit/api/genericresource/helpers.go
create mode 100644 vendor/github.com/docker/swarmkit/api/genericresource/parse.go
create mode 100644 vendor/github.com/docker/swarmkit/api/genericresource/resource_management.go
create mode 100644 vendor/github.com/docker/swarmkit/api/genericresource/string.go
create mode 100644 vendor/github.com/docker/swarmkit/api/genericresource/validate.go
create mode 100644 vendor/github.com/docker/swarmkit/api/health.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/health.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/logbroker.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/logbroker.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/naming/naming.go
create mode 100644 vendor/github.com/docker/swarmkit/api/objects.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/objects.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/raft.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/raft.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/resource.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/resource.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/snapshot.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/snapshot.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/specs.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/specs.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/storeobject.go
create mode 100644 vendor/github.com/docker/swarmkit/api/types.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/types.proto
create mode 100644 vendor/github.com/docker/swarmkit/api/watch.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/api/watch.proto
create mode 100644 vendor/github.com/docker/swarmkit/ca/auth.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/certificates.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/config.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/external.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/forward.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/keyreadwriter.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/reconciler.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/renewer.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/server.go
create mode 100644 vendor/github.com/docker/swarmkit/ca/transport.go
create mode 100644 vendor/github.com/docker/swarmkit/connectionbroker/broker.go
create mode 100644 vendor/github.com/docker/swarmkit/identity/doc.go
create mode 100644 vendor/github.com/docker/swarmkit/identity/randomid.go
create mode 100644 vendor/github.com/docker/swarmkit/ioutils/ioutils.go
create mode 100644 vendor/github.com/docker/swarmkit/log/context.go
create mode 100644 vendor/github.com/docker/swarmkit/log/grpc.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/allocator.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_darwin.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_ipam.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_network_linux.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_network_windows.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_unsupported.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/doc.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/network.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/constraint/constraint.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/ca_rotation.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/common.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/config.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/network.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/node.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/secret.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/server.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/service.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/controlapi/task.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/deks.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/dirty.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/dispatcher/nodes.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/dispatcher/period.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/doc.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/encryption/encryption.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/encryption/nacl.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/health/health.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/keymanager/keymanager.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/logbroker/broker.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/manager.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/metrics/collector.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/constraintenforcer/constraint_enforcer.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/replicated.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/services.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/tasks.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/restart/restart.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/service.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/slot.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/task.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/taskinit/init.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/resourceapi/allocator.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/role_manager.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/decision_tree.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/nodeheap.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/nodeset.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/pipeline.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/proposer.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/storage.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/storage/snapwrap.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/util.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/raft/wait.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/apply.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/by.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/clusters.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/combinators.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/configs.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/doc.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/extensions.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/memory.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/networks.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/nodes.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/object.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/resources.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/secrets.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/services.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/store/tasks.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/state/watch.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/watchapi/server.go
create mode 100644 vendor/github.com/docker/swarmkit/manager/watchapi/watch.go
create mode 100644 vendor/github.com/docker/swarmkit/node/node.go
create mode 100644 vendor/github.com/docker/swarmkit/protobuf/plugin/gen.go
create mode 100644 vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go
create mode 100644 vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
create mode 100644 vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto
create mode 100644 vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go
create mode 100644 vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go
create mode 100644 vendor/github.com/docker/swarmkit/remotes/remotes.go
create mode 100644 vendor/github.com/docker/swarmkit/template/context.go
create mode 100644 vendor/github.com/docker/swarmkit/template/expand.go
create mode 100644 vendor/github.com/docker/swarmkit/template/getter.go
create mode 100644 vendor/github.com/docker/swarmkit/template/template.go
create mode 100644 vendor/github.com/docker/swarmkit/vendor.conf
create mode 100644 vendor/github.com/docker/swarmkit/watch/watch.go
create mode 100644 vendor/github.com/docker/swarmkit/xnet/xnet_unix.go
create mode 100644 vendor/github.com/docker/swarmkit/xnet/xnet_windows.go
create mode 100644 vendor/github.com/fluent/fluent-logger-golang/LICENSE
create mode 100644 vendor/github.com/fluent/fluent-logger-golang/README.md
create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
create mode 100644 vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE
create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go
create mode 100644 vendor/github.com/go-check/check/LICENSE
create mode 100644 vendor/github.com/go-check/check/README.md
create mode 100644 vendor/github.com/go-check/check/benchmark.go
create mode 100644 vendor/github.com/go-check/check/check.go
create mode 100644 vendor/github.com/go-check/check/checkers.go
create mode 100644 vendor/github.com/go-check/check/helpers.go
create mode 100644 vendor/github.com/go-check/check/printer.go
create mode 100644 vendor/github.com/go-check/check/reporter.go
create mode 100644 vendor/github.com/go-check/check/run.go
create mode 100644 vendor/github.com/go-ini/ini/LICENSE
create mode 100644 vendor/github.com/go-ini/ini/README.md
create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md
create mode 100644 vendor/github.com/go-ini/ini/ini.go
create mode 100644 vendor/github.com/go-ini/ini/struct.go
create mode 100644 vendor/github.com/godbus/dbus/LICENSE
create mode 100644 vendor/github.com/godbus/dbus/README.markdown
create mode 100644 vendor/github.com/godbus/dbus/auth.go
create mode 100644 vendor/github.com/godbus/dbus/auth_external.go
create mode 100644 vendor/github.com/godbus/dbus/auth_sha1.go
create mode 100644 vendor/github.com/godbus/dbus/call.go
create mode 100644 vendor/github.com/godbus/dbus/conn.go
create mode 100644 vendor/github.com/godbus/dbus/conn_darwin.go
create mode 100644 vendor/github.com/godbus/dbus/conn_other.go
create mode 100644 vendor/github.com/godbus/dbus/dbus.go
create mode 100644 vendor/github.com/godbus/dbus/decoder.go
create mode 100644 vendor/github.com/godbus/dbus/doc.go
create mode 100644 vendor/github.com/godbus/dbus/encoder.go
create mode 100644 vendor/github.com/godbus/dbus/export.go
create mode 100644 vendor/github.com/godbus/dbus/homedir.go
create mode 100644 vendor/github.com/godbus/dbus/homedir_dynamic.go
create mode 100644 vendor/github.com/godbus/dbus/homedir_static.go
create mode 100644 vendor/github.com/godbus/dbus/message.go
create mode 100644 vendor/github.com/godbus/dbus/object.go
create mode 100644 vendor/github.com/godbus/dbus/sig.go
create mode 100644 vendor/github.com/godbus/dbus/transport_darwin.go
create mode 100644 vendor/github.com/godbus/dbus/transport_generic.go
create mode 100644 vendor/github.com/godbus/dbus/transport_unix.go
create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_linux.go
create mode 100644 vendor/github.com/godbus/dbus/variant.go
create mode 100644 vendor/github.com/godbus/dbus/variant_lexer.go
create mode 100644 vendor/github.com/godbus/dbus/variant_parser.go
create mode 100644 vendor/github.com/gogo/protobuf/LICENSE
create mode 100644 vendor/github.com/gogo/protobuf/README
create mode 100644 vendor/github.com/gogo/protobuf/Readme.md
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go
create mode 100644 vendor/github.com/gogo/protobuf/io/full.go
create mode 100644 vendor/github.com/gogo/protobuf/io/io.go
create mode 100644 vendor/github.com/gogo/protobuf/io/uint32.go
create mode 100644 vendor/github.com/gogo/protobuf/io/varint.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/decode_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go
create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
create mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
create mode 100644 vendor/github.com/gogo/protobuf/types/any.go
create mode 100644 vendor/github.com/gogo/protobuf/types/any.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/types/doc.go
create mode 100644 vendor/github.com/gogo/protobuf/types/duration.go
create mode 100644 vendor/github.com/gogo/protobuf/types/duration.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/types/duration_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/types/empty.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/types/field_mask.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/types/struct.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.go
create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.pb.go
create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go
create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers.pb.go
create mode 100644 vendor/github.com/golang/protobuf/LICENSE
100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/README.md create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/google/certificate-transparency/LICENSE create mode 100644 vendor/github.com/google/certificate-transparency/README-MacOS.md create mode 100644 vendor/github.com/google/certificate-transparency/README.md create mode 100644 vendor/github.com/google/certificate-transparency/cpp/third_party/curl/hostcheck.c create mode 100644 vendor/github.com/google/certificate-transparency/cpp/third_party/curl/hostcheck.h create mode 100644 vendor/github.com/google/certificate-transparency/cpp/third_party/isec_partners/openssl_hostname_validation.c create mode 100644 vendor/github.com/google/certificate-transparency/cpp/third_party/isec_partners/openssl_hostname_validation.h create mode 100644 vendor/github.com/google/certificate-transparency/cpp/version.h create mode 100644 vendor/github.com/google/certificate-transparency/go/README.md create mode 100755 vendor/github.com/google/certificate-transparency/go/asn1/asn1.go create mode 100755 vendor/github.com/google/certificate-transparency/go/asn1/common.go create mode 100755 vendor/github.com/google/certificate-transparency/go/asn1/marshal.go create mode 100644 vendor/github.com/google/certificate-transparency/go/client/getentries.go create mode 100644 vendor/github.com/google/certificate-transparency/go/client/logclient.go create mode 100644 vendor/github.com/google/certificate-transparency/go/serialization.go create 
mode 100644 vendor/github.com/google/certificate-transparency/go/signatures.go create mode 100644 vendor/github.com/google/certificate-transparency/go/types.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/cert_pool.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/pem_decrypt.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/pkcs1.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/pkcs8.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/pkix/pkix.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/root.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/root_darwin.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/root_plan9.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/root_stub.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/root_unix.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/root_windows.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/sec1.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/verify.go create mode 100755 vendor/github.com/google/certificate-transparency/go/x509/x509.go create mode 100644 vendor/github.com/google/certificate-transparency/proto/ct.proto create mode 100644 vendor/github.com/googleapis/gax-go/LICENSE create mode 100644 vendor/github.com/googleapis/gax-go/README.md create mode 100644 vendor/github.com/googleapis/gax-go/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/gax.go create mode 100644 vendor/github.com/googleapis/gax-go/invoke.go create mode 100644 vendor/github.com/googleapis/gax-go/path_template.go create mode 100644 vendor/github.com/googleapis/gax-go/path_template_parser.go create mode 100644 vendor/github.com/gorilla/context/LICENSE create mode 100644 vendor/github.com/gorilla/context/README.md create mode 100644 vendor/github.com/gorilla/context/context.go create mode 100644 vendor/github.com/gorilla/context/doc.go create mode 100644 vendor/github.com/gorilla/mux/LICENSE create mode 100644 vendor/github.com/gorilla/mux/README.md create mode 100644 vendor/github.com/gorilla/mux/doc.go create mode 100644 vendor/github.com/gorilla/mux/mux.go create mode 100644 vendor/github.com/gorilla/mux/regexp.go create mode 100644 vendor/github.com/gorilla/mux/route.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iradix.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go create mode 100644 
vendor/github.com/hashicorp/go-immutable-radix/node.go create mode 100644 vendor/github.com/hashicorp/go-memdb/LICENSE create mode 100644 vendor/github.com/hashicorp/go-memdb/README.md create mode 100644 vendor/github.com/hashicorp/go-memdb/index.go create mode 100644 vendor/github.com/hashicorp/go-memdb/memdb.go create mode 100644 vendor/github.com/hashicorp/go-memdb/schema.go create mode 100644 vendor/github.com/hashicorp/go-memdb/txn.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/LICENSE create mode 100644 vendor/github.com/hashicorp/go-msgpack/README.md create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/0doc.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/README.md create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/binc.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/decode.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/encode.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/helper.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/rpc.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/simple.go create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/time.go create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/LICENSE create mode 100644 vendor/github.com/hashicorp/go-sockaddr/README.md create mode 100644 vendor/github.com/hashicorp/go-sockaddr/doc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifattr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/rfc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_default.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/unixsock.go create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/memberlist/LICENSE create mode 100644 vendor/github.com/hashicorp/memberlist/README.md create mode 100644 
vendor/github.com/hashicorp/memberlist/alive_delegate.go create mode 100644 vendor/github.com/hashicorp/memberlist/awareness.go create mode 100644 vendor/github.com/hashicorp/memberlist/broadcast.go create mode 100644 vendor/github.com/hashicorp/memberlist/config.go create mode 100644 vendor/github.com/hashicorp/memberlist/conflict_delegate.go create mode 100644 vendor/github.com/hashicorp/memberlist/delegate.go create mode 100644 vendor/github.com/hashicorp/memberlist/event_delegate.go create mode 100644 vendor/github.com/hashicorp/memberlist/keyring.go create mode 100644 vendor/github.com/hashicorp/memberlist/logging.go create mode 100644 vendor/github.com/hashicorp/memberlist/memberlist.go create mode 100644 vendor/github.com/hashicorp/memberlist/merge_delegate.go create mode 100644 vendor/github.com/hashicorp/memberlist/mock_transport.go create mode 100644 vendor/github.com/hashicorp/memberlist/net.go create mode 100644 vendor/github.com/hashicorp/memberlist/net_transport.go create mode 100644 vendor/github.com/hashicorp/memberlist/ping_delegate.go create mode 100644 vendor/github.com/hashicorp/memberlist/queue.go create mode 100644 vendor/github.com/hashicorp/memberlist/security.go create mode 100644 vendor/github.com/hashicorp/memberlist/state.go create mode 100644 vendor/github.com/hashicorp/memberlist/suspicion.go create mode 100644 vendor/github.com/hashicorp/memberlist/transport.go create mode 100644 vendor/github.com/hashicorp/memberlist/util.go create mode 100644 vendor/github.com/hashicorp/serf/LICENSE create mode 100644 vendor/github.com/hashicorp/serf/README.md create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go create mode 100644 vendor/github.com/hashicorp/serf/serf/broadcast.go create mode 100644 vendor/github.com/hashicorp/serf/serf/coalesce.go create mode 100644 vendor/github.com/hashicorp/serf/serf/coalesce_member.go create mode 100644 vendor/github.com/hashicorp/serf/serf/coalesce_user.go create mode 100644 vendor/github.com/hashicorp/serf/serf/config.go create mode 100644 vendor/github.com/hashicorp/serf/serf/conflict_delegate.go create mode 100644 vendor/github.com/hashicorp/serf/serf/delegate.go create mode 100644 vendor/github.com/hashicorp/serf/serf/event.go create mode 100644 vendor/github.com/hashicorp/serf/serf/event_delegate.go create mode 100644 vendor/github.com/hashicorp/serf/serf/internal_query.go create mode 100644 vendor/github.com/hashicorp/serf/serf/keymanager.go create mode 100644 vendor/github.com/hashicorp/serf/serf/lamport.go create mode 100644 vendor/github.com/hashicorp/serf/serf/merge_delegate.go create mode 100644 vendor/github.com/hashicorp/serf/serf/messages.go create mode 100644 vendor/github.com/hashicorp/serf/serf/ping_delegate.go create mode 100644 vendor/github.com/hashicorp/serf/serf/query.go create mode 100644 vendor/github.com/hashicorp/serf/serf/serf.go create mode 100644 vendor/github.com/hashicorp/serf/serf/snapshot.go create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 
vendor/github.com/inconshreveable/mousetrap/LICENSE create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/kr/pty/License create mode 100644 vendor/github.com/kr/pty/README.md create mode 100644 vendor/github.com/kr/pty/doc.go create mode 100644 vendor/github.com/kr/pty/ioctl.go create mode 100644 vendor/github.com/kr/pty/ioctl_bsd.go create mode 100644 vendor/github.com/kr/pty/pty_darwin.go create mode 100644 vendor/github.com/kr/pty/pty_freebsd.go create mode 100644 vendor/github.com/kr/pty/pty_linux.go create mode 100644 vendor/github.com/kr/pty/pty_unsupported.go create mode 100644 vendor/github.com/kr/pty/run.go create mode 100644 vendor/github.com/kr/pty/util.go create mode 100644 vendor/github.com/kr/pty/ztypes_386.go create mode 100644 vendor/github.com/kr/pty/ztypes_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_arm.go create mode 100644 vendor/github.com/kr/pty/ztypes_arm64.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_386.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_arm.go create mode 100644 vendor/github.com/kr/pty/ztypes_ppc64.go create mode 100644 vendor/github.com/kr/pty/ztypes_ppc64le.go create mode 100644 vendor/github.com/kr/pty/ztypes_s390x.go create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/README.md create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/miekg/dns/LICENSE create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 
vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/rawmsg.go create mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scanner.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 vendor/github.com/miekg/dns/udp_linux.go create mode 100644 vendor/github.com/miekg/dns/udp_other.go create mode 100644 vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zgenerate.go create mode 100644 vendor/github.com/miekg/dns/zscan.go create mode 100644 vendor/github.com/miekg/dns/zscan_rr.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go create mode 100644 vendor/github.com/mistifyio/go-zfs/LICENSE create mode 100644 vendor/github.com/mistifyio/go-zfs/README.md create mode 100644 vendor/github.com/mistifyio/go-zfs/error.go create mode 100644 vendor/github.com/mistifyio/go-zfs/utils.go create mode 100644 vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go create mode 100644 vendor/github.com/mistifyio/go-zfs/utils_solaris.go create mode 100644 vendor/github.com/mistifyio/go-zfs/zfs.go create mode 100644 vendor/github.com/mistifyio/go-zfs/zpool.go create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/mrunalp/fileutils/LICENSE create mode 100644 vendor/github.com/mrunalp/fileutils/README.md create mode 100644 vendor/github.com/mrunalp/fileutils/fileutils.go create mode 100644 vendor/github.com/mrunalp/fileutils/idtools.go create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.code create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 vendor/github.com/opencontainers/go-digest/README.md create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 
vendor/github.com/opencontainers/image-spec/README.md create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go create mode 100644 vendor/github.com/opencontainers/runc/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/README.md create mode 100644 vendor/github.com/opencontainers/runc/VERSION create mode 100644 vendor/github.com/opencontainers/runc/checkpoint.go create mode 100644 vendor/github.com/opencontainers/runc/create.go create mode 100644 vendor/github.com/opencontainers/runc/delete.go create mode 100644 vendor/github.com/opencontainers/runc/events.go create mode 100644 vendor/github.com/opencontainers/runc/exec.go create mode 100644 vendor/github.com/opencontainers/runc/kill.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/README.md create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/rootless/rootless.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/network.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/number.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/generic_error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/message_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/network_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/restored_process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/specconv/example.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/state_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/sync.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/linux.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/system/proc.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go create mode 100644 vendor/github.com/opencontainers/runc/list.go create mode 100644 vendor/github.com/opencontainers/runc/main.go create mode 100644 vendor/github.com/opencontainers/runc/main_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/main_unix.go create mode 100644 vendor/github.com/opencontainers/runc/main_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/notify_socket.go create mode 100644 vendor/github.com/opencontainers/runc/pause.go create mode 100644 vendor/github.com/opencontainers/runc/ps.go create mode 100644 vendor/github.com/opencontainers/runc/restore.go create mode 100644 vendor/github.com/opencontainers/runc/rlimit_linux.go create mode 100644 vendor/github.com/opencontainers/runc/run.go create mode 100644 vendor/github.com/opencontainers/runc/signals.go create mode 100644 vendor/github.com/opencontainers/runc/spec.go create mode 100644 vendor/github.com/opencontainers/runc/start.go create mode 100644 vendor/github.com/opencontainers/runc/state.go create mode 100644 vendor/github.com/opencontainers/runc/tty.go create mode 100644 vendor/github.com/opencontainers/runc/update.go create mode 100644 vendor/github.com/opencontainers/runc/utils.go create mode 100644 vendor/github.com/opencontainers/runc/utils_linux.go create mode 100644 vendor/github.com/opencontainers/runc/vendor.conf create mode 100644 vendor/github.com/opencontainers/runtime-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-spec/README.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/selinux/LICENSE create mode 100644 vendor/github.com/opencontainers/selinux/README.md create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go create mode 100644 
vendor/github.com/opencontainers/selinux/go-selinux/selinux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go create mode 100644 vendor/github.com/pborman/uuid/LICENSE create mode 100644 vendor/github.com/pborman/uuid/README.md create mode 100755 vendor/github.com/pborman/uuid/dce.go create mode 100755 vendor/github.com/pborman/uuid/doc.go create mode 100644 vendor/github.com/pborman/uuid/hash.go create mode 100644 vendor/github.com/pborman/uuid/json.go create mode 100755 vendor/github.com/pborman/uuid/node.go create mode 100644 vendor/github.com/pborman/uuid/sql.go create mode 100755 vendor/github.com/pborman/uuid/time.go create mode 100644 vendor/github.com/pborman/uuid/util.go create mode 100644 vendor/github.com/pborman/uuid/uuid.go create mode 100644 vendor/github.com/pborman/uuid/version1.go create mode 100644 vendor/github.com/pborman/uuid/version4.go create mode 100644 vendor/github.com/philhofer/fwd/LICENSE.md create mode 100644 vendor/github.com/philhofer/fwd/README.md create mode 100644 vendor/github.com/philhofer/fwd/reader.go create mode 100644 vendor/github.com/philhofer/fwd/writer.go create mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go create mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go create mode 100644 vendor/github.com/pivotal-golang/clock/LICENSE create mode 100644 vendor/github.com/pivotal-golang/clock/README.md create mode 100644 vendor/github.com/pivotal-golang/clock/clock.go create mode 100644 vendor/github.com/pivotal-golang/clock/ticker.go create mode 100644 vendor/github.com/pivotal-golang/clock/timer.go create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/README.md create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE create mode 100644 vendor/github.com/pmezard/go-difflib/README.md create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/README.md create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/README.md create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 
vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100755 vendor/github.com/resin-os/circbuf/LICENSE create mode 100644 vendor/github.com/resin-os/circbuf/README.md create mode 100644 vendor/github.com/resin-os/circbuf/circbuf.go create mode 100644 vendor/github.com/resin-os/librsync-go/LICENSE create mode 100644 vendor/github.com/resin-os/librsync-go/README.md create mode 100644 vendor/github.com/resin-os/librsync-go/delta.go create mode 100644 vendor/github.com/resin-os/librsync-go/match.go create mode 100644 vendor/github.com/resin-os/librsync-go/op.go create mode 100644 vendor/github.com/resin-os/librsync-go/patch.go create mode 100644 vendor/github.com/resin-os/librsync-go/rollsum.go create mode 100644 vendor/github.com/resin-os/librsync-go/signature.go create mode 100644 vendor/github.com/sean-/seed/LICENSE create mode 100644 vendor/github.com/sean-/seed/README.md create mode 100644 vendor/github.com/sean-/seed/init.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/LICENSE create mode 100644 vendor/github.com/seccomp/libseccomp-golang/README create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/cobra/args.go create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 vendor/github.com/spf13/cobra/cobra.go create mode 100644 vendor/github.com/spf13/cobra/command.go create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 vendor/github.com/spf13/cobra/command_win.go create mode 100644 vendor/github.com/spf13/pflag/LICENSE create mode 100644 vendor/github.com/spf13/pflag/README.md create mode 100644 vendor/github.com/spf13/pflag/bool.go create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 vendor/github.com/spf13/pflag/count.go create mode 100644 vendor/github.com/spf13/pflag/duration.go create mode 100644 vendor/github.com/spf13/pflag/flag.go create mode 100644 vendor/github.com/spf13/pflag/float32.go create mode 100644 vendor/github.com/spf13/pflag/float64.go create mode 100644 vendor/github.com/spf13/pflag/golangflag.go create mode 100644 vendor/github.com/spf13/pflag/int.go create mode 100644 
vendor/github.com/spf13/pflag/int32.go create mode 100644 vendor/github.com/spf13/pflag/int64.go create mode 100644 vendor/github.com/spf13/pflag/int8.go create mode 100644 vendor/github.com/spf13/pflag/int_slice.go create mode 100644 vendor/github.com/spf13/pflag/ip.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 vendor/github.com/spf13/pflag/ipmask.go create mode 100644 vendor/github.com/spf13/pflag/ipnet.go create mode 100644 vendor/github.com/spf13/pflag/string.go create mode 100644 vendor/github.com/spf13/pflag/string_array.go create mode 100644 vendor/github.com/spf13/pflag/string_slice.go create mode 100644 vendor/github.com/spf13/pflag/uint.go create mode 100644 vendor/github.com/spf13/pflag/uint16.go create mode 100644 vendor/github.com/spf13/pflag/uint32.go create mode 100644 vendor/github.com/spf13/pflag/uint64.go create mode 100644 vendor/github.com/spf13/pflag/uint8.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 vendor/github.com/stevvooe/continuity/LICENSE create mode 100644 vendor/github.com/stevvooe/continuity/README.md create mode 100644 vendor/github.com/stevvooe/continuity/sysx/asm.s create mode 100644 vendor/github.com/stevvooe/continuity/sysx/chmod_darwin.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_386.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_amd64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd_amd64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/chmod_linux.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/copy_linux.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/copy_linux_386.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/nodata_linux.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/nodata_unix.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/sys.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_386.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_amd64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_darwin.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_386.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_amd64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_freebsd.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux_386.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux_amd64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm64.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64.go create mode 100644 
vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64le.go create mode 100644 vendor/github.com/stevvooe/continuity/sysx/xattr_linux_s390x.go create mode 100644 vendor/github.com/stretchr/testify/LICENSE create mode 100644 vendor/github.com/stretchr/testify/README.md create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/doc.go create mode 100644 vendor/github.com/stretchr/testify/assert/errors.go create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 vendor/github.com/stretchr/testify/require/doc.go create mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements.go create mode 100644 vendor/github.com/stretchr/testify/require/require.go create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go create mode 100644 vendor/github.com/stretchr/testify/require/requirements.go create mode 100644 vendor/github.com/syndtr/gocapability/LICENSE create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/tchap/go-patricia/LICENSE create mode 100644 vendor/github.com/tchap/go-patricia/README.md create mode 100644 vendor/github.com/tchap/go-patricia/patricia/children.go create mode 100644 vendor/github.com/tchap/go-patricia/patricia/patricia.go create mode 100644 vendor/github.com/tinylib/msgp/LICENSE create mode 100644 vendor/github.com/tinylib/msgp/README.md create mode 100644 vendor/github.com/tinylib/msgp/msgp/circular.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number_appengine.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number_unsafe.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go create mode 100644 vendor/github.com/tonistiigi/fifo/LICENSE create mode 100644 vendor/github.com/tonistiigi/fifo/fifo.go create mode 100644 vendor/github.com/tonistiigi/fifo/handle_linux.go create mode 100644 vendor/github.com/tonistiigi/fifo/handle_nolinux.go create mode 100644 vendor/github.com/tonistiigi/fifo/readme.md create mode 
100644 vendor/github.com/tonistiigi/fsutil/LICENSE create mode 100644 vendor/github.com/tonistiigi/fsutil/diff.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/generate.go create mode 100644 vendor/github.com/tonistiigi/fsutil/hardlinks.go create mode 100644 vendor/github.com/tonistiigi/fsutil/readme.md create mode 100644 vendor/github.com/tonistiigi/fsutil/receive.go create mode 100644 vendor/github.com/tonistiigi/fsutil/receive_unsupported.go create mode 100644 vendor/github.com/tonistiigi/fsutil/send.go create mode 100644 vendor/github.com/tonistiigi/fsutil/stat.pb.go create mode 100644 vendor/github.com/tonistiigi/fsutil/stat.proto create mode 100644 vendor/github.com/tonistiigi/fsutil/validator.go create mode 100644 vendor/github.com/tonistiigi/fsutil/walker.go create mode 100644 vendor/github.com/tonistiigi/fsutil/walker_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/walker_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/wire.pb.go create mode 100644 vendor/github.com/tonistiigi/fsutil/wire.proto create mode 100644 vendor/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/funcs.go create mode 100644 vendor/github.com/urfave/cli/help.go create mode 100644 vendor/github.com/vbatts/tar-split/LICENSE create mode 100644 vendor/github.com/vbatts/tar-split/README.md create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/common.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/reader.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/writer.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/README.md create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/assemble.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/doc.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/doc.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/entry.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/getter.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/packer.go create mode 100644 vendor/github.com/vdemeester/shakers/LICENSE create mode 100644 vendor/github.com/vdemeester/shakers/README.md create mode 100644 vendor/github.com/vdemeester/shakers/bool.go create mode 100644 
vendor/github.com/vdemeester/shakers/common.go create mode 100644 vendor/github.com/vdemeester/shakers/string.go create mode 100644 vendor/github.com/vdemeester/shakers/time.go create mode 100644 vendor/github.com/vishvananda/netlink/LICENSE create mode 100644 vendor/github.com/vishvananda/netlink/README.md create mode 100644 vendor/github.com/vishvananda/netlink/addr.go create mode 100644 vendor/github.com/vishvananda/netlink/addr_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/bpf_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/class.go create mode 100644 vendor/github.com/vishvananda/netlink/class_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/conntrack_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/conntrack_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/filter.go create mode 100644 vendor/github.com/vishvananda/netlink/filter_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/genetlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/genetlink_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/gtp_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/handle_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/handle_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/link.go create mode 100644 vendor/github.com/vishvananda/netlink/link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/link_tuntap_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/neigh.go create mode 100644 vendor/github.com/vishvananda/netlink/neigh_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/netlink.go create mode 100644 vendor/github.com/vishvananda/netlink/netlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/netlink_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/addr_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/mpls_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/nl_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/route_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/syscall.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/tc_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/order.go create mode 100644 vendor/github.com/vishvananda/netlink/protinfo.go create mode 100644 vendor/github.com/vishvananda/netlink/protinfo_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/route.go create mode 100644 vendor/github.com/vishvananda/netlink/route_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/route_unspecified.go create mode 100644 
vendor/github.com/vishvananda/netlink/rule.go create mode 100644 vendor/github.com/vishvananda/netlink/rule_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/socket.go create mode 100644 vendor/github.com/vishvananda/netlink/socket_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state_linux.go create mode 100644 vendor/github.com/vishvananda/netns/LICENSE create mode 100644 vendor/github.com/vishvananda/netns/README.md create mode 100644 vendor/github.com/vishvananda/netns/netns.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux_386.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux_amd64.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux_arm.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux_arm64.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux_ppc64le.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux_s390x.go create mode 100644 vendor/github.com/vishvananda/netns/netns_unspecified.go create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go create mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/README create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go create mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s create mode 100644 vendor/golang.org/x/crypto/md4/md4.go create mode 100644 vendor/golang.org/x/crypto/md4/md4block.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ref.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/README create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/configure_transport.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 
vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/go16.go create mode 100644 vendor/golang.org/x/net/http2/go17.go create mode 100644 vendor/golang.org/x/net/http2/go17_not18.go create mode 100644 vendor/golang.org/x/net/http2/go18.go create mode 100644 vendor/golang.org/x/net/http2/go19.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/not_go16.go create mode 100644 vendor/golang.org/x/net/http2/not_go17.go create mode 100644 vendor/golang.org/x/net/http2/not_go18.go create mode 100644 vendor/golang.org/x/net/http2/not_go19.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/idna/idna.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/tables.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/net/trace/trace_go16.go create mode 100644 vendor/golang.org/x/net/trace/trace_go17.go create mode 100644 vendor/golang.org/x/net/websocket/client.go create mode 100644 vendor/golang.org/x/net/websocket/dial.go create mode 100644 vendor/golang.org/x/net/websocket/hybi.go create mode 100644 vendor/golang.org/x/net/websocket/server.go create mode 100644 vendor/golang.org/x/net/websocket/websocket.go create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/appenginevm_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 
vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/README create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go create mode 100644 vendor/golang.org/x/sync/syncmap/map.go create mode 100644 vendor/golang.org/x/sys/LICENSE create mode 100644 vendor/golang.org/x/sys/PATENTS create mode 100644 vendor/golang.org/x/sys/README create mode 100644 vendor/golang.org/x/sys/unix/README.md create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/constants.go create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 vendor/golang.org/x/sys/unix/dirent.go create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 vendor/golang.org/x/sys/unix/env_unset.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/file_unix.go create mode 100644 vendor/golang.org/x/sys/unix/flock.go create mode 100644 vendor/golang.org/x/sys/unix/flock_linux_32bit.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 
vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/openbsd_pledge.go create mode 100644 vendor/golang.org/x/sys/unix/race.go create mode 100644 vendor/golang.org/x/sys/unix/race0.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 vendor/golang.org/x/sys/unix/str.go create mode 100644 vendor/golang.org/x/sys/unix/syscall.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_no_getwd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 
vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd.go create mode 100644 
vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 
vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/env_unset.go create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/svc/debug/log.go create mode 100644 vendor/golang.org/x/sys/windows/svc/debug/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/event.go create mode 100644 vendor/golang.org/x/sys/windows/svc/eventlog/install.go create mode 100644 vendor/golang.org/x/sys/windows/svc/eventlog/log.go create mode 100644 vendor/golang.org/x/sys/windows/svc/go12.c create mode 100644 vendor/golang.org/x/sys/windows/svc/go12.go create mode 100644 vendor/golang.org/x/sys/windows/svc/go13.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/config.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/mgr.go create mode 100644 vendor/golang.org/x/sys/windows/svc/mgr/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/security.go create mode 100644 vendor/golang.org/x/sys/windows/svc/service.go create mode 100644 vendor/golang.org/x/sys/windows/svc/sys_386.s create mode 100644 vendor/golang.org/x/sys/windows/svc/sys_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/README create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 
vendor/golang.org/x/text/unicode/bidi/tables.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/README create mode 100644 vendor/golang.org/x/time/rate/rate.go create mode 100644 vendor/google.golang.org/api/LICENSE create mode 100644 vendor/google.golang.org/api/README.md create mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go create mode 100644 vendor/google.golang.org/api/internal/pool.go create mode 100644 vendor/google.golang.org/api/internal/settings.go create mode 100644 vendor/google.golang.org/api/iterator/iterator.go create mode 100644 vendor/google.golang.org/api/option/option.go create mode 100644 vendor/google.golang.org/api/support/bundler/bundler.go create mode 100644 vendor/google.golang.org/api/transport/dial.go create mode 100644 vendor/google.golang.org/api/transport/dial_appengine.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/README.md create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/PATENTS create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 
vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go18.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/go16.go create mode 100644 vendor/google.golang.org/grpc/go17.go create mode 100644 vendor/google.golang.org/grpc/grpclb.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto create mode 100644 vendor/google.golang.org/grpc/health/health.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/transport/control.go create mode 100644 vendor/google.golang.org/grpc/transport/go16.go create mode 100644 vendor/google.golang.org/grpc/transport/go17.go create mode 100644 vendor/google.golang.org/grpc/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/transport/transport.go create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 
100644 volume/drivers/adapter.go create mode 100644 volume/drivers/extpoint.go create mode 100644 volume/drivers/extpoint_test.go create mode 100644 volume/drivers/proxy.go create mode 100644 volume/drivers/proxy_test.go create mode 100644 volume/local/local.go create mode 100644 volume/local/local_test.go create mode 100644 volume/local/local_unix.go create mode 100644 volume/local/local_windows.go create mode 100644 volume/store/db.go create mode 100644 volume/store/errors.go create mode 100644 volume/store/restore.go create mode 100644 volume/store/store.go create mode 100644 volume/store/store_test.go create mode 100644 volume/store/store_unix.go create mode 100644 volume/store/store_windows.go create mode 100644 volume/testutils/testutils.go create mode 100644 volume/validate.go create mode 100644 volume/validate_test.go create mode 100644 volume/validate_test_unix.go create mode 100644 volume/validate_test_windows.go create mode 100644 volume/volume.go create mode 100644 volume/volume_copy.go create mode 100644 volume/volume_copy_unix.go create mode 100644 volume/volume_copy_windows.go create mode 100644 volume/volume_linux.go create mode 100644 volume/volume_linux_test.go create mode 100644 volume/volume_propagation_linux.go create mode 100644 volume/volume_propagation_linux_test.go create mode 100644 volume/volume_propagation_unsupported.go create mode 100644 volume/volume_test.go create mode 100644 volume/volume_unix.go create mode 100644 volume/volume_unsupported.go create mode 100644 volume/volume_windows.go create mode 100644 www/static/balena.svg create mode 100644 www/static/favicon.ico create mode 100644 www/static/features/bandwidth.svg create mode 100644 www/static/features/failure-resistant.svg create mode 100644 www/static/features/footprint.svg create mode 100644 www/static/features/multiple.svg create mode 100644 www/static/features/storage.svg create mode 100644 www/static/features/undisturbed.svg diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..4f90807 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,1854 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron L. 
Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Matsushima +Akihiro Suda +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anran Qiao +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Boudreau +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill W +bin liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boris Pruessmann +Boshi Lian +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear 
+Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. 
Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elena Morozova +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Lauer +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +Flavio Crisciani +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Foysal Iqbal +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +gautam, prasanna +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J.
Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou Zhy +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Janonymous +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jie Luo +Jilles Oldenbeuving +Jim Alateras +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +jimmyxian +Jinsoo Park +Jiri Popelka +Jiuyue Ma +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jorge Marin +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +jroenf +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Nayak +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin P. Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. 
Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +Muayyad Alsadi +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +Ovidio Mallo +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. 
Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Pratik Karki +Prayag Verma +Przemek Hejman +Pure White +pysqz +qhuang +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P.
Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Wilkie +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +uhayate +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wayne Chang +Wayne Song +Wei Wu +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinbo Weng +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +Yanqiang Miao +Yao Zaiyong +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +You-Sheng Yang (楊有勝) +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yuan Sun +Yuanhong Peng +Yunxiang Huang +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenkun Bi +zhouhao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +徐俊杰 +搏通 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..2d01bab --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog + +## 17.06+rev1 (2017-10-13) + +### Builder + +- Allow bind-mounting a volume in the build context [#27](https://github.com/resin-os/balena/pull/27) + +### CLI + +- Add command for generating image deltas [#35](https://github.com/resin-os/balena/pull/35) + +### Daemon + +- Add utility that can boot a system into a container [#10](https://github.com/resin-os/balena/pull/10) +- Add the ability to create binary delta between two images [#11](https://github.com/resin-os/balena/pull/11) +- Include engine name in version information [#32](https://github.com/resin-os/balena/pull/32) +- Minimize page cache usage during pull [de0993b](https://github.com/resin-os/balena/pull/12/commits/de0993b9c1ab9408f2716b147dc544b37bb1deb7) + +### Plugins + +- Disable plugin support [#14](https://github.com/resin-os/balena/pull/14) + +### Logging + +- Disable awslogs, fluentd, gcplogs, gelf, logentries, splunk, and syslog logging drivers [fe4d45c](https://github.com/resin-os/balena/pull/7/commits/fe4d45c5dcbaddff51aebfe584a68e8fb9f44449) + +### Runtime + +- Disable consul, etcd, and zookeeper discovery backends [380ba69](https://github.com/resin-os/balena/pull/7/commits/380ba69d00cb56278b65b28571d4d7e000392ec3) + +### Swarm Mode + +- Disable swarm mode [#14](https://github.com/resin-os/balena/pull/14) + +### Distribution + +- On-the-fly layer extraction during docker pull 
[#8](https://github.com/resin-os/balena/pull/8) +- Improve layer reuse when pushing to v1 registries [#28](https://github.com/resin-os/balena/pull/28) +- Include a combined progress bar of all the layers in the output [#31](https://github.com/resin-os/balena/pull/31) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..917214c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,455 @@ +# Contributing to Docker
+
+Want to hack on Docker? Awesome! We have a contributor's guide that explains
+[setting up a Docker development environment and the contribution
+process](https://docs.docker.com/opensource/project/who-written-for/).
+
+[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/)
+
+This page contains information about reporting issues as well as some tips and
+guidelines useful to experienced open source contributors. Finally, make sure
+you read our [community guidelines](#docker-community-guidelines) before you
+start participating.
+
+## Topics
+
+* [Reporting Security Issues](#reporting-security-issues)
+* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
+* [Reporting Issues](#reporting-other-issues)
+* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
+* [Community Guidelines](#docker-community-guidelines)
+
+## Reporting security issues
+
+The Docker maintainers take security seriously. If you discover a security
+issue, please bring it to their attention right away!
+
+Please **DO NOT** file a public issue; instead, send your report privately to
+[security@docker.com](mailto:security@docker.com).
+
+Security reports are greatly appreciated and we will publicly thank you for them.
+We also like to send gifts—if you're into Docker schwag, make sure to let
+us know. We currently do not offer a paid security bounty program, but are not
+ruling it out in the future.
+
+
+## Reporting other issues
+
+A great way to contribute to the project is to send a detailed report when you
+encounter an issue. We always appreciate a well-written, thorough bug report,
+and will thank you for it!
+
+Check that [our issue database](https://github.com/moby/moby/issues)
+doesn't already include that problem or suggestion before submitting an issue.
+If you find a match, you can use the "subscribe" button to get notified of
+updates. Do *not* leave random "+1" or "I have this too" comments, as they
+only clutter the discussion and don't help resolve it. However, if you
+have ways to reproduce the issue or have additional information that may help
+resolve the issue, please leave a comment.
+
+When reporting issues, always include:
+
+* The output of `docker version`.
+* The output of `docker info`.
+
+Also include the steps required to reproduce the problem if possible and
+applicable. This information will help us review and fix your issue faster.
+When sending lengthy log files, consider posting them as a gist (https://gist.github.com).
+Don't forget to remove sensitive data from your log files before posting (you can
+replace those parts with "REDACTED").
+
+## Quick contribution tips and guidelines
+
+This section gives the experienced contributor some tips and guidelines.
+
+### Pull requests are always welcome
+
+Not sure if that typo is worth a pull request? Found a bug and know how to fix
+it? Do it! We will appreciate it.
Any significant improvement should be
+documented as [a GitHub issue](https://github.com/moby/moby/issues) before
+anybody starts working on it.
+
+We are always thrilled to receive pull requests. We do our best to process them
+quickly. If your pull request is not accepted on the first try,
+don't get discouraged! Our contributor's guide explains [the review process we
+use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
+
+### Design and cleanup proposals
+
+You can propose new designs for existing Docker features. You can also design
+entirely new features. We really appreciate contributors who want to refactor or
+otherwise clean up our project. For information on making these types of
+contributions, see [the advanced contribution
+section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
+the contributors guide.
+
+We try hard to keep Docker lean and focused. Docker can't do everything for
+everybody. This means that we might decide against incorporating a new feature.
+However, there might be a way to implement that feature *on top of* Docker.
+
+### Talking to other Docker users and contributors
+
+**Forums**: A public forum for users to discuss questions and explore current
+design patterns and best practices about Docker and related projects in the
+Docker Ecosystem. To participate, just log in with your Docker Hub account on
+https://forums.docker.com.
+
+**Internet Relay Chat (IRC)**: IRC is a direct line to our most knowledgeable
+Docker users; we have both the #docker and #docker-dev channels on
+irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users.
+You can search our chat archives. Read our IRC quickstart guide for an easy
+way to get started.
+
+**Google Group**: The docker-dev group is for contributors and other people
+contributing to the Docker project. You can join it without a Google account by
+sending an email to docker-dev+subscribe@googlegroups.com. After receiving the
+join-request message, simply reply to it to confirm the subscription.
+
+**Twitter**: You can follow Docker's Twitter feed to get updates on our
+products. You can also tweet us questions or just share blogs or stories.
+
+**Stack Overflow**: Stack Overflow has thousands of Docker questions listed. We
+regularly monitor Docker questions and so do many other knowledgeable Docker
+users.
+
### Conventions
+
+Fork the repository and make changes on your fork in a feature branch:
+
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
+  the issue.
+- If it's a feature branch, create an enhancement issue to announce
+  your intentions, and name it XXXX-something where XXXX is the number of the
+  issue.
+
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. [Run the full test
+suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
+submitting a pull request.
+
+Update the documentation when creating or modifying features. Test your
+documentation changes for clarity, concision, and correctness, as well as a
+clean documentation build. See our contributors guide for [our style
+guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
+the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plug-ins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a reference
+to all the issues that they address.
+
+### Successful Changes
+
+Before contributing large or high impact changes, make the effort to coordinate
+with the maintainers of the project before submitting a pull request. This
+prevents you from doing extra work that may or may not be merged.
+
+Large PRs that are just submitted without any prior communication are unlikely
+to be successful.
+
+While pull requests are the methodology for submitting changes to code, changes
+are much more likely to be accepted if they are accompanied by additional
+engineering work. While we don't define this explicitly, most of these goals
+are accomplished through communication of the design goals and subsequent
+solutions. Oftentimes, it helps to first state the problem before presenting
+solutions.
+
+Typically, the best method of accomplishing this is to submit an issue
+stating the problem. This issue can include a problem statement and a
+checklist with requirements. If solutions are proposed, alternatives should be
+listed and eliminated. Even if the criterion for eliminating a solution is
+frivolous, say so.
+
+Larger changes typically work best with design documents. These are focused on
+providing context to the design at the time the feature was conceived and can
+inform future documentation contributions.
+
+### Commit Messages
+
+Commit messages must start with a capitalized and short summary (max. 50 chars)
+written in the imperative, followed by an optional, more detailed explanatory
+text which is separated from the summary by an empty line.
+
+Commit messages should follow best practices, including explaining the context
+of the problem and how it was solved, as well as any caveats or follow-up
+changes required. They should tell the story of the change and give readers
+an understanding of what led to it.
+
+If you're lost about what this even means, please see [How to Write a Git
+Commit Message](http://chris.beams.io/posts/git-commit/) for a start.
+
+In practice, the best approach to maintaining a nice commit message is to
+leverage `git add -p` and `git commit --amend` to formulate a solid
+changeset.
This allows one to piece together a change as information becomes
+available.
+
+If you squash a series of commits, don't just submit that. Re-write the commit
+message, as if the series of commits was a single stroke of brilliance.
+
+That said, there is no requirement to have a single commit for a PR, as long as
+each commit tells the story. For example, if there is a feature that requires a
+package, it might make sense to have the package in a separate commit, then have
+a subsequent commit that uses it.
+
+Remember, you're telling part of the story with the commit message. Don't make
+your chapter weird.
+
+### Review
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Post
+a comment after pushing. New commits show up in the pull request automatically,
+but the reviewers are notified only when you comment.
+
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
+feature branch to update your pull request rather than `git merge master`.
+
+Before you make a pull request, squash your commits into logical units of work
+using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
+set of patches that should be reviewed together: for example, upgrading the
+version of a vendored dependency and taking advantage of its now available new
+feature constitute two separate units of work. Implementing a new function and
+calling it in another file constitute a single logical unit of work. The vast
+majority of submissions should have a single commit, so if in doubt: squash
+down to one.
+
+After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). Include documentation
+changes in the same pull request so that a revert would remove all traces of
+the feature or fix.
+
+Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
+close an issue. Including references automatically closes the issue on a merge.
+
+Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
+from the Git history.
+
+Please see the [Coding Style](#coding-style) for further guidelines.
+
+### Merge approval
+
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance.
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
+
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+### How can I become a maintainer?
+
+The procedures for adding new maintainers are explained in the
+global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
+file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
+repository.
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available. You don't have to be a
+maintainer to make a difference on the project!
+
+## Docker community guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We need
+your help to keep it that way. To help with this we've come up with some general
+guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members:
+  no regional, racial, gender, or other abuse will be tolerated. We like
+  nice people way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community feel
+  welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break
+  the law.
+
+* Stay on topic: Make sure that you are posting to the correct channel and
+  avoid off-topic discussions. Remember when you update an issue or respond
+  to an email you are potentially sending to a large number of people. Please
+  consider this before you update. Also remember that nobody likes spam.
+
+* Don't send email to the maintainers: There's no need to send email to the
+  maintainers to ask them to investigate an issue or to take a look at a
+  pull request. Instead of sending an email, GitHub mentions should be
+  used to ping maintainers to review a pull request, a proposal or an
+  issue.
+
+### Guideline violations — 3 strikes method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1.
First occurrence: We'll give you a friendly but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't hold a
+  grudge.
+
+* People who commit minor infractions will get some education rather than
+  being hammered by the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how much
+  you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or forgiveness.
+
+* Contact abuse@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with a
+  fair solution if there has been a misunderstanding.
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they found it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines.
Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..afc7f5f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,227 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run -e DOCKER_GITCOMMIT=foo --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + binutils-mingw-w64 \ + bsdmainutils \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + gcc-mingw-w64 \ + git \ + iptables \ + jq \ + less \ + libapparmor-dev \ + libcap-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + net-tools \ + pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + tar \ + vim \ + vim-common \ + xfsprogs \ + zip \ + --no-install-recommends \ + && pip install awscli==1.10.15 +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. 
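+# (Illustrative sketch, not part of the upstream Dockerfile: after bumping
+# GO_VERSION, a sanity check such as
+#   RUN go version | grep -q "go${GO_VERSION}"
+# placed after the tarball is unpacked and PATH is set would fail the build
+# early if the installed toolchain does not match the pinned version.)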
+ENV GO_VERSION 1.8.3
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
+	| tar -xzC /usr/local
+
+ENV PATH /go/bin:/usr/local/go/bin:$PATH
+ENV GOPATH /go
+
+# Dependency for golint
+ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
+
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
+# Install CRIU for checkpoint/restore support
+ENV CRIU_VERSION 2.12.1
+# Install dependency packages specific to criu
+RUN apt-get install libnet-dev -y && \
+	mkdir -p /usr/src/criu \
+	&& curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
+	&& cd /usr/src/criu \
+	&& make \
+	&& make install-criu
+
+# Install two versions of the registry. The first is an older version that
+# only supports schema1 manifests. The second is a newer version that supports
+# both. This allows integration-cli tests to cover push/pull with both schema1
+# and schema2 manifests.
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
+ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
+	&& rm -rf "$GOPATH"
+
+# Install notary and notary-server
+ENV NOTARY_VERSION v0.5.0
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
+	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
+	&& rm -rf "$GOPATH"
+
+# Get the "docker-py" source so we can run their integration tests
+ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+# To run the integration tests, docker-pycreds is required.
+# Before running the integration tests, conftest.py is
+# loaded, which in turn loads auth.py, which
+# imports the docker-pycreds module.
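+# (Illustrative sketch, assuming the checkout path below: the supported entry
+# point for these tests is `hack/make.sh test-docker-py`, but in principle the
+# suite can also be invoked directly from the checkout, e.g.
+#   py.test /docker-py/tests/integration
+# where the exact path and test runner are assumptions, not upstream docs.)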
+RUN git clone https://github.com/docker/docker-py.git /docker-py \
+	&& cd /docker-py \
+	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install docker-pycreds==0.2.1 \
+	&& pip install -r test-requirements.txt
+
+# Install yamllint for validating swagger.yaml
+RUN pip install yamllint==1.5.0
+
+# Install go-swagger for validating swagger.yaml
+ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
+RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \
+	&& (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
+	&& go install -v github.com/go-swagger/go-swagger/cmd/swagger
+
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
+# Add an unprivileged user to be used for tests which need it
+RUN groupadd -r docker
+RUN useradd --create-home --gid docker unprivilegeduser
+
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/docker/docker
+ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+# Add integration helpers to bashrc
+RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc
+
+# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
+COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
+RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
+	buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \
+	busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \
+	debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \
+	hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7
+# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
+
+# Install tomlv, vndr, runc, containerd, tini, docker-proxy, dockercli
+# Please edit hack/dockerfile/install-binaries.sh to update them.
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
+COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+ENV PATH=/usr/local/cli:$PATH
+
+# Activate bash completion if mounted with DOCKER_BASH_COMPLETION_PATH
+RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
+
+# Wrap all commands in the "docker-in-docker" script to allow nested containers
+ENTRYPOINT ["hack/dind"]
+
+# Upload docker source
+COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 new file mode 100644 index 0000000..209d92e --- /dev/null +++ b/Dockerfile.aarch64 @@ -0,0 +1,201 @@ +# This file describes the standard way to build Docker on aarch64, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker -f Dockerfile.aarch64 .
+#
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
+
+FROM aarch64/ubuntu:xenial
+
+# Packaged dependencies
+RUN apt-get update && apt-get install -y \
+	apparmor \
+	aufs-tools \
+	automake \
+	bash-completion \
+	btrfs-tools \
+	build-essential \
+	cmake \
+	createrepo \
+	curl \
+	dpkg-sig \
+	g++ \
+	gcc \
+	git \
+	iptables \
+	jq \
+	libapparmor-dev \
+	libc6-dev \
+	libcap-dev \
+	libsystemd-dev \
+	libyaml-dev \
+	mercurial \
+	net-tools \
+	parallel \
+	pkg-config \
+	python-dev \
+	python-mock \
+	python-pip \
+	python-setuptools \
+	python-websocket \
+	golang-go \
+	iproute2 \
+	iputils-ping \
+	vim-common \
+	--no-install-recommends
+
+# Get lvm2 source for compiling statically
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+		| tar -xzC /usr/local/lvm2 --strip-components=1
+# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
+
+# Fix platform enablement in lvm2 to support aarch64 properly
+RUN set -e \
+	&& for f in config.guess config.sub; do \
+		curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
+	done
+# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
+
+# Compile and install lvm2
+RUN cd /usr/local/lvm2 \
+	&& ./configure \
+		--build="$(gcc -print-multiarch)" \
+		--enable-static_link \
+	&& make device-mapper \
+	&& make install_device-mapper
+# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
+
+# Install seccomp: the version shipped upstream is too old
+ENV SECCOMP_VERSION 2.3.2
+RUN set -x \
+	&& export SECCOMP_PATH="$(mktemp -d)" \
+	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
+	&& ( \
+		cd "$SECCOMP_PATH" \
+		&& ./configure --prefix=/usr/local \
+		&& make \
+		&& make install \
+		&& ldconfig \
+	) \
+	&& rm -rf "$SECCOMP_PATH"
+
+# Install Go
+# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or
+# for the bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code.
+# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
+# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
+ENV GO_VERSION 1.8.3
+RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
+	&& cd /usr/src/go/src \
+	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
+
+ENV PATH /go/bin:/usr/src/go/bin:$PATH
+ENV GOPATH /go
+
+# Dependency for golint
+ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
+
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
+# Only install one version of the registry, because the old version, which
+# supports schema1 manifests, does not work on ARM64; we therefore skip the
+# integration-cli tests for schema1 manifests on ARM64.
+ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& rm -rf "$GOPATH"
+
+# Install notary and notary-server
+ENV NOTARY_VERSION v0.5.0
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
+	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
+		go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
+	&& rm -rf "$GOPATH"
+
+# Get the "docker-py" source so we can run their integration tests
+ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+# Before running the integration tests, conftest.py is
+# loaded, which in turn loads auth.py, which
+# imports the docker-pycreds module.
+RUN git clone https://github.com/docker/docker-py.git /docker-py \
+	&& cd /docker-py \
+	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install wheel \
+	&& pip install docker-pycreds==0.2.1 \
+	&& pip install -r test-requirements.txt
+
+# Install yamllint for validating swagger.yaml
+RUN pip install yamllint==1.5.0
+
+# Install go-swagger for validating swagger.yaml
+ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
+RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \
+	&& (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
+	&& go install -v github.com/go-swagger/go-swagger/cmd/swagger
+
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
+# Add an unprivileged user to be used for tests which need it
+RUN groupadd -r docker
+RUN useradd --create-home --gid docker unprivilegeduser
+
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/docker/docker
+ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+
+# Register Docker's bash completion.
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \ + aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \ + aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \ + aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.armhf b/Dockerfile.armhf new file mode 100644 index 0000000..6103c5a --- /dev/null +++ b/Dockerfile.armhf @@ -0,0 +1,181 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
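+# (Illustrative sketch: the integration suite launches these binaries itself,
+# but a registry built this way can also be started by hand for manual
+# push/pull experiments, e.g.
+#   registry-v2 serve /path/to/config.yml
+# where the config path is a placeholder, not something this Dockerfile ships.)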
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:eb2dad77ef53e88d94c3c83862d315c806ea1ca49b6e74f4db362381365ce489 \ + armhf/busybox:latest@sha256:016a1e149d2acc2a3789a160dfa60ce870794eea27ad5e96f7a101970e5e1689 \ + armhf/debian:jessie@sha256:ac59fa18b28d0ef751eabb5ba4c4b5a9063f99398bae2f70495aa8ed6139b577 \ + armhf/hello-world:latest@sha256:9701edc932223a66e49dd6c894a11db8c2cf4eccd1414f1ec105a623bf16b426 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
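+# (Illustrative aside: the pinned revisions consumed by install-binaries.sh
+# live in hack/dockerfile/binaries-commits as shell variables, e.g. a
+# RUNC_COMMIT entry, so updating a component usually means editing that file
+# rather than this Dockerfile; the exact variable names are an assumption.)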
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.build.aarch64 b/Dockerfile.build.aarch64 new file mode 100644 index 0000000..e952286 --- /dev/null +++ b/Dockerfile.build.aarch64 @@ -0,0 +1,17 @@ +FROM resin/aarch64-golang:1.9 + +# Enable QEMU emulation +ENTRYPOINT [ "qemu-aarch64-static", "-execve" ] +SHELL [ "qemu-aarch64-static", "-execve", "/bin/sh", "-c" ] + +RUN apt-get update \ + && apt-get install -y \ + btrfs-tools \ + libapparmor-dev \ + libdevmapper-dev \ + libnl-3-dev \ + libsystemd-dev + +COPY . /docker + +WORKDIR /docker diff --git a/Dockerfile.build.arm b/Dockerfile.build.arm new file mode 100644 index 0000000..c667b37 --- /dev/null +++ b/Dockerfile.build.arm @@ -0,0 +1,18 @@ +FROM resin/raspberrypi3-golang:1.9 + +# Enable QEMU emulation +ENTRYPOINT [ "qemu-arm-static", "-execve" ] +SHELL [ "qemu-arm-static", "-execve", "/bin/sh", "-c" ] + +RUN apt-get update \ + && apt-get install -y \ + btrfs-tools \ + libapparmor-dev \ + libdevmapper-dev \ + libnl-3-dev \ + libsystemd-dev \ + libsystemd-journal-dev + +COPY . /docker + +WORKDIR /docker diff --git a/Dockerfile.build.i386 b/Dockerfile.build.i386 new file mode 100644 index 0000000..ff7edab --- /dev/null +++ b/Dockerfile.build.i386 @@ -0,0 +1,13 @@ +FROM i386/golang:1.9 + +RUN apt-get update \ + && apt-get install -y \ + btrfs-tools \ + libapparmor-dev \ + libdevmapper-dev \ + libnl-3-dev \ + libsystemd-dev + +COPY . /docker + +WORKDIR /docker diff --git a/Dockerfile.build.x86_64 b/Dockerfile.build.x86_64 new file mode 100644 index 0000000..b566f73 --- /dev/null +++ b/Dockerfile.build.x86_64 @@ -0,0 +1,13 @@ +FROM resin/amd64-golang:1.9 + +RUN apt-get update \ + && apt-get install -y \ + btrfs-tools \ + libapparmor-dev \ + libdevmapper-dev \ + libnl-3-dev \ + libsystemd-dev + +COPY . /docker + +WORKDIR /docker diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le new file mode 100644 index 0000000..e641538 --- /dev/null +++ b/Dockerfile.ppc64le @@ -0,0 +1,188 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:1a2f2d2cc8738f14b336aeffc3503b5c9dedf9e1f26c7313cb4999534ad4716f \ + ppc64le/busybox:latest@sha256:54f34c83adfab20cf0e630d879e210f07b0062cd6caaf16346a61396d50e7584 \ + ppc64le/debian:jessie@sha256:ea8c5b105e3790f075145b40e4be1e4488c9f33f55e6cc45182047b80a68f892 \ + ppc64le/hello-world:latest@sha256:7d57adf137665f748956c86089320710b66d08584db3500ed98f4bb3da637c2d +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.s390x b/Dockerfile.s390x new file mode 100644 index 0000000..c69da3c --- /dev/null +++ b/Dockerfile.s390x @@ -0,0 +1,181 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM s390x/debian:jessie + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) 
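+
+# (The golint install below resolves golang.org/x/tools from the pinned checkout
+# above, via the GOPATH=/go configured earlier in this file.)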
+ +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.5.0 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:552dec28146e4d2591fc0309aebdbac9e4fb1f335d90c70a14bbf72fb8bb1be5 \ + s390x/busybox:latest@sha256:e32f40c39ca596a4317392bd32809bb188c4ae5864ea827c3219c75c50069964 \ + s390x/debian:jessie@sha256:6994e3ffa5a1dabea09d536f350b3ed2715292cb469417c42a82b70fcbff7d32 \ + s390x/hello-world:latest@sha256:602db500fee63934292260e65c0c528128ad1c1c7c6497f95bbbac7d4d5312f1 +# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.simple b/Dockerfile.simple new file mode 100644 index 0000000..4edc08f --- /dev/null +++ b/Dockerfile.simple @@ -0,0 +1,72 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + build-essential \ + curl \ + cmake \ + gcc \ + git \ + libapparmor-dev \ + libdevmapper-dev \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xfsprogs \ + xz-utils \ + \ + aufs-tools \ + vim-common \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped upstream is too old +ENV SECCOMP_VERSION 2.3.2 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. 
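+# (Dockerfile.windows pins the same Go release via its GO_VERSION variable and notes
+# that it must stay consistent with the Linux Dockerfile; bump them together.)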
+ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go +ENV CGO_LDFLAGS -L/lib + +# Install runc, containerd, tini and docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh runc containerd tini proxy dockercli +ENV PATH=/usr/local/cli:$PATH + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . /usr/src/docker diff --git a/Dockerfile.solaris b/Dockerfile.solaris new file mode 100644 index 0000000..4198b13 --- /dev/null +++ b/Dockerfile.solaris @@ -0,0 +1,19 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.windows b/Dockerfile.windows new file mode 100644 index 0000000..8f8ee60 --- /dev/null +++ b/Dockerfile.windows @@ -0,0 +1,256 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. 
Clone the sources from github.com:
+#
+# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
+# >> Cloning into 'C:\go\src\github.com\docker\docker'...
+# >> remote: Counting objects: 186216, done.
+# >> remote: Compressing objects: 100% (21/21), done.
+# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
+# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done.
+# >> Resolving deltas: 100% (123139/123139), done.
+# >> Checking connectivity... done.
+# >> Checking out files: 100% (3912/3912), done.
+# >> PS C:\>
+#
+#
+# 2. Change directory to the cloned docker sources:
+#
+# >> cd C:\go\src\github.com\docker\docker
+#
+#
+# 3. Build a docker image with the components required to build the docker binaries from source
+# by running one of the following:
+#
+# >> docker build -t nativebuildimage -f Dockerfile.windows .
+# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers)
+#
+#
+# 4. Build the docker executable binaries by running one of the following:
+#
+# >> $DOCKER_GITCOMMIT=(git rev-parse --short HEAD)
+# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary
+# >> docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers)
+#
+#
+# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
+# folder on the host system where you want the binaries to be located.
+#
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
+#
+#
+# 6. (Optional) Remove the interim container holding the built executable binaries:
+#
+# >> docker rm binaries
+#
+#
+# 7. (Optional) Remove the image used for the container in which the executable
+# binaries are built. Tip - it may be useful to keep this image around if you need to
+# build multiple times. Then you can take advantage of the builder cache to have an
+# image which has all the components required to build the binaries already installed.
+#
+# >> docker rmi nativebuildimage
+#
+
+# -----------------------------------------------------------------------------------------
+
+
+# The validation tests can only run directly on the host. This is because they calculate
+# information from the git repo, but the .git directory is not passed into the image as
+# it is excluded via .dockerignore. Run the following from a Windows PowerShell prompt
+# (elevation is not required; note that Go must be installed to run these tests):
+#
+# >> hack\make.ps1 -DCO -PkgImports -GoFormat
+
+
+# -----------------------------------------------------------------------------------------
+
+
+# To run unit tests, ensure you have created the nativebuildimage above. Then run one of
+# the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers)
+
+
+# -----------------------------------------------------------------------------------------
+
+
+# To run unit tests and binary build, ensure you have created the nativebuildimage above. Then
+# run one of the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run nativebuildimage hack\make.ps1 -All
+# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers)
+
+# -----------------------------------------------------------------------------------------
+
+
+# Important notes:
+# ---------------
+#
+# Don't attempt to use a bind-mount to pass a local directory as the bundles target
+# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
+# Instead, use docker cp as per the example.
+#
+# go.zip is not removed from the image as it is used by the Windows CI servers
+# to ensure the host and image are running consistent versions of go.
+#
+# Nanoserver support is a work in progress. Although the image will build if the
+# FROM statement is updated, it will not work when running autogen through hack\make.ps1.
+# It is suspected that the required GCC utilities (e.g. gcc, windres, windmc) silently
+# quit due to the use of console hooks which are not available.
+#
+# The docker integration tests do not currently run in a container on Windows, predominantly
+# due to Windows not supporting privileged mode, so anything using a volume would fail.
+# They (along with the rest of the docker CI suite) can be run using
+# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
+#
+# -----------------------------------------------------------------------------------------
+
+
+# The number of build steps below is explicitly minimised to improve performance.
+FROM microsoft/windowsservercore

+# Use PowerShell as the default shell
+SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
+
+# Environment variable notes:
+# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
+# - FROM_DOCKERFILE is used for detection of building within a container.
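+# - As an illustrative sketch only (the exact check is up to the consuming script),
+#   a PowerShell step could branch on it, e.g.:
+#     if ($Env:FROM_DOCKERFILE) { Write-Host 'INFO: building inside a container' }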
+ENV GO_VERSION=1.8.3 ` + GIT_VERSION=2.11.1 ` + GOPATH=C:\go ` + FROM_DOCKERFILE=1 + +RUN ` + Function Test-Nano() { ` + $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` + return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` + }` + ` + Function Download-File([string] $source, [string] $target) { ` + if (Test-Nano) { ` + $handler = New-Object System.Net.Http.HttpClientHandler; ` + $client = New-Object System.Net.Http.HttpClient($handler); ` + $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` + $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` + $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` + $responseMsg.Wait(); ` + if (!$responseMsg.IsCanceled) { ` + $response = $responseMsg.Result; ` + if ($response.IsSuccessStatusCode) { ` + $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` + $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` + $copyStreamOp.Wait(); ` + $downloadedFileStream.Close(); ` + if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` + } ` + } else { ` + Throw ("Failed to download " + $source) ` + }` + } else { ` + $webClient = New-Object System.Net.WebClient; ` + $webClient.DownloadFile($source, $target); ` + } ` + } ` + ` + setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + Write-Host INFO: Downloading git...; ` + $location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; ` + Download-File $location C:\gitsetup.zip; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Extracting git...; ` + Expand-Archive C:\gitsetup.zip C:\git-tmp; ` + New-Item -Type Directory C:\git | Out-Null; ` + Move-Item C:\git-tmp\tools\* C:\git\.; ` + Remove-Item -Recurse -Force C:\git-tmp; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.zip; ` + ` + Write-Host INFO: Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\cmd\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make 
PowerShell the default entrypoint
+ENTRYPOINT ["powershell.exe"]
+
+# Set the working directory to the location of the sources
+WORKDIR C:\go\src\github.com\docker\docker
+
+# Copy the sources into the container
+COPY . .
diff --git a/FAQ.md b/FAQ.md
new file mode 100644
index 0000000..33b4e50
--- /dev/null
+++ b/FAQ.md
@@ -0,0 +1,24 @@
+# FAQ
+
+## What is the Moby Project?
+
+As described by Docker, the Moby Project is “an open framework to assemble specialized container systems.” You can read more at [the moby project website](https://mobyproject.org/) and [moby project introduction post](https://blog.docker.com/2017/04/introducing-the-moby-project/).
+
+## Why containers for IoT?
+
+We think that containers are essential to bringing modern development and deployment capabilities to connected devices. Linux containers, particularly Docker, offer, for the first time, a practical path to using virtualization on embedded devices, without heavy overhead or hardware abstraction layers that get in the way.
+
+## Why not just use Docker’s container engine on my device?
+
+Docker was primarily designed for datacenters with large, homogeneous, well-networked servers. As such, it makes tradeoffs that in some cases come into conflict with the needs of small, heterogeneous, remotely distributed, and differentiated devices, as found in IoT and embedded Linux use cases.
+
+With its small footprint and purpose-built features, balena was made specifically for IoT devices, or any scenario where footprint, bandwidth, power, storage, etc. are a concern.
+
+## What specific Docker features are excluded from balena?
+
+We left out Docker features that we saw as most needed in cloud deployments and therefore not warranting inclusion in a lightweight IoT-focused container engine. Specifically, we’ve excluded:
+- Docker Swarm
+- Cloud logging drivers
+- Plugin support
+- Overlay networking drivers
+- Non-boltdb backed stores (consul, zookeeper, etcd, etc.)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..9c8e20a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 0000000..dc4485d --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,462 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. 
+ + people = [ + "aaronlehmann", + "akihirosuda", + "albers", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "ehazlett", + "estesp", + "icecrime", + "jhowardmsft", + "johnstep", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux", + "yongtang" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "misty", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "aboch", + "alexellis", + "andrewhsu", + "anonymuse", + "chanwit", + "ehazlett", + "fntlnz", + "gianarb", + "mgoelzer", + "programmerq", + "rheinwein", + "thajeztah" + ] + + [Org.Alumni] + + # This list contains maintainers that are no longer active on the project. + # It is thanks to these people that the project has become what it is today. + # Thank you! + + people = [ + # David Calavera contributed many features to Docker, such as an improved + # event system, dynamic configuration reloading, volume plugins, fancy + # new templating options, and an external client credential store. As a + # maintainer, David was release captain for Docker 1.8, and competing + # with Jess Frazelle to be "top dream killer". + # David is now doing amazing stuff as CTO for https://www.netlify.com, + # and tweets as @calavera. + "calavera", + + # As a maintainer, Erik was responsible for the "builder", and + # started the first designs for the new networking model in + # Docker. Erik is now working on all kinds of plugins for Docker + # (https://github.com/contiv) and various open source projects + # in his own repository https://github.com/erikh. You may + # still stumble into him in our issue tracker, or on IRC. + "erikh", + + # After a false start with his first PR being rejected, James Turnbull became a frequent + # contributor to the documentation, and became a docs maintainer on December 5, 2013. As + # a maintainer, James lifted the docs to a higher standard, and introduced the community + # guidelines ("three strikes"). James is currently changing the world as CTO of https://www.empatico.org, + # meanwhile authoring various books that are worth checking out. You can find him on Twitter, + # rambling as @kartar, and although no longer active as a maintainer, he's always "game" to + # help out reviewing docs PRs, so you may still see him around in the repository. + "jamtur01", + + # Jessica Frazelle, also known as the "Keyser Söze of containers", + # runs *everything* in containers. She started contributing to + # Docker with a (fun fun) change involving both iptables and regular + # expressions (coz, YOLO!) on July 10, 2014 + # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. 
+ # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed + # many features and improvement, among which "seccomp profiles" (making + # containers a lot more secure). Besides being a maintainer, she + # set up the CI infrastructure for the project, giving everyone + # something to shout at if a PR failed ("noooo Janky!"). + # Jess is currently working on the DCOS security team at Mesosphere, + # and contributing to various open source projects. + # Be sure you don't miss her talks at a conference near you (a must-see), + # read her blog at https://blog.jessfraz.com (a must-read), and + # check out her open source projects on GitHub https://github.com/jessfraz (a must-try). + "jessfraz", + + # As a docs maintainer, Mary Anthony contributed greatly to the Docker + # docs. She wrote the Docker Contributor Guide and Getting Started + # Guides. She helped create a doc build system independent of + # docker/docker project, and implemented a new docs.docker.com theme and + # nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub + # public repositories was originally referenced in + # maryatdocker/docker-whale back in May 2015. + "moxiegirl", + + # Jana Radhakrishnan was part of the SocketPlane team that joined Docker. + # As a maintainer, he was the lead architect for the Container Network + # Model (CNM) implemented through libnetwork, and the "routing mesh" powering + # Swarm mode networking. + # + # Jana started new adventures in networking, but you can find him tweeting as @mrjana, + # coding on GitHub https://github.com/mrjana, and he may be hiding on the Docker Community + # slack channel :-) + "mrjana", + + # Sven Dowideit became a well known person in the Docker ecosphere, building + # boot2docker, and became a regular contributor to the project, starting as + # early as October 2013 (https://github.com/docker/docker/pull/2119), to become + # a maintainer less than two months later (https://github.com/docker/docker/pull/3061). + # + # As a maintainer, Sven took on the task to convert the documentation from + # ReStructuredText to Markdown, migrate to Hugo for generating the docs, and + # writing tooling for building, testing, and publishing them. + # + # If you're not in the occasion to visit "the Australian office", you + # can keep up with Sven on Twitter (@SvenDowideit), his blog http://fosiki.com, + # and of course on GitHub. + "sven", + + # Vincent "vbatts!" Batts made his first contribution to the project + # in November 2013, to become a maintainer a few months later, on + # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). + # As a maintainer, Vincent made important contributions to core elements + # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). + # He also contributed the "tar-split" library, an important element + # for the content-addressable store. + # Vincent is currently a member of the Open Containers Initiative + # Technical Oversight Board (TOB), besides his work at Red Hat and + # Project Atomic. You can still find him regularly hanging out in + # our repository and the #docker-dev and #docker-maintainers IRC channels + # for a chat, as he's always a lot of fun. + "vbatts", + + # Vishnu became a maintainer to help out on the daemon codebase and + # libcontainer integration. He's currently involved in the + # Open Containers Initiative, working on the specifications, + # besides his work on cAdvisor and Kubernetes for Google. 
+ "vishh" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.alexellis] + Name = "Alex Ellis" + Email = "alexellis2@gmail.com" + GitHub = "alexellis" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.albers] + Name = "Harald Albers" + Email = "github@albersweb.de" + GitHub = "albers" + + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + + [people.anonymuse] + Name = "Jesse White" + Email = "anonymuse@gmail.com" + GitHub = "anonymuse" + + [people.anusha] + Name = "Anusha Ragunathan" + Email = "anusha@docker.com" + GitHub = "anusha-ragunathan" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.coolljt0725] + Name = "Lei Jitang" + Email = "leijitang@huawei.com" + GitHub = "coolljt0725" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + Github = "cpuguy83" + + [people.chanwit] + Name = "Chanwit Kaewkasi" + Email = "chanwit@gmail.com" + GitHub = "chanwit" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.fntlnz] + Name = "Lorenzo Fontana" + Email = "fontanalorenz@gmail.com" + GitHub = "fntlnz" + + [people.gianarb] + Name = "Gianluca Arbezzano" + Email = "ga@thumpflow.com" + GitHub = "gianarb" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "icecrime@gmail.com" + GitHub = "icecrime" + + [people.jamtur01] + Name = "James Turnbull" + Email = "james@lovedthanlost.net" + GitHub = "jamtur01" + + [people.jhowardmsft] + Name = "John Howard" + Email = "jhoward@microsoft.com" + GitHub = "jhowardmsft" + + [people.jessfraz] + Name = "Jessie Frazelle" + Email = "jess@linux.com" + GitHub = "jessfraz" + + [people.johnstep] + Name = "John Stephens" + Email = "johnstep@docker.com" + GitHub = "johnstep" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" + + [people.mgoelzer] + Name = "Mike Goelzer" + Email = "mike.goelzer@docker.com" + GitHub = "mgoelzer" + + [people.mhbauer] + Name = "Morgan Bauer" + Email = "mbauer@us.ibm.com" + GitHub = "mhbauer" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mstanleyjones" + + [people.mlaventure] + Name = "Kenfe-Mickaël 
Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = "mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = "mrjana" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.rheinwein] + Name = "Laura Frank" + Email = "laura@codeship.com" + GitHub = "rheinwein" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "runcom@redhat.com" + GitHub = "runcom" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" + + [people.yongtang] + Name = "Yong Tang" + Email = "yong.tang.github@outlook.com" + GitHub = "yongtang" + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..57047c3 --- /dev/null +++ b/Makefile @@ -0,0 +1,202 @@ +.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win + +# set the graph driver as the current graphdriver if not set +DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) +export DOCKER_GRAPHDRIVER +DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1) +export DOCKER_INCREMENTAL_BINARY + +# get OS/Arch of docker engine +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}') +DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') + +DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported) +export DOCKER_GITCOMMIT + +# env vars passed through directly to Docker's build scripts +# to allow things like `make KEEPBUNDLE=1 binary` easily +# `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e DOCKER_CROSSPLATFORMS \ + -e BUILD_APT_MIRROR \ + -e BUILDFLAGS \ + -e KEEPBUNDLE \ + -e DOCKER_BUILD_ARGS \ + -e DOCKER_BUILD_GOGC \ + -e DOCKER_BUILD_PKGS \ + -e DOCKER_BASH_COMPLETION_PATH \ + -e DOCKER_CLI_PATH \ + -e DOCKER_DEBUG \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GITCOMMIT \ + -e DOCKER_GRAPHDRIVER \ + -e DOCKER_INCREMENTAL_BINARY \ + -e DOCKER_PORT \ + -e DOCKER_REMAP_ROOT \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + 
-e TESTFLAGS \
+	-e TIMEOUT \
+	-e HTTP_PROXY \
+	-e HTTPS_PROXY \
+	-e NO_PROXY \
+	-e http_proxy \
+	-e https_proxy \
+	-e no_proxy
+# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
+
+# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
+# (default to no bind mount if DOCKER_HOST is set)
+# note: BINDDIR is supported for backwards-compatibility here
+BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
+DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
+
+# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
+# The volume will be cleaned up when the container is removed due to `--rm`.
+# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see the BIND_DIR line above); in such a case this will do nothing since `DOCKER_MOUNT` will already be set.
+DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v $(CURDIR)/.git:/go/src/github.com/docker/docker/.git
+
+# This allows setting the docker-dev container name
+DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
+
+# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set
+PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
+PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
+PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
+DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
+DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
+DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
+
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
+DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
+DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
+
+DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
+BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
+export BUILD_APT_MIRROR
+
+SWAGGER_DOCS_PORT ?= 9000
+
+INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
+INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
+
+define \n
+
+
+endef
+
+# if this session isn't interactive, then we don't want to allocate a
+# TTY, which would fail, but if it is interactive, we do want to attach
+# so that the user can send e.g. ^C through.
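+# For example, in a CI job with no attached terminal, `[ -t 0 ]` exits non-zero,
+# INTERACTIVE becomes 0, and the -t flag is simply left out of DOCKER_FLAGS below.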
+INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +docker: build ## build the linux consolidate binary + $(DOCKER_RUN_DOCKER) hack/make.sh binary-docker + +build: bundles init-go-pkg-cache + $(warning The docker client CLI has moved to github.com/docker/cli. By default, it is built from the git sha specified in hack/dockerfile/binaries-commits. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n}) + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . + +bundles: + mkdir bundles + +clean: clean-pkg-cache-vol ## clean up cached resources + +clean-pkg-cache-vol: + @- $(foreach mapping,$(PKGCACHE_MAP), \ + $(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \ + ) + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary-docker binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary-docker build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + $(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g')) + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary-docker build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary-docker cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary-docker test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary-docker test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary-docker binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) hack/validate/all + +win: build ## cross build the binary for windows + $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger:0.7.4 + +.PHONY: swagger-docs +swagger-docs: ## preview 
the API documentation + @echo "API docs preview will be running at http://localhost:$(SWAGGER_DOCS_PORT)" + @docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \ + -e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \ + -p $(SWAGGER_DOCS_PORT):80 \ + bfirsh/redoc:1.6.2 +build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel + @echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)" + go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host + @echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)" + docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent + +# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on + @echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)" + $(eval tmp := integration-cli-worker-tmp) +# We mount pkgcache, but not bundle (bundle needs to be baked into the image) +# For avoiding bakings DOCKER_GRAPHDRIVER and so on to image, we cannot use $(DOCKER_ENVS) here + docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top + docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary-docker + docker exec $(tmp) go build -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker + docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE) + docker rm -f $(tmp) diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..0c74e15 --- /dev/null +++ b/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/README.md b/README.md index 74b278c..e1617b8 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,96 @@ -# docker-engine -> This is docker engine package. -> The docker-engine contains docker daemon, cli, containerd, runc. - - -## Build in local PC -### gbs build -
-$ git clone ssh://{ID}@review.tizen.org:29418/platform/upstream/docker-engine
-$ cd docker-engine
-docker-engine $ git submodule init
-docker-engine $ git submodule update --recursive
-docker-engine $ sh packaging/gitmodule.sh
-docker-engine $ gbs build -A armv7l --include-all
-
+# build for Host PC (amd64)
+```
+docker-static$ ./build.sh
+# WARNING! I don't seem to be running in a Docker container.
+# The result of this command might be an incorrect build, and will not be
+# officially supported.
+#
+# Try this instead: make all
+#
+
+---> Making bundle: binary-docker (in bundles/17.06.0-dev/binary-docker)
+Building: bundles/17.06.0-dev/binary-docker/docker-17.06.0-dev
+Created binary: bundles/17.06.0-dev/binary-docker/docker-17.06.0-dev
+
+docker/
+docker/docker-containerd-ctr
+docker/docker
+docker/docker-containerd-shim
+docker/docker-proxy
+docker/docker-containerd
+docker/docker-runc
+docker/dockerd
+
+
+/docker-static$ file docker/docker
+docker/docker: ELF 64-bit LSB executable, x86-64, version 1 (GNU/Linux), statically linked, for GNU/Linux 2.6.32, BuildID[sha1]=7232430f1b8490d29da63245ac68bf0ef4b02e25, stripped
+```
+
+# cross build for target (armv7)
+```
+$ ./build-arm.sh
+# WARNING! I don't seem to be running in a Docker container.
+# The result of this command might be an incorrect build, and will not be
+# officially supported.
+#
+# Try this instead: make all
+#
+
+bundles/17.06.0-dev already exists. Removing.
+
+---> Making bundle: binary-docker (in bundles/17.06.0-dev/binary-docker)
+Building: bundles/17.06.0-dev/binary-docker/docker-17.06.0-dev
+
+Created binary: bundles/17.06.0-dev/binary-docker/docker-17.06.0-dev
+
+strip: Unable to recognise the format of the input file `docker/docker'
+
+$ file bundles/17.06.0-dev/binary-docker/docker-17.06.0-dev
+bundles/17.06.0-dev/binary-docker/docker-17.06.0-dev: ELF 32-bit LSB executable, ARM, EABI5 version 1 (SYSV), statically linked, for GNU/Linux 3.2.0, BuildID[sha1]=9fb34777bc176a9e7d3beea408c6ac9d16c3e206, not stripped
+```
+
+# build rpm for target (armv7)
+```
+$ ./rpmbuild.sh arm
+-------------------------------------
+Init-rpmbuild Env.
+-------------------------------------
+Delete existing rpmbuild folder
+Create rpmbuild folder (/home/son/works/src/beluga/docker-engine/rpmbuild)
+error: Architecture is not included: x86_64
+-------------------------------------
+Pre-requisition
+-------------------------------------
+/home/son/works/src/beluga/docker-engine
+/home/son/works/src/beluga/docker-engine/rpmbuild
+├── BUILD
+├── BUILDROOT
+├── RPMS
+├── SOURCES
+│   ├── docker-engine.manifest
+│   ├── docker-engine.service
+│   ├── docker-engine.socket
+│   └── docker-engine.tar.gz
+├── SPECS
+│   └── docker-engine.spec
+└── SRPMS
+
+6 directories, 5 files
+-------------------------------------
+Build
+-------------------------------------
+Building target platforms: armv7l
+Building for target armv7l
+Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.XsKkMN
+
+...
+
+/home/son/works/src/beluga/docker-engine
+/home/son/works/src/beluga/docker-engine/rpmbuild/RPMS
+└── armv7l
+    └── docker-engine-17.06.0-0.5.armv7l.rpm
+
+1 directory, 1 file
+
+... Remove temp working directory /tmp/tmp.A1KwVC6NT7
+```
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 0000000..05a8695
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,118 @@
+Docker Engine Roadmap
+=====================
+
+### How should I use this document?
+
+This document provides a description of the items that the project has decided to prioritize. It
+should serve as a reference point for Docker contributors to understand where the project is going,
+and help determine if a contribution could conflict with some longer-term plans.
+ +The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be +refused (except for those mentioned as "frozen features" below)! We are always happy to receive +patches for new cool features we haven't thought about, or didn't judge priority. Please however +understand that such patches might take longer for us to review. + +### How can I help? + +Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described +in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our +goal is to split down the workload in such way that anybody can jump in and help. Please comment on +issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already +assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is +the best way to go. + +### How can I add something to the roadmap? + +The roadmap process is new to the Docker Engine: we are only beginning to structure and document the +project objectives. Our immediate goal is to be more transparent, and work with our community to +focus our efforts on fewer prioritized topics. + +We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but +we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we +won't be accepting pull requests adding or removing items from this file. + +# 1. Features and refactoring + +## 1.1 Runtime improvements + +We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container +execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional +default libcontainer `execdriver`, but the Engine internals were not ready for this. + +As runC continued evolving, and the OCI specification along with it, we created +[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC`. This is +the new target for Engine integration, as it can entirely replace the whole `execdriver` +architecture, and container monitoring along with it. + +Docker Engine will rely on a long-running `containerd` companion daemon for all container execution +related operations. This could open the door in the future for Engine restarts without interrupting +running containers. + +## 1.2 Plugins improvements + +Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks +extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real +world feedback before optimizing for any particular workflow. + +In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of +plugins. This implies in particular making it trivially easy to distribute plugins as containers +through any Registry instance, as well as solving the commonly heard pain points of plugins needing +to be treated as somewhat special (being active at all time, started before any other user +containers, and not as easily dismissed). + +## 1.3 Internal decoupling + +A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the +API implementation has been refactored, and the Builder side of the daemon is now +[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in +the same repository. 
+ +We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the +runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support +with the concept of "special" containers opens the door for bootstrapping more Engine internals +using the same facilities. + +## 1.4 Cluster capable Engine + +The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent +adding features such as multihost networking, and node discovery down at the Engine level. Yet, the +Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm +for that. + +We plan to complete this effort and make Engine fully cluster capable. Multiple instances of the +Docker Engine being already capable of discovering each other and establish overlay networking for +their container to communicate, the next step is for a given Engine to gain ability to dispatch work +to another node in the cluster. This will be introduced in a backward compatible way, such that a +`docker run` invocation on a particular node remains fully deterministic. + +# 2. Frozen features + +## 2.1 Docker exec + +We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a +*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. + +## 2.2 Remote Registry Operations + +A large amount of work is ongoing in the area of image distribution and provenance. This includes +moving to the V2 Registry API and heavily refactoring the code that powers these features. The +desired result is more secure, reliable and easier to use image distribution. + +Part of the problem with this part of the code base is the lack of a stable and flexible interface. +If new features are added that access the registry without solidifying these interfaces, achieving +feature parity will continue to be elusive. While we get a handle on this situation, we are imposing +a moratorium on new code that accesses the Registry API in commands that don't already make remote +calls. + +Currently, only the following commands cause interaction with a remote registry: + + - push + - pull + - run + - build + - search + - login + +In the interest of stabilizing the registry access model during this ongoing work, we are not +accepting additions to other commands that will cause remote interaction with the Registry API. This +moratorium will lift when the goals of the distribution project have been met. diff --git a/VENDORING.md b/VENDORING.md new file mode 100644 index 0000000..8884f88 --- /dev/null +++ b/VENDORING.md @@ -0,0 +1,46 @@ +# Vendoring policies + +This document outlines recommended Vendoring policies for Docker repositories. +(Example, libnetwork is a Docker repo and logrus is not.) + +## Vendoring using tags + +Commit ID based vendoring provides little/no information about the updates +vendored. To fix this, vendors will now require that repositories use annotated +tags along with commit ids to snapshot commits. Annotated tags by themselves +are not sufficient, since the same tag can be force updated to reference +different commits. + +Each tag should: +- Follow Semantic Versioning rules (refer to section on "Semantic Versioning") +- Have a corresponding entry in the change tracking document. + +Each repo should: +- Have a change tracking document between tags/releases. Ex: CHANGELOG.md, +github releases file. 
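+
+As a concrete illustration of the tag shape this policy expects, here is a
+minimal Go sketch (an example written for this document, not tooling that
+exists in the repo) that checks candidate tags against the MAJOR.MINOR.PATCH
+format described under "Semantic Versioning" below:
+
+```go
+package main
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// semverTag accepts vX.Y.Z with optional pre-release/build-metadata suffixes.
+var semverTag = regexp.MustCompile(`^v?\d+\.\d+\.\d+(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$`)
+
+func main() {
+	for _, tag := range []string{"v1.2.3", "v1.2.3-rc.1", "1.2", "latest"} {
+		fmt.Printf("%-12s valid=%v\n", tag, semverTag.MatchString(tag))
+	}
+}
+```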
+ +The goal here is for consuming repos to be able to use the tag version and +changelog updates to determine whether the vendoring will cause any breaking or +backward incompatible changes. This also means that repos can specify having +dependency on a package of a specific version or greater up to the next major +release, without encountering breaking changes. + +## Semantic Versioning +Annotated version tags should follow [Semantic Versioning](http://semver.org) policies: + +"Given a version number MAJOR.MINOR.PATCH, increment the: + + 1. MAJOR version when you make incompatible API changes, + 2. MINOR version when you add functionality in a backwards-compatible manner, and + 3. PATCH version when you make backwards-compatible bug fixes. + +Additional labels for pre-release and build metadata are available as extensions +to the MAJOR.MINOR.PATCH format." + +## Vendoring cadence +In order to avoid huge vendoring changes, it is recommended to have a regular +cadence for vendoring updates. e.g. monthly. + +## Pre-merge vendoring tests +All related repos will be vendored into docker/docker. +CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..2d736aa --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +17.06.0-dev diff --git a/api/README.md b/api/README.md new file mode 100644 index 0000000..bb88132 --- /dev/null +++ b/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. 
adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when you are making edits, to ensure your changes are valid.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/api/common.go b/api/common.go
new file mode 100644
index 0000000..859daf6
--- /dev/null
+++ b/api/common.go
@@ -0,0 +1,65 @@
+package api
+
+import (
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/libtrust"
+)
+
+// Common constants for daemon and client.
+const (
+	// DefaultVersion of Current REST API
+	DefaultVersion string = "1.31"
+
+	// NoBaseImageSpecifier is the symbol used by the FROM
+	// command to specify that no base image is to be used.
+	NoBaseImageSpecifier string = "scratch"
+)
+
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
+// otherwise generates a new one
+func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
+	err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
+	if err != nil {
+		return nil, err
+	}
+	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
+	if err == libtrust.ErrKeyFileDoesNotExist {
+		trustKey, err = libtrust.GenerateECP256PrivateKey()
+		if err != nil {
+			return nil, fmt.Errorf("Error generating key: %s", err)
+		}
+		encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
+		if err != nil {
+			return nil, fmt.Errorf("Error serializing key: %s", err)
+		}
+		if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
+			return nil, fmt.Errorf("Error saving key file: %s", err)
+		}
+	} else if err != nil {
+		return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
+	}
+	return trustKey, nil
+}
+
+func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
+	if ext == ".json" || ext == ".jwk" {
+		encoded, err = json.Marshal(key)
+		if err != nil {
+			return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
+		}
+	} else {
+		pemBlock, err := key.PEMBlock()
+		if err != nil {
+			return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
+		}
+		encoded = pem.EncodeToMemory(pemBlock)
+	}
+	return
+}
diff --git a/api/common_test.go b/api/common_test.go
new file mode 100644
index 0000000..f466616
--- /dev/null
+++ b/api/common_test.go
@@ -0,0 +1,77 @@
+package api
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+
+	"os"
+)
+
+// LoadOrCreateTrustKey
+func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
+	tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpKeyFolderPath)
+
+	tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil {
+		
t.Fatal("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folders do not exist. + tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/api/common_unix.go b/api/common_unix.go new file mode 100644 index 0000000..081e61c --- /dev/null +++ b/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/api/common_windows.go b/api/common_windows.go new file mode 100644 index 0000000..a6268a4 --- /dev/null +++ b/api/common_windows.go @@ -0,0 +1,8 @@ +package api + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/api/errors/errors.go b/api/errors/errors.go new file mode 100644 index 0000000..39d52e1 --- /dev/null +++ b/api/errors/errors.go @@ -0,0 +1,47 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. +func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. 
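+//
+// A hypothetical use from a route handler (illustration only, not code that
+// exists in this package):
+//
+//	if name == "" {
+//		return errors.NewBadRequestError(fmt.Errorf("name cannot be empty"))
+//	}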
+func NewBadRequestError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusBadRequest)
+}
+
+// NewRequestForbiddenError creates a new API error
+// that has the 403 HTTP status code associated to it.
+func NewRequestForbiddenError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusForbidden)
+}
+
+// NewRequestNotFoundError creates a new API error
+// that has the 404 HTTP status code associated to it.
+func NewRequestNotFoundError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusNotFound)
+}
+
+// NewRequestConflictError creates a new API error
+// that has the 409 HTTP status code associated to it.
+func NewRequestConflictError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusConflict)
+}
diff --git a/api/errors/errors_test.go b/api/errors/errors_test.go
new file mode 100644
index 0000000..1d6a596
--- /dev/null
+++ b/api/errors/errors_test.go
@@ -0,0 +1,64 @@
+package errors
+
+import (
+	"fmt"
+	"github.com/stretchr/testify/assert"
+	"net/http"
+	"testing"
+)
+
+func newError(errorname string) error {
+
+	return fmt.Errorf("test%v", errorname)
+}
+
+func TestErrors(t *testing.T) {
+	errmsg := newError("apiError")
+	err := apiError{
+		error:      errmsg,
+		statusCode: 0,
+	}
+	assert.Equal(t, err.HTTPErrorStatusCode(), err.statusCode)
+
+	errmsg = newError("ErrorWithStatusCode")
+	errcode := 1
+	serr := NewErrorWithStatusCode(errmsg, errcode)
+	apierr, ok := serr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, errcode, apierr.statusCode)
+
+	errmsg = newError("NewBadRequestError")
+	baderr := NewBadRequestError(errmsg)
+	apierr, ok = baderr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusBadRequest, apierr.statusCode)
+
+	errmsg = newError("RequestForbiddenError")
+	ferr := NewRequestForbiddenError(errmsg)
+	apierr, ok = ferr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusForbidden, apierr.statusCode)
+
+	errmsg = newError("RequestNotFoundError")
+	nerr := NewRequestNotFoundError(errmsg)
+	apierr, ok = nerr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusNotFound, apierr.statusCode)
+
+	errmsg = newError("RequestConflictError")
+	cerr := NewRequestConflictError(errmsg)
+	apierr, ok = cerr.(apiError)
+	if !ok {
+		t.Fatal("expected err to be of type apiError")
+	}
+	assert.Equal(t, http.StatusConflict, apierr.statusCode)
+
+}
diff --git a/api/fixtures/keyfile b/api/fixtures/keyfile
new file mode 100644
index 0000000..322f254
--- /dev/null
+++ b/api/fixtures/keyfile
@@ -0,0 +1,7 @@
+-----BEGIN EC PRIVATE KEY-----
+keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY
+
+MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49
+AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky
+NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA==
+-----END EC PRIVATE KEY-----
diff --git a/api/names.go b/api/names.go
new file mode 100644
index 0000000..f147d1f
--- /dev/null
+++ b/api/names.go
@@ -0,0 +1,9 @@
+package api
+
+import "regexp"
+
+// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
+
+// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
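+// Because the leading alphanumeric class is followed by `+` on the second
+// class, a valid name has at least two characters: for example, "web_1" and
+// "My-Container.2" match, while "-foo", "a/b" and a bare "a" do not.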
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go new file mode 100644 index 0000000..b5a758c --- /dev/null +++ b/api/server/backend/build/backend.go @@ -0,0 +1,90 @@ +package build + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageComponent provides an interface for working with images +type ImageComponent interface { + SquashImage(from string, to string) (string, error) + TagImageWithReference(image.ID, string, reference.Named) error +} + +// Builder defines interface for running a build +type Builder interface { + Build(context.Context, backend.BuildConfig) (*builder.Result, error) +} + +// Backend provides build functionality to the API router +type Backend struct { + builder Builder + fsCache *fscache.FSCache + imageComponent ImageComponent +} + +// NewBackend creates a new build backend from components +func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) { + return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil +} + +// Build builds an image from a Source +func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) { + options := config.Options + tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags) + if err != nil { + return "", err + } + + build, err := b.builder.Build(ctx, config) + if err != nil { + return "", err + } + + var imageID = build.ImageID + if options.Squash { + if imageID, err = squashBuild(build, b.imageComponent); err != nil { + return "", err + } + if config.ProgressWriter.AuxFormatter != nil { + if err = config.ProgressWriter.AuxFormatter.Emit(types.BuildResult{ID: imageID}); err != nil { + return "", err + } + } + } + + stdout := config.ProgressWriter.StdoutFormatter + fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) + err = tagger.TagImages(image.ID(imageID)) + return imageID, err +} + +// PruneCache removes all cached build sources +func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) { + size, err := b.fsCache.Prune() + if err != nil { + return nil, errors.Wrap(err, "failed to prune build cache") + } + return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil +} + +func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) { + var fromID string + if build.FromImage != nil { + fromID = build.FromImage.ImageID() + } + imageID, err := imageComponent.SquashImage(build.ImageID, fromID) + if err != nil { + return "", errors.Wrap(err, "error squashing image") + } + return imageID, nil +} diff --git a/api/server/backend/build/tag.go b/api/server/backend/build/tag.go new file mode 100644 index 0000000..7bd5dcd --- /dev/null +++ b/api/server/backend/build/tag.go @@ -0,0 +1,84 @@ +package build + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// Tagger is responsible for tagging an image created by a builder +type Tagger struct { + 
imageComponent ImageComponent + stdout io.Writer + repoAndTags []reference.Named +} + +// NewTagger returns a new Tagger for tagging the images of a build. +// If any of the names are invalid tags an error is returned. +func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) { + reposAndTags, err := sanitizeRepoAndTags(names) + if err != nil { + return nil, err + } + return &Tagger{ + imageComponent: backend, + stdout: stdout, + repoAndTags: reposAndTags, + }, nil +} + +// TagImages creates image tags for the imageID +func (bt *Tagger) TagImages(imageID image.ID) error { + for _, rt := range bt.repoAndTags { + // TODO @jhowardmsft LCOW support. Will need revisiting. + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + if err := bt.imageComponent.TagImageWithReference(imageID, platform, rt); err != nil { + return err + } + fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) + } + return nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNormalizedNamed(repo) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + ref = reference.TagNameOnly(ref) + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} diff --git a/api/server/httputils/decoder.go b/api/server/httputils/decoder.go new file mode 100644 index 0000000..458eac5 --- /dev/null +++ b/api/server/httputils/decoder.go @@ -0,0 +1,16 @@ +package httputils + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// ContainerDecoder specifies how +// to translate an io.Reader into +// container configuration. +type ContainerDecoder interface { + DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) + DecodeHostConfig(src io.Reader) (*container.HostConfig, error) +} diff --git a/api/server/httputils/errors.go b/api/server/httputils/errors.go new file mode 100644 index 0000000..82da21c --- /dev/null +++ b/api/server/httputils/errors.go @@ -0,0 +1,145 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. +type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// GetHTTPErrorStatusCode retrieves status code from error message. 
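+// The lookup order, per the body below, is: a concrete httpStatusError
+// implementation wins; an inputValidationError maps to 400; otherwise the
+// gRPC code is consulted, falling back to matching well-known substrings of
+// the error text. An illustrative call:
+//
+//	GetHTTPErrorStatusCode(fmt.Errorf("no such container: abc")) // -> 404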
+func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + statusCode = statusCodeFromGRPCError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + + for _, status := range []struct { + keyword string + code int + }{ + {"not found", http.StatusNotFound}, + {"cannot find", http.StatusNotFound}, + {"no such", http.StatusNotFound}, + {"bad parameter", http.StatusBadRequest}, + {"no command", http.StatusBadRequest}, + {"conflict", http.StatusConflict}, + {"impossible", http.StatusNotAcceptable}, + {"wrong login/password", http.StatusUnauthorized}, + {"unauthorized", http.StatusUnauthorized}, + {"hasn't been activated", http.StatusForbidden}, + {"this node", http.StatusServiceUnavailable}, + {"needs to be unlocked", http.StatusServiceUnavailable}, + {"certificates have expired", http.StatusServiceUnavailable}, + {"repository does not exist", http.StatusNotFound}, + } { + if strings.Contains(errStr, status.keyword) { + statusCode = status.code + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +func apiVersionSupportsJSONErrors(version string) bool { + const firstAPIVersionWithJSONErrors = "1.23" + return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors) +} + +// MakeErrorHandler makes an HTTP handler that decodes a Docker error and +// returns it in the response. 
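+// For API versions newer than 1.23 (and for requests without a version) the
+// body is JSON, for example (message text is illustrative):
+//
+//	{"message": "no such container: abc"}
+//
+// while older clients receive a plain-text body via http.Error.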
+func MakeErrorHandler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + statusCode := GetHTTPErrorStatusCode(err) + vars := mux.Vars(r) + if apiVersionSupportsJSONErrors(vars["version"]) { + response := &types.ErrorResponse{ + Message: err.Error(), + } + WriteJSON(w, statusCode, response) + } else { + http.Error(w, grpc.ErrorDesc(err), statusCode) + } + } +} + +// statusCodeFromGRPCError returns status code according to gRPC error +func statusCodeFromGRPCError(err error) int { + switch grpc.Code(err) { + case codes.InvalidArgument: // code 3 + return http.StatusBadRequest + case codes.NotFound: // code 5 + return http.StatusNotFound + case codes.AlreadyExists: // code 6 + return http.StatusConflict + case codes.PermissionDenied: // code 7 + return http.StatusForbidden + case codes.FailedPrecondition: // code 9 + return http.StatusBadRequest + case codes.Unauthenticated: // code 16 + return http.StatusUnauthorized + case codes.OutOfRange: // code 11 + return http.StatusBadRequest + case codes.Unimplemented: // code 12 + return http.StatusNotImplemented + case codes.Unavailable: // code 14 + return http.StatusServiceUnavailable + default: + // codes.Canceled(1) + // codes.Unknown(2) + // codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} diff --git a/api/server/httputils/form.go b/api/server/httputils/form.go new file mode 100644 index 0000000..78bd379 --- /dev/null +++ b/api/server/httputils/form.go @@ -0,0 +1,70 @@ +package httputils + +import ( + "errors" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above. +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + return value, err + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. 
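+// As a hypothetical illustration: for a request routed with vars["name"] set
+// to "web" and a form value path=/tmp, the result is
+// ArchiveOptions{Name: "web", Path: "/tmp"}.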
+func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, errors.New("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, errors.New("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/api/server/httputils/form_test.go b/api/server/httputils/form_test.go new file mode 100644 index 0000000..bc790e9 --- /dev/null +++ b/api/server/httputils/form_test.go @@ -0,0 +1,105 @@ +package httputils + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := BoolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !BoolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if BoolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := Int64ValueOrZero(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64ValueOrDefault(t *testing.T) { + cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatal("Expected an error.") + } +} diff --git a/api/server/httputils/httputils.go b/api/server/httputils/httputils.go new file mode 100644 index 0000000..92cb67c --- /dev/null +++ b/api/server/httputils/httputils.go @@ -0,0 +1,97 @@ +package httputils + +import ( + "fmt" + "io" + "mime" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). 
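+// A minimal illustrative endpoint (hypothetical, not defined in this package):
+//
+//	func ping(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+//		_, err := w.Write([]byte("OK"))
+//		return err
+//	}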
+type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list for http streams are properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if matchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. +func VersionFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + + if val := ctx.Value(APIVersionKey); val != nil { + return val.(string) + } + + return "" +} + +// matchesContentType validates the content type against the expected one +func matchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} diff --git a/api/server/httputils/httputils_test.go b/api/server/httputils/httputils_test.go new file mode 100644 index 0000000..d551b9d --- /dev/null +++ b/api/server/httputils/httputils_test.go @@ -0,0 +1,18 @@ +package httputils + +import "testing" + +// matchesContentType +func TestJsonContentType(t *testing.T) { + if !matchesContentType("application/json", "application/json") { + t.Fail() + } + + if !matchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if matchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} diff --git a/api/server/httputils/httputils_write_json.go b/api/server/httputils/httputils_write_json.go new file mode 100644 index 0000000..562c127 --- /dev/null +++ b/api/server/httputils/httputils_write_json.go @@ -0,0 +1,15 @@ +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. 
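+// An illustrative call:
+//
+//	WriteJSON(w, http.StatusOK, map[string]string{"status": "running"})
+//
+// sets the Content-Type header, writes the status code, and streams
+// {"status":"running"} to the client without HTML escaping.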
+func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + return enc.Encode(v) +} diff --git a/api/server/httputils/write_log_stream.go b/api/server/httputils/write_log_stream.go new file mode 100644 index 0000000..891e5f0 --- /dev/null +++ b/api/server/httputils/write_log_stream.go @@ -0,0 +1,94 @@ +package httputils + +import ( + "fmt" + "io" + "net/url" + "sort" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/stdcopy" +) + +// WriteLogStream writes an encoded byte stream of log messages from the +// messages channel, multiplexing them with a stdcopy.Writer if mux is true +func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) { + wf := ioutils.NewWriteFlusher(w) + defer wf.Close() + + wf.Flush() + + // this might seem like doing below is clear: + // var outStream io.Writer = wf + // however, this GREATLY DISPLEASES golint, and if you do that, it will + // fail CI. we need outstream to be type writer because if we mux streams, + // we will need to reassign all of the streams to be stdwriters, which only + // conforms to the io.Writer interface. + var outStream io.Writer + outStream = wf + errStream := outStream + sysErrStream := errStream + if mux { + sysErrStream = stdcopy.NewStdWriter(outStream, stdcopy.Systemerr) + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + msg, ok := <-msgs + if !ok { + return + } + // check if the message contains an error. if so, write that error + // and exit + if msg.Err != nil { + fmt.Fprintf(sysErrStream, "Error grabbing logs: %v\n", msg.Err) + continue + } + logLine := msg.Line + if config.Details { + logLine = append([]byte(stringAttrs(msg.Attrs)+" "), logLine...) + } + if config.Timestamps { + // TODO(dperny) the format is defined in + // daemon/logger/logger.go as logger.TimeFormat. importing + // logger is verboten (not part of backend) so idk if just + // importing the same thing from jsonlog is good enough + logLine = append([]byte(msg.Timestamp.Format(jsonlog.RFC3339NanoFixed)+" "), logLine...) 
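+			// After both decorations a line reads like (illustrative values):
+			//   2017-08-20T13:30:55.000000000Z key=value log text...
+			// with the attrs prefix present only when config.Details is set.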
+ } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } +} + +type byKey []string + +func (s byKey) Len() int { return len(s) } +func (s byKey) Less(i, j int) bool { + keyI := strings.Split(s[i], "=") + keyJ := strings.Split(s[j], "=") + return keyI[0] < keyJ[0] +} +func (s byKey) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func stringAttrs(a backend.LogAttributes) string { + var ss byKey + for k, v := range a { + k, v := url.QueryEscape(k), url.QueryEscape(v) + ss = append(ss, k+"="+v) + } + sort.Sort(ss) + return strings.Join(ss, ",") +} diff --git a/api/server/middleware.go b/api/server/middleware.go new file mode 100644 index 0000000..537ce80 --- /dev/null +++ b/api/server/middleware.go @@ -0,0 +1,24 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" +) + +// handlerWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + for _, m := range s.middlewares { + next = m.WrapHandler(next) + } + + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + return next +} diff --git a/api/server/middleware/cors.go b/api/server/middleware/cors.go new file mode 100644 index 0000000..ea725db --- /dev/null +++ b/api/server/middleware/cors.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// CORSMiddleware injects CORS headers to each request +// when it's configured. +type CORSMiddleware struct { + defaultHeaders string +} + +// NewCORSMiddleware creates a new CORSMiddleware with default headers. +func NewCORSMiddleware(d string) CORSMiddleware { + return CORSMiddleware{defaultHeaders: d} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. 
+func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := c.defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") + return handler(ctx, w, r, vars) + } +} diff --git a/api/server/middleware/debug.go b/api/server/middleware/debug.go new file mode 100644 index 0000000..b931f19 --- /dev/null +++ b/api/server/middleware/debug.go @@ -0,0 +1,76 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize := 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + maskSecretKeys(postForm) + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} + +func maskSecretKeys(inp interface{}) { + if arr, ok := inp.([]interface{}); ok { + for _, f := range arr { + maskSecretKeys(f) + } + return + } + if form, ok := inp.(map[string]interface{}); ok { + loop0: + for k, v := range form { + for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} { + if strings.EqualFold(m, k) { + form[k] = "*****" + continue loop0 + } + } + maskSecretKeys(v) + } + } +} diff --git a/api/server/middleware/experimental.go b/api/server/middleware/experimental.go new file mode 100644 index 0000000..b8f56e8 --- /dev/null +++ b/api/server/middleware/experimental.go @@ -0,0 +1,29 @@ +package middleware + +import ( + "net/http" + + 
"golang.org/x/net/context" +) + +// ExperimentalMiddleware is a the middleware in charge of adding the +// 'Docker-Experimental' header to every outgoing request +type ExperimentalMiddleware struct { + experimental string +} + +// NewExperimentalMiddleware creates a new ExperimentalMiddleware +func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware { + if experimentalEnabled { + return ExperimentalMiddleware{"true"} + } + return ExperimentalMiddleware{"false"} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Docker-Experimental", e.experimental) + return handler(ctx, w, r, vars) + } +} diff --git a/api/server/middleware/middleware.go b/api/server/middleware/middleware.go new file mode 100644 index 0000000..dc1f5bf --- /dev/null +++ b/api/server/middleware/middleware.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Middleware is an interface to allow the use of ordinary functions as Docker API filters. +// Any struct that has the appropriate signature can be registered as a middleware. +type Middleware interface { + WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error +} diff --git a/api/server/middleware/version.go b/api/server/middleware/version.go new file mode 100644 index 0000000..390a7f0 --- /dev/null +++ b/api/server/middleware/version.go @@ -0,0 +1,51 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VersionMiddleware is a middleware that +// validates the client and server versions. +type VersionMiddleware struct { + serverVersion string + defaultVersion string + minVersion string +} + +// NewVersionMiddleware creates a new VersionMiddleware +// with the default versions. +func NewVersionMiddleware(s, d, m string) VersionMiddleware { + return VersionMiddleware{ + serverVersion: s, + defaultVersion: d, + minVersion: m, + } +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := vars["version"] + if apiVersion == "" { + apiVersion = v.defaultVersion + } + + if versions.LessThan(apiVersion, v.minVersion) { + return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)) + } + + header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + w.Header().Set("API-Version", v.defaultVersion) + w.Header().Set("OSType", runtime.GOOS) + ctx = context.WithValue(ctx, "api-version", apiVersion) + return handler(ctx, w, r, vars) + } + +} diff --git a/api/server/middleware/version_test.go b/api/server/middleware/version_test.go new file mode 100644 index 0000000..29787bf --- /dev/null +++ b/api/server/middleware/version_test.go @@ -0,0 +1,57 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + + if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { + t.Fatalf("Expected too old client error, got %v", err) + } +} diff --git a/api/server/profiler.go b/api/server/profiler.go new file mode 100644 index 0000000..d49be33 --- /dev/null +++ b/api/server/profiler.go @@ -0,0 +1,46 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/trace", pprof.Trace) + r.HandleFunc("/pprof/{name}", handlePprof) +} + +func handlePprof(w http.ResponseWriter, r *http.Request) { + var name string + if vars := mux.Vars(r); vars != nil { + name = vars["name"] + } + pprof.Handler(name).ServeHTTP(w, r) +} + +// Replicated from expvar.go as not public. 
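For context, and not part of the original patch: Go 1.8 and later export this same handler as expvar.Handler(), so a daemon built against a newer toolchain could mount it directly instead of carrying the copy below. A minimal, self-contained sketch:

package main

import (
	"expvar"
	"net/http"
)

func main() {
	// Publish a counter; it appears in the JSON document served at /debug/vars.
	requests := expvar.NewInt("requests")
	requests.Add(1)

	mux := http.NewServeMux()
	// expvar.Handler() (Go 1.8+) emits the same {"key": value, ...} output
	// that the hand-rolled expVars handler below produces.
	mux.Handle("/debug/vars", expvar.Handler())
	http.ListenAndServe("127.0.0.1:8080", mux)
}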
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintln(w, "{") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintln(w, ",") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintln(w, "\n}") +} diff --git a/api/server/router/build/backend.go b/api/server/router/build/backend.go new file mode 100644 index 0000000..defffd3 --- /dev/null +++ b/api/server/router/build/backend.go @@ -0,0 +1,21 @@ +package build + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build a Docker image returning the id of the image + // TODO: make this return a reference instead of string + Build(context.Context, backend.BuildConfig) (string, error) + + // Prune build cache + PruneCache(context.Context) (*types.BuildCachePruneReport, error) +} + +type experimentalProvider interface { + HasExperimental() bool +} diff --git a/api/server/router/build/build.go b/api/server/router/build/build.go new file mode 100644 index 0000000..78f5ae2 --- /dev/null +++ b/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + daemon experimentalProvider + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend, d experimentalProvider) router.Router { + r := &buildRouter{backend: b, daemon: d} + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.NewPostRoute("/build", r.postBuild, router.WithCancel), + router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel), + } +} diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go new file mode 100644 index 0000000..81001e8 --- /dev/null +++ b/api/server/router/build/build_routes.go @@ -0,0 +1,262 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && 
versions.GreaterThanOrEqualTo(version, "1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.NetworkMode = r.FormValue("networkmode") + options.Tags = r.Form["t"] + options.ExtraHosts = r.Form["extrahosts"] + options.SecurityOpt = r.Form["securityopt"] + options.Squash = httputils.BoolValue(r, "squash") + options.Target = r.FormValue("target") + options.RemoteContext = r.FormValue("remote") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if !container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + if runtime.GOOS != "windows" && options.SecurityOpt != nil { + return nil, fmt.Errorf("The daemon on this platform does not support setting security options on build") + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + // Note that there are two ways a --build-arg might appear in the + // json of the query param: + // "foo":"bar" + // and "foo":nil + // The first is the normal case, ie. --build-arg foo=bar + // or --build-arg foo + // where foo's value was picked up from an env var. + // The second ("foo":nil) is where they put --build-arg foo + // but "foo" isn't set as an env var. In that case we can't just drop + // the fact they mentioned it, we need to pass that along to the builder + // so that it can print a warning about "foo" being unused if there is + // no "ARG foo" in the Dockerfile. 
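To make the two shapes concrete, here is a small, self-contained sketch (illustrative only, not part of the patch) of how decoding into a map[string]*string, as the code below does, distinguishes a build-arg that was set from one that was merely mentioned:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// "bar" was given a value on the command line; "foo" was passed as
	// --build-arg foo but had no value in the environment, so it arrives as null.
	raw := []byte(`{"bar":"baz","foo":null}`)

	buildArgs := map[string]*string{}
	if err := json.Unmarshal(raw, &buildArgs); err != nil {
		panic(err)
	}
	for k, v := range buildArgs {
		if v == nil {
			fmt.Printf("%s: mentioned but unset (builder may warn about it)\n", k)
		} else {
			fmt.Printf("%s=%s\n", k, *v)
		}
	}
}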
+ buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + var buildArgs = map[string]*string{} + if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + var labels = map[string]string{} + if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { + return nil, err + } + options.Labels = labels + } + + cacheFromJSON := r.FormValue("cachefrom") + if cacheFromJSON != "" { + var cacheFrom = []string{} + if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { + return nil, err + } + options.CacheFrom = cacheFrom + } + options.SessionID = r.FormValue("session") + + var volumes = []string{} + volumesJSON := r.FormValue("volumes") + if volumesJSON != "" { + if err := json.Unmarshal([]byte(volumesJSON), &volumes); err != nil { + return nil, err + } + options.Volumes = volumes + } + + return options, nil +} + +func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + report, err := br.backend.PruneCache(ctx) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, report) +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + notVerboseBuffer = bytes.NewBuffer(nil) + version = httputils.VersionFromContext(ctx) + ) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(streamformatter.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = getAuthConfigs(r.Header) + + if buildOptions.Squash && !br.daemon.HasExperimental() { + return apierrors.NewBadRequestError( + errors.New("squash is only supported with experimental mode")) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := streamformatter.NewJSONProgressOutput(out, true) + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext) + } + + wantAux := versions.GreaterThanOrEqualTo(version, "1.30") + + imgID, err := br.backend.Build(ctx, backend.BuildConfig{ + Source: r.Body, + Options: buildOptions, + ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader), + }) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. 
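What the client receives in that quiet case is a small JSON stream. A hypothetical client-side sketch of consuming it (the frame contents are invented for illustration; only the "stream" and "error" fields are shown):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// message mirrors the two stream fields used here; treat it as a sketch,
// not the complete wire format.
type message struct {
	Stream string `json:"stream"`
	Error  string `json:"error"`
}

func main() {
	// A quiet build typically yields one frame carrying the image ID;
	// failures arrive in-band as {"error": ...} frames.
	body := `{"stream":"sha256:e3b0c44298fc\n"}` + "\n" +
		`{"error":"squash is only supported with experimental mode"}`

	dec := json.NewDecoder(strings.NewReader(body))
	for {
		var m message
		if err := dec.Decode(&m); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		if m.Error != "" {
			fmt.Println("build failed:", m.Error)
			continue
		}
		fmt.Print(m.Stream)
	}
}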
+ if buildOptions.SuppressOutput { + fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID) + } + return nil +} + +func getAuthConfigs(header http.Header) map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + authConfigsEncoded := header.Get("X-Registry-Config") + + if authConfigsEncoded == "" { + return authConfigs + } + + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + // Pulling an image does not error when no auth is provided so to remain + // consistent with the existing api decode errors are ignored + json.NewDecoder(authConfigsJSON).Decode(&authConfigs) + return authConfigs +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter { + out = &syncWriter{w: out} + + var aux *streamformatter.AuxFormatter + if wantAux { + aux = &streamformatter.AuxFormatter{Writer: out} + } + + return backend.ProgressWriter{ + Output: out, + StdoutFormatter: streamformatter.NewStdoutWriter(out), + StderrFormatter: streamformatter.NewStderrWriter(out), + AuxFormatter: aux, + ProgressReaderFunc: createProgressReader, + } +} diff --git a/api/server/router/checkpoint/backend.go b/api/server/router/checkpoint/backend.go new file mode 100644 index 0000000..8810f88 --- /dev/null +++ b/api/server/router/checkpoint/backend.go @@ -0,0 +1,10 @@ +package checkpoint + +import "github.com/docker/docker/api/types" + +// Backend for Checkpoint +type Backend interface { + CheckpointCreate(container string, config types.CheckpointCreateOptions) error + CheckpointDelete(container string, config types.CheckpointDeleteOptions) error + CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/api/server/router/checkpoint/checkpoint.go b/api/server/router/checkpoint/checkpoint.go new file mode 100644 index 0000000..b169718 --- /dev/null +++ b/api/server/router/checkpoint/checkpoint.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// checkpointRouter is a router to talk with the checkpoint controller +type checkpointRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new checkpoint router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &checkpointRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the checkpoint controller +func (r *checkpointRouter) Routes() []router.Route { + return r.routes +} + +func (r *checkpointRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental), + router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental), + router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental), + } +} diff --git a/api/server/router/checkpoint/checkpoint_routes.go b/api/server/router/checkpoint/checkpoint_routes.go new file mode 100644 index 0000000..f988431 --- /dev/null +++ b/api/server/router/checkpoint/checkpoint_routes.go @@ 
-0,0 +1,65 @@ +package checkpoint + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var options types.CheckpointCreateOptions + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&options); err != nil { + return err + } + + err := s.backend.CheckpointCreate(vars["name"], options) + if err != nil { + return err + } + + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ + CheckpointDir: r.Form.Get("dir"), + }) + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, checkpoints) +} + +func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ + CheckpointDir: r.Form.Get("dir"), + CheckpointID: vars["checkpoint"], + }) + + if err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go new file mode 100644 index 0000000..d51ed81 --- /dev/null +++ b/api/server/router/container/backend.go @@ -0,0 +1,79 @@ +package container + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(name string, config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. 
+type stateBackend interface { + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds *int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes functions to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// systemBackend includes functions to implement to provide system-wide containers functionality. +type systemBackend interface { + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. 
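As an aside on the pattern: the aggregate interface that follows is assembled by embedding the small role interfaces above, so a daemon satisfies Backend simply by implementing each role. A toy illustration of the same idiom, with invented names:

package main

import "fmt"

// Two single-purpose role interfaces, in the spirit of stateBackend,
// monitorBackend, and friends above (names here are hypothetical).
type pauser interface {
	Pause(name string) error
}

type killer interface {
	Kill(name string, sig uint64) error
}

// The aggregate embeds the roles, just as Backend below embeds its parts.
type backend interface {
	pauser
	killer
}

// A fake daemon implements every role, and therefore the aggregate.
type fakeDaemon struct{}

func (fakeDaemon) Pause(name string) error            { fmt.Println("pause", name); return nil }
func (fakeDaemon) Kill(name string, sig uint64) error { fmt.Println("kill", name, sig); return nil }

func main() {
	var b backend = fakeDaemon{}
	b.Pause("web")
	b.Kill("web", 9)
}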
+type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend + systemBackend +} diff --git a/api/server/router/container/container.go b/api/server/router/container/container.go new file mode 100644 index 0000000..24c3224 --- /dev/null +++ b/api/server/router/container/container.go @@ -0,0 +1,77 @@ +package container + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &containerRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel), + // PUT + 
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go new file mode 100644 index 0000000..0032fea --- /dev/null +++ b/api/server/router/container/container_routes.go @@ -0,0 +1,608 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(ctx, vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. 
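The constraint described above is a property of net/http itself: once the first byte of the body has been written, the status line is on the wire and cannot be amended. A minimal, self-contained demonstration:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The first Write implicitly sends "200 OK" and the headers.
		fmt.Fprint(w, `{"stream":"..."}`)
		// Too late: this is ignored (a real server also logs a
		// "superfluous response.WriteHeader call" warning).
		w.WriteHeader(http.StatusNotFound)
	})

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/containers/web/logs", nil))
	fmt.Println(rec.Code) // still 200
}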
+ stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + containerName := vars["name"] + logsConfig := &types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + } + + // doesn't matter what version the client is on, we're using this internally only + // also do we need size? I'm thinking no, we don't + raw, err := s.backend.ContainerInspect(containerName, false, api.DefaultVersion) + if err != nil { + return err + } + container, ok := raw.(*types.ContainerJSON) + if !ok { + // %T prints the type. handy! + return fmt.Errorf("expected container to be *types.ContainerJSON but got %T", raw) + } + + msgs, err := s.backend.ContainerLogs(ctx, containerName, logsConfig) + if err != nil { + return err + } + + // if it has a tty, we're not muxing streams. if it doesn't, we are. simple. + // this is the point of no return for writing a response. once we call + // WriteLogStream, the response has been started and errors will be + // returned in band by WriteLogStream + httputils.WriteLogStream(ctx, w, msgs, logsConfig, !container.Config.Tty) + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assume chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + + version := httputils.VersionFromContext(ctx) + var hostConfig *container.HostConfig + // A non-nil json object is at least 7 characters. 
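That size heuristic is easy to verify: the smallest JSON object that carries any field at all is a one-letter key with a one-digit value. A trivial check:

package main

import "fmt"

func main() {
	// "{}" (2 bytes) is the smallest object, but the smallest object that
	// actually carries a field is 7 bytes, hence the threshold below.
	fmt.Println(len(`{}`))      // 2
	fmt.Println(len(`{"A":1}`)) // 7
}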
+ if r.ContentLength > 7 || r.ContentLength == -1 { + if versions.GreaterThanOrEqualTo(version, "1.24") { + return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")} + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := s.decoder.DecodeHostConfig(r.Body) + if err != nil { + return err + } + hostConfig = c + } + + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoint := r.Form.Get("checkpoint") + checkpointDir := r.Form.Get("checkpoint-dir") + if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. 
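Reduced to its truth table, the rule implemented below is: swallow the kill error only for a legacy client (API < 1.20) whose container had already stopped. A compact sketch of just that predicate (the function name is invented):

package main

import "fmt"

// shouldReport mirrors the condition below: report the error when the client
// speaks API >= 1.20, or when the container was not already stopped.
func shouldReport(apiGE120, isStopped bool) bool {
	return apiGE120 || !isStopped
}

func main() {
	fmt.Println(shouldReport(true, true))   // true: modern clients always see the error
	fmt.Println(shouldReport(false, true))  // false: legacy client, container already stopped
	fmt.Println(shouldReport(false, false)) // true: a real failure is always reported
}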
+ version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Behavior changed in version 1.30 to handle wait condition and to + // return headers immediately. + version := httputils.VersionFromContext(ctx) + legacyBehavior := versions.LessThan(version, "1.30") + + // The wait condition defaults to "not-running". + waitCondition := containerpkg.WaitConditionNotRunning + if !legacyBehavior { + if err := httputils.ParseForm(r); err != nil { + return err + } + switch container.WaitCondition(r.Form.Get("condition")) { + case container.WaitConditionNextExit: + waitCondition = containerpkg.WaitConditionNextExit + case container.WaitConditionRemoved: + waitCondition = containerpkg.WaitConditionRemoved + } + } + + // Note: the context should get canceled if the client closes the + // connection since this handler has been wrapped by the + // router.WithCancel() wrapper. + waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + if !legacyBehavior { + // Write response header immediately. + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + // Block on the result of the wait operation. 
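A toy model (invented names, not the daemon's types) of why the handler can flush headers first and block afterwards: the backend hands back a channel immediately, and the exit status is encoded only when it arrives, so a slow exit just keeps the connection open:

package main

import (
	"encoding/json"
	"os"
	"time"
)

// wait stands in for the backend's ContainerWait: it returns at once with a
// channel and delivers the exit code whenever the container finishes.
func wait() <-chan int {
	c := make(chan int, 1)
	go func() {
		time.Sleep(100 * time.Millisecond) // the container exiting
		c <- 137
	}()
	return c
}

func main() {
	// Headers would already be flushed at this point; we simply block.
	status := <-wait()
	json.NewEncoder(os.Stdout).Encode(map[string]int{"StatusCode": status})
}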
+ status := <-waitC + + return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{ + StatusCode: int64(status.ExitCode()), + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + resp, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := versions.LessThan(version, "1.19") + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(version, "1.25") { + hostConfig.AutoRemove = false + } + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s 
*containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + detachKeys := r.FormValue("detachKeys") + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + MuxStreams: true, + } + + if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + // Remember to close stream if error happens + conn, _, errHijack := hijacker.Hijack() + if errHijack == nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + statusText := http.StatusText(statusCode) + fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) + httputils.CloseStreams(conn) + } else { + logrus.Errorf("Error Hijacking: %v", err) + } + } + return nil +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var err error + detachKeys := r.FormValue("detachKeys") + + done := make(chan struct{}) + started := make(chan struct{}) + + version := httputils.VersionFromContext(ctx) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + // In case version 1.28 and above, a binary frame will be sent. + // See 28176 for details. 
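On the client side this frame-type switch matters when dialing the /attach/ws endpoint registered earlier. A hypothetical client sketch (the daemon host, port, and API version in the URL are assumptions):

package main

import (
	"io"
	"os"

	"golang.org/x/net/websocket"
)

func main() {
	// Endpoint shape as registered above; the daemon address is invented.
	url := "ws://127.0.0.1:2375/v1.30/containers/web/attach/ws?stream=1&stdout=1"
	ws, err := websocket.Dial(url, "", "http://127.0.0.1/")
	if err != nil {
		panic(err)
	}
	defer ws.Close()

	// With API >= 1.28 the output arrives as binary frames; either way it
	// can be streamed straight to stdout.
	io.Copy(os.Stdout, ws)
}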
+ if versions.GreaterThanOrEqualTo(version, "1.28") { + conn.PayloadType = websocket.BinaryFrame + } + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} + +func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/api/server/router/container/copy.go b/api/server/router/container/copy.go new file mode 100644 index 0000000..5cfe8d7 --- /dev/null +++ b/api/server/router/container/copy.go @@ -0,0 +1,118 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Deprecated since 1.8, Errors out since 1.12 + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.24") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, data) + return err +} + +// Encode the stat to JSON, base64 encode, and place in a header. 
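The inverse, client-side operation is the same pair of steps in reverse: base64-decode the header, then unmarshal the stat JSON. A self-contained sketch (the field names follow the stat type used below, but treat the struct as illustrative):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Pretend this value came back in the X-Docker-Container-Path-Stat header.
	header := base64.StdEncoding.EncodeToString(
		[]byte(`{"name":"etc","size":4096,"mode":2147484141}`))

	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		panic(err)
	}
	var stat struct {
		Name string `json:"name"`
		Size int64  `json:"size"`
		Mode uint32 `json:"mode"`
	}
	if err := json.Unmarshal(raw, &stat); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d bytes (mode %o)\n", stat.Name, stat.Size, stat.Mode)
}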
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + copyUIDGID := httputils.BoolValue(r, "copyUIDGID") + + return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body) +} diff --git a/api/server/router/container/exec.go b/api/server/router/container/exec.go new file mode 100644 index 0000000..1134a0e --- /dev/null +++ b/api/server/router/container/exec.go @@ -0,0 +1,140 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(name, execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
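The duplication that the TODO above complains about is visible from the client: both requests of the two-step flow carry stream-related settings. A hypothetical client-side sketch (the daemon address is invented; a detached exec is used for simplicity):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func post(url string, body interface{}) (*http.Response, error) {
	buf := &bytes.Buffer{}
	if err := json.NewEncoder(buf).Encode(body); err != nil {
		return nil, err
	}
	return http.Post(url, "application/json", buf)
}

func main() {
	daemon := "http://127.0.0.1:2375" // invented address

	// Step 1: create the exec instance; stream settings appear here...
	resp, err := post(daemon+"/containers/web/exec", map[string]interface{}{
		"Cmd":          []string{"echo", "hi"},
		"AttachStdout": true,
	})
	if err != nil {
		panic(err)
	}
	var created struct {
		ID string `json:"Id"`
	}
	json.NewDecoder(resp.Body).Decode(&created)
	resp.Body.Close()

	// Step 2: ...and again when starting it (Detach/Tty).
	resp, err = post(daemon+"/exec/"+created.ID+"/start", map[string]bool{
		"Detach": true,
		"Tty":    false,
	})
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("exec", created.ID, "started")
}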
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if versions.GreaterThan(version, "1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") + } else { + fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") + } + + // copy headers that were removed as part of hijack + if err := w.Header().WriteSubset(outStream, nil); err != nil { + return err + } + fmt.Fprint(outStream, "\r\n") + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + // Maybe we should pass ctx here if we're not detaching? + if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error() + "\r\n")) + logrus.Errorf("Error running exec in container: %v", err) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/api/server/router/container/inspect.go b/api/server/router/container/inspect.go new file mode 100644 index 0000000..dbbced7 --- /dev/null +++ b/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. 
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/api/server/router/distribution/backend.go b/api/server/router/distribution/backend.go new file mode 100644 index 0000000..fc3a80e --- /dev/null +++ b/api/server/router/distribution/backend.go @@ -0,0 +1,14 @@ +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) +} diff --git a/api/server/router/distribution/distribution.go b/api/server/router/distribution/distribution.go new file mode 100644 index 0000000..c1fb7bc --- /dev/null +++ b/api/server/router/distribution/distribution.go @@ -0,0 +1,31 @@ +package distribution + +import "github.com/docker/docker/api/server/router" + +// distributionRouter is a router to talk with the registry +type distributionRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new distribution router +func NewRouter(backend Backend) router.Router { + r := &distributionRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes +func (r *distributionRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the distribution router +func (r *distributionRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo), + } +} diff --git a/api/server/router/distribution/distribution_routes.go b/api/server/router/distribution/distribution_routes.go new file mode 100644 index 0000000..cc9e66a --- /dev/null +++ b/api/server/router/distribution/distribution_routes.go @@ -0,0 +1,138 @@ +package distribution + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strings" + + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + var ( + config = &types.AuthConfig{} + authEncoded = r.Header.Get("X-Registry-Auth") + distributionInspect registrytypes.DistributionInspect + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase 
compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + + image := vars["name"] + + ref, err := reference.ParseAnyReference(image) + if err != nil { + return err + } + namedRef, ok := ref.(reference.Named) + if !ok { + if _, ok := ref.(reference.Digested); ok { + // full image ID + return errors.Errorf("no manifest found for full image ID") + } + return errors.Errorf("unknown image reference format: %s", image) + } + + distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config) + if err != nil { + return err + } + blobsrvc := distrepo.Blobs(ctx) + + if canonicalRef, ok := namedRef.(reference.Canonical); !ok { + namedRef = reference.TagNameOnly(namedRef) + + taggedRef, ok := namedRef.(reference.NamedTagged) + if !ok { + return errors.Errorf("image reference not tagged: %s", image) + } + + descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag()) + if err != nil { + return err + } + distributionInspect.Descriptor = v1.Descriptor{ + MediaType: descriptor.MediaType, + Digest: descriptor.Digest, + Size: descriptor.Size, + } + } else { + // TODO(nishanttotla): Once manifests can be looked up as a blob, the + // descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest()) + // instead of having to manually fill in the fields + distributionInspect.Descriptor.Digest = canonicalRef.Digest() + } + + // we have a digest, so we can retrieve the manifest + mnfstsrvc, err := distrepo.Manifests(ctx) + if err != nil { + return err + } + mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest) + if err != nil { + return err + } + + mediaType, payload, err := mnfst.Payload() + if err != nil { + return err + } + // update MediaType because registry might return something incorrect + distributionInspect.Descriptor.MediaType = mediaType + if distributionInspect.Descriptor.Size == 0 { + distributionInspect.Descriptor.Size = int64(len(payload)) + } + + // retrieve platform information depending on the type of manifest + switch mnfstObj := mnfst.(type) { + case *manifestlist.DeserializedManifestList: + for _, m := range mnfstObj.Manifests { + distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }) + } + case *schema2.DeserializedManifest: + configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) + var platform v1.Platform + if err == nil { + err := json.Unmarshal(configJSON, &platform) + if err == nil && (platform.OS != "" || platform.Architecture != "") { + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + } + case *schema1.SignedManifest: + platform := v1.Platform{ + Architecture: mnfstObj.Architecture, + OS: "linux", + } + distributionInspect.Platforms = append(distributionInspect.Platforms, platform) + } + + return httputils.WriteJSON(w, http.StatusOK, distributionInspect) +} diff --git a/api/server/router/experimental.go b/api/server/router/experimental.go new file mode 100644 index 0000000..ac31f04 --- /dev/null +++ b/api/server/router/experimental.go @@ -0,0 +1,67 @@ +package router + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" +) + +var ( + errExperimentalFeature = errors.New("This experimental feature is disabled by default. 
Start the Docker daemon in experimental mode in order to enable it.")
+)
+
+// ExperimentalRoute defines an experimental API route that can be enabled or disabled.
+type ExperimentalRoute interface {
+	Route
+
+	Enable()
+	Disable()
+}
+
+// experimentalRoute defines an experimental API route that can be enabled or disabled.
+// It implements ExperimentalRoute
+type experimentalRoute struct {
+	local   Route
+	handler httputils.APIFunc
+}
+
+// Enable enables this experimental route
+func (r *experimentalRoute) Enable() {
+	r.handler = r.local.Handler()
+}
+
+// Disable disables the experimental route
+func (r *experimentalRoute) Disable() {
+	r.handler = experimentalHandler
+}
+
+func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented)
+}
+
+// Handler returns the APIFunc to let the server wrap it in middlewares.
+func (r *experimentalRoute) Handler() httputils.APIFunc {
+	return r.handler
+}
+
+// Method returns the http method that the route responds to.
+func (r *experimentalRoute) Method() string {
+	return r.local.Method()
+}
+
+// Path returns the subpath where the route responds to.
+func (r *experimentalRoute) Path() string {
+	return r.local.Path()
+}
+
+// Experimental will mark a route as experimental.
+func Experimental(r Route) Route {
+	return &experimentalRoute{
+		local:   r,
+		handler: experimentalHandler,
+	}
+}
diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go
new file mode 100644
index 0000000..f3e92a1
--- /dev/null
+++ b/api/server/router/image/backend.go
@@ -0,0 +1,47 @@
+package image
+
+import (
+	"io"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/image"
+	"github.com/docker/docker/api/types/registry"
+	"golang.org/x/net/context"
+)
+
+// Backend is all the methods that need to be implemented
+// to provide image specific functionality.
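+// It is the union of the narrower per-concern interfaces declared below.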
+type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + DeltaCreate(deltaSrc, deltaDest string, outStream io.Writer) error + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) + ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) + Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(imageName, repository, tag string) error + ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/api/server/router/image/image.go b/api/server/router/image/image.go new file mode 100644 index 0000000..132e281 --- /dev/null +++ b/api/server/router/image/image.go @@ -0,0 +1,51 @@ +package image + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { + r := &imageRouter{ + backend: backend, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel), + router.NewPostRoute("/images/delta", r.postImagesDelta), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", 
r.deleteImages), + } +} diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go new file mode 100644 index 0000000..dd398e4 --- /dev/null +++ b/api/server/router/image/image_routes.go @@ -0,0 +1,400 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { + pause = true + } + + c, _, _, err := s.decoder.DecodeConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: c, + MergeConfigs: true, + }, + Changes: r.Form["changes"], + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + // TODO @jhowardmsft LCOW Support: Eventually we will need an API change + // so that platform comes from (for example) r.Form.Get("platform"). For + // the initial implementation, we assume that the platform is the + // runtime OS of the host. It will also need a validation function such + // as below which should be called after getting it from the API. 
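+	// (In the sketch below an empty requested platform falls back to the
+	// host's runtime.GOOS, and "linux" is additionally accepted when LCOW
+	// support is available.)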
+ // + // Ensures the requested platform is valid and normalized + //func validatePlatform(req string) (string, error) { + // req = strings.ToLower(req) + // if req == "" { + // req = runtime.GOOS // default to host platform + // } + // valid := []string{runtime.GOOS} + // + // if system.LCOWSupported() { + // valid = append(valid, "linux") + // } + // + // for _, item := range valid { + // if req == item { + // return req, nil + // } + // } + // return "", fmt.Errorf("invalid platform requested: %s", req) + //} + // + // And in the call-site: + // if platform, err = validatePlatform(platform); err != nil { + // return err + // } + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output) + } else { //import + src := r.Form.Get("fromSrc") + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + err = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"]) + } + if err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func (d *imageRouter) postImagesDelta(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + deltaSrc := r.Form.Get("src") + deltaDest := r.Form.Get("dest") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := d.backend.DeltaCreate(deltaSrc, deltaDest, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + image := vars["name"] 
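+	// The image name arrives in the URL path and the tag as a query
+	// parameter, e.g. POST /images/busybox/push?tag=latest; push progress
+	// is then streamed back as JSON messages on the flushing writer below.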
+ tag := r.Form.Get("tag") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + imageFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + filterParam := r.Form.Get("filter") + // FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12 + if filterParam != "" { + imageFilters.Add("reference", filterParam) + } + + images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := 
s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + limit := registry.DefaultSearchLimit + if r.Form.Get("limit") != "" { + limitValue, err := strconv.Atoi(r.Form.Get("limit")) + if err != nil { + return err + } + limit = limitValue + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} + +func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/api/server/router/local.go b/api/server/router/local.go new file mode 100644 index 0000000..ba70f34 --- /dev/null +++ b/api/server/router/local.go @@ -0,0 +1,104 @@ +package router + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// RouteWrapper wraps a route with extra functionality. +// It is passed in when creating a new route. +type RouteWrapper func(r Route) Route + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + var r Route = localRoute{method, path, handler} + for _, o := range opts { + r = o(r) + } + return r +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("GET", path, handler, opts...) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("POST", path, handler, opts...) 
+} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("PUT", path, handler, opts...) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("DELETE", path, handler, opts...) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("OPTIONS", path, handler, opts...) +} + +// NewHeadRoute initializes a new route with the http method HEAD. +func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { + return NewRoute("HEAD", path, handler, opts...) +} + +func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if notifier, ok := w.(http.CloseNotifier); ok { + notify := notifier.CloseNotify() + notifyCtx, cancel := context.WithCancel(ctx) + finished := make(chan struct{}) + defer close(finished) + ctx = notifyCtx + go func() { + select { + case <-notify: + cancel() + case <-finished: + } + }() + } + return h(ctx, w, r, vars) + } +} + +// WithCancel makes new route which embeds http.CloseNotifier feature to +// context.Context of handler. +func WithCancel(r Route) Route { + return localRoute{ + method: r.Method(), + path: r.Path(), + handler: cancellableHandler(r.Handler()), + } +} diff --git a/api/server/router/network/backend.go b/api/server/router/network/backend.go new file mode 100644 index 0000000..a32a0b9 --- /dev/null +++ b/api/server/router/network/backend.go @@ -0,0 +1,22 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented +// to provide network specific functionality. 
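+// The network router pairs this Backend with a *cluster.Cluster (see
+// NewRouter in network.go) so that swarm-scoped networks are covered too.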
+type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetworks() []libnetwork.Network + CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error + DeleteNetwork(name string) error + NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) +} diff --git a/api/server/router/network/filter.go b/api/server/router/network/filter.go new file mode 100644 index 0000000..afe4e23 --- /dev/null +++ b/api/server/router/network/filter.go @@ -0,0 +1,87 @@ +package network + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/runconfig" +) + +func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) { + retNws := []types.NetworkResource{} + switch netType { + case "builtin": + for _, nw := range nws { + if runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + case "custom": + for _, nw := range nws { + if !runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + default: + return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) + } + return retNws, nil +} + +// filterNetworks filters network list according to user specified filter +// and returns user chosen networks +func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { + // if filter is empty, return original network list + if filter.Len() == 0 { + return nws, nil + } + + displayNet := []types.NetworkResource{} + for _, nw := range nws { + if filter.Include("driver") { + if !filter.ExactMatch("driver", nw.Driver) { + continue + } + } + if filter.Include("name") { + if !filter.Match("name", nw.Name) { + continue + } + } + if filter.Include("id") { + if !filter.Match("id", nw.ID) { + continue + } + } + if filter.Include("label") { + if !filter.MatchKVList("label", nw.Labels) { + continue + } + } + if filter.Include("scope") { + if !filter.ExactMatch("scope", nw.Scope) { + continue + } + } + displayNet = append(displayNet, nw) + } + + if filter.Include("type") { + typeNet := []types.NetworkResource{} + errFilter := filter.WalkValues("type", func(fval string) error { + passList, err := filterNetworkByType(displayNet, fval) + if err != nil { + return err + } + typeNet = append(typeNet, passList...) 
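+			// Each requested "type" value contributes its matches, so asking
+			// for both "builtin" and "custom" yields the union of the two.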
+ return nil + }) + if errFilter != nil { + return nil, errFilter + } + displayNet = typeNet + } + + return displayNet, nil +} diff --git a/api/server/router/network/filter_test.go b/api/server/router/network/filter_test.go new file mode 100644 index 0000000..af3e949 --- /dev/null +++ b/api/server/router/network/filter_test.go @@ -0,0 +1,149 @@ +// +build !windows + +package network + +import ( + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +func TestFilterNetworks(t *testing.T) { + networks := []types.NetworkResource{ + { + Name: "host", + Driver: "host", + Scope: "local", + }, + { + Name: "bridge", + Driver: "bridge", + Scope: "local", + }, + { + Name: "none", + Driver: "null", + Scope: "local", + }, + { + Name: "myoverlay", + Driver: "overlay", + Scope: "swarm", + }, + { + Name: "mydrivernet", + Driver: "mydriver", + Scope: "local", + }, + { + Name: "mykvnet", + Driver: "mykvdriver", + Scope: "global", + }, + } + + bridgeDriverFilters := filters.NewArgs() + bridgeDriverFilters.Add("driver", "bridge") + + overlayDriverFilters := filters.NewArgs() + overlayDriverFilters.Add("driver", "overlay") + + nonameDriverFilters := filters.NewArgs() + nonameDriverFilters.Add("driver", "noname") + + customDriverFilters := filters.NewArgs() + customDriverFilters.Add("type", "custom") + + builtinDriverFilters := filters.NewArgs() + builtinDriverFilters.Add("type", "builtin") + + invalidDriverFilters := filters.NewArgs() + invalidDriverFilters.Add("type", "invalid") + + localScopeFilters := filters.NewArgs() + localScopeFilters.Add("scope", "local") + + swarmScopeFilters := filters.NewArgs() + swarmScopeFilters.Add("scope", "swarm") + + globalScopeFilters := filters.NewArgs() + globalScopeFilters.Add("scope", "global") + + testCases := []struct { + filter filters.Args + resultCount int + err string + }{ + { + filter: bridgeDriverFilters, + resultCount: 1, + err: "", + }, + { + filter: overlayDriverFilters, + resultCount: 1, + err: "", + }, + { + filter: nonameDriverFilters, + resultCount: 0, + err: "", + }, + { + filter: customDriverFilters, + resultCount: 3, + err: "", + }, + { + filter: builtinDriverFilters, + resultCount: 3, + err: "", + }, + { + filter: invalidDriverFilters, + resultCount: 0, + err: "Invalid filter: 'type'='invalid'", + }, + { + filter: localScopeFilters, + resultCount: 4, + err: "", + }, + { + filter: swarmScopeFilters, + resultCount: 1, + err: "", + }, + { + filter: globalScopeFilters, + resultCount: 1, + err: "", + }, + } + + for _, testCase := range testCases { + result, err := filterNetworks(networks, testCase.filter) + if testCase.err != "" { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + + } else if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } else { + if err != nil { + t.Fatalf("expect no error, got error '%s'", err) + } + // Make sure result is not nil + if result == nil { + t.Fatal("filterNetworks should not return nil") + } + + if len(result) != testCase.resultCount { + t.Fatalf("expect '%d' networks, got '%d' networks", testCase.resultCount, len(result)) + } + } + } +} diff --git a/api/server/router/network/network.go b/api/server/router/network/network.go new file mode 100644 index 0000000..eaf52aa --- /dev/null +++ b/api/server/router/network/network.go @@ -0,0 +1,44 @@ +package network + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// 
networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + cluster *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &networkRouter{ + backend: b, + cluster: c, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/networks", r.getNetworksList), + router.NewGetRoute("/networks/", r.getNetworksList), + router.NewGetRoute("/networks/{id:.+}", r.getNetwork), + // POST + router.NewPostRoute("/networks/create", r.postNetworkCreate), + router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), + router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), + router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), + } +} diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go new file mode 100644 index 0000000..b9f8592 --- /dev/null +++ b/api/server/router/network/network_routes.go @@ -0,0 +1,472 @@ +package network + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/networkdb" +) + +var ( + // acceptedNetworkFilters is a list of acceptable filters + acceptedNetworkFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + "scope": true, + } +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + if err := netFilters.Validate(acceptedNetworkFilters); err != nil { + return err + } + + list := []types.NetworkResource{} + + if nr, err := n.cluster.GetNetworks(); err == nil { + list = append(list, nr...) + } + + // Combine the network list returned by Docker daemon if it is not already + // returned by the cluster manager +SKIP: + for _, nw := range n.backend.GetNetworks() { + for _, nl := range list { + if nl.ID == nw.ID() { + continue SKIP + } + } + + var nr *types.NetworkResource + // Versions < 1.28 fetches all the containers attached to a network + // in a network list api call. It is a heavy weight operation when + // run across all the networks. 
Starting API version 1.28, this detailed
+		// info is available for network specific GET API (equivalent to inspect)
+		if versions.LessThan(httputils.VersionFromContext(ctx), "1.28") {
+			nr = n.buildDetailedNetworkResources(nw, false)
+		} else {
+			nr = n.buildNetworkResource(nw)
+		}
+		list = append(list, *nr)
+	}
+
+	list, err = filterNetworks(list, netFilters)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, list)
+}
+
+func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	term := vars["id"]
+	var (
+		verbose bool
+		err     error
+	)
+	if v := r.URL.Query().Get("verbose"); v != "" {
+		if verbose, err = strconv.ParseBool(v); err != nil {
+			err = fmt.Errorf("invalid value for verbose: %s", v)
+			return errors.NewBadRequestError(err)
+		}
+	}
+	scope := r.URL.Query().Get("scope")
+
+	isMatchingScope := func(scope, term string) bool {
+		if term != "" {
+			return scope == term
+		}
+		return true
+	}
+
+	// In case multiple networks have duplicate names, return error.
+	// TODO (yongtang): should we wrap with version here for backward compatibility?
+
+	// First find based on full ID, return immediately once one is found.
+	// If a network appears both in swarm and local, assume it is in local first
+
+	// For full name and partial ID, save the result first, and process later
+	// in case multiple records were found based on the same term
+	listByFullName := map[string]types.NetworkResource{}
+	listByPartialID := map[string]types.NetworkResource{}
+
+	nw := n.backend.GetNetworks()
+	for _, network := range nw {
+		if network.ID() == term && isMatchingScope(network.Info().Scope(), scope) {
+			return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose))
+		}
+		if network.Name() == term && isMatchingScope(network.Info().Scope(), scope) {
+			// No need to check the ID collision here as we are still in
+			// local scope and the network ID is unique in this scope.
+			listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
+		}
+		if strings.HasPrefix(network.ID(), term) && isMatchingScope(network.Info().Scope(), scope) {
+			// No need to check the ID collision here as we are still in
+			// local scope and the network ID is unique in this scope.
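+			// Partial-ID matches are only honored further down when exactly
+			// one network matched; ambiguous prefixes produce an error.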
+ listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) + } + } + + nr, _ := n.cluster.GetNetworks() + for _, network := range nr { + if network.ID == term && isMatchingScope(network.Scope, scope) { + return httputils.WriteJSON(w, http.StatusOK, network) + } + if network.Name == term && isMatchingScope(network.Scope, scope) { + // Check the ID collision as we are in swarm scope here, and + // the map (of the listByFullName) may have already had a + // network with the same ID (from local scope previously) + if _, ok := listByFullName[network.ID]; !ok { + listByFullName[network.ID] = network + } + } + if strings.HasPrefix(network.ID, term) && isMatchingScope(network.Scope, scope) { + // Check the ID collision as we are in swarm scope here, and + // the map (of the listByPartialID) may have already had a + // network with the same ID (from local scope previously) + if _, ok := listByPartialID[network.ID]; !ok { + listByPartialID[network.ID] = network + } + } + } + + // Find based on full name, returns true only if no duplicates + if len(listByFullName) == 1 { + for _, v := range listByFullName { + return httputils.WriteJSON(w, http.StatusOK, v) + } + } + if len(listByFullName) > 1 { + return fmt.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)) + } + + // Find based on partial ID, returns true only if no duplicates + if len(listByPartialID) == 1 { + for _, v := range listByPartialID { + return httputils.WriteJSON(w, http.StatusOK, v) + } + } + if len(listByPartialID) > 1 { + return fmt.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)) + } + + return libnetwork.ErrNoSuchNetwork(term) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreateRequest + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 { + return libnetwork.NetworkNameError(create.Name) + } + + nw, err := n.backend.CreateNetwork(create) + if err != nil { + var warning string + if _, ok := err.(libnetwork.NetworkNameError); ok { + // check if user defined CheckDuplicate, if set true, return err + // otherwise prepare a warning message + if create.CheckDuplicate { + return libnetwork.NetworkNameError(create.Name) + } + warning = libnetwork.NetworkNameError(create.Name).Error() + } + + if _, ok := err.(libnetwork.ManagerRedirectError); !ok { + return err + } + id, err := n.cluster.CreateNetwork(create) + if err != nil { + return err + } + nw = &types.NetworkCreateResponse{ + ID: id, + Warning: warning, + } + } + + return httputils.WriteJSON(w, http.StatusCreated, nw) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w 
http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if _, err := n.cluster.GetNetwork(vars["id"]); err == nil { + if err = n.cluster.RemoveNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil + } + if err := n.backend.DeleteNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + info := nw.Info() + r.Name = nw.Name() + r.ID = nw.ID() + r.Created = info.Created() + r.Scope = info.Scope() + r.Driver = nw.Type() + r.EnableIPv6 = info.IPv6Enabled() + r.Internal = info.Internal() + r.Attachable = info.Attachable() + r.Ingress = info.Ingress() + r.Options = info.DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, info) + r.Labels = info.Labels() + r.ConfigOnly = info.ConfigOnly() + + if cn := info.ConfigFrom(); cn != "" { + r.ConfigFrom = network.ConfigReference{Network: cn} + } + + peers := info.Peers() + if len(peers) != 0 { + r.Peers = buildPeerInfoResources(peers) + } + + return r +} + +func (n *networkRouter) buildDetailedNetworkResources(nw libnetwork.Network, verbose bool) *types.NetworkResource { + if nw == nil { + return &types.NetworkResource{} + } + + r := n.buildNetworkResource(nw) + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + tmpID := e.ID() + key := "ep-" + tmpID + if sb != nil { + key = sb.ContainerID() + } + + r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) + } + if !verbose { + return r + } + services := nw.Info().Services() + r.Services = make(map[string]network.ServiceInfo) + for name, service := range services { + tasks := []network.Task{} + for _, t := range service.Tasks { + tasks = append(tasks, network.Task{ + Name: t.Name, + EndpointID: t.EndpointID, + EndpointIP: t.EndpointIP, + Info: t.Info, + }) + } + r.Services[name] = network.ServiceInfo{ + VIP: service.VIP, + Ports: service.Ports, + Tasks: tasks, + LocalLBIndex: service.LocalLBIndex, + } + } + return r +} + +func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { + peerInfo := make([]network.PeerInfo, 0, len(peers)) + for _, peer := range peers { + peerInfo = append(peerInfo, network.PeerInfo{ + Name: peer.Name, + IP: peer.IP, + }) + } + return peerInfo +} + +func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { + id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() + + ipv4Info, ipv6Info := nwInfo.IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = 
ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) == 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + for _, ip6Info := range ipv6Info { + if ip6Info.IPAMData.Pool == nil { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { + er := types.EndpointResource{} + + er.EndpointID = id + er.Name = name + ei := info + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} + +func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/api/server/router/plugin/backend.go b/api/server/router/plugin/backend.go new file mode 100644 index 0000000..a5f3c97 --- /dev/null +++ b/api/server/router/plugin/backend.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "io" + "net/http" + + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// Backend for Plugin +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + List(filters.Args) ([]enginetypes.Plugin, error) + Inspect(name string) (*enginetypes.Plugin, error) + Remove(name string, config *enginetypes.PluginRmConfig) error + Set(name string, args []string) error + Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + 
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error +} diff --git a/api/server/router/plugin/plugin.go b/api/server/router/plugin/plugin.go new file mode 100644 index 0000000..22819e2 --- /dev/null +++ b/api/server/router/plugin/plugin.go @@ -0,0 +1,39 @@ +package plugin + +import "github.com/docker/docker/api/server/router" + +// pluginRouter is a router to talk with the plugin controller +type pluginRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new plugin router +func NewRouter(b Backend) router.Router { + r := &pluginRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the plugin controller +func (r *pluginRouter) Routes() []router.Route { + return r.routes +} + +func (r *pluginRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/plugins", r.listPlugins), + router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin), + router.NewGetRoute("/plugins/privileges", r.getPrivileges), + router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), + router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? + router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), + router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel), + router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), + router.NewPostRoute("/plugins/create", r.createPlugin), + } +} diff --git a/api/server/router/plugin/plugin_routes.go b/api/server/router/plugin/plugin_routes.go new file mode 100644 index 0000000..79e3cf5 --- /dev/null +++ b/api/server/router/plugin/plugin_routes.go @@ -0,0 +1,310 @@ +package plugin + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) { + + metaHeaders := map[string][]string{} + for k, v := range headers { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + // Get X-Registry-Auth + authEncoded := headers.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + authConfig = &types.AuthConfig{} + } + } + + return metaHeaders, authConfig +} + +// parseRemoteRef parses the remote reference into a reference.Named +// returning the tag associated with the reference. In the case the +// given reference string includes both digest and tag, the returned +// reference will have the digest without the tag, but the tag will +// be returned. 
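+// For example, "example.com/repo:v1@sha256:<digest>" is returned as the
+// digested reference "example.com/repo@sha256:<digest>" plus the tag "v1",
+// while a bare "example.com/repo" just gains the default tag and an empty
+// tag string.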
+func parseRemoteRef(remote string) (reference.Named, string, error) { + // Parse remote reference, supporting remotes with name and tag + remoteRef, err := reference.ParseNormalizedNamed(remote) + if err != nil { + return nil, "", err + } + + type canonicalWithTag interface { + reference.Canonical + Tag() string + } + + if canonical, ok := remoteRef.(canonicalWithTag); ok { + remoteRef, err = reference.WithDigest(reference.TrimNamed(remoteRef), canonical.Digest()) + if err != nil { + return nil, "", err + } + return remoteRef, canonical.Tag(), nil + } + + remoteRef = reference.TagNameOnly(remoteRef) + + return remoteRef, "", nil +} + +func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + ref, _, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, privileges) +} + +func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, vars["name"]) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, r.FormValue("name")) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + + return nil +} + +func getName(ref reference.Named, tag, name string) (string, error) { + if name == "" { + if _, ok := ref.(reference.Canonical); ok { + trimmed := reference.TrimNamed(ref) + if tag != "" { + nt, err := reference.WithTag(trimmed, tag) 
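+				// e.g. tag "v2" re-applied to the trimmed ref yields the
+				// familiar local name "repo/name:v2" below.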
+ if err != nil { + return "", err + } + name = reference.FamiliarString(nt) + } else { + name = reference.FamiliarString(reference.TagNameOnly(trimmed)) + } + } else { + name = reference.FamiliarString(ref) + } + } else { + localRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return "", err + } + if _, ok := localRef.(reference.Canonical); ok { + return "", errors.New("cannot use digest in plugin tag") + } + if reference.IsNameOnly(localRef) { + // TODO: log change in name to out stream + name = reference.FamiliarString(reference.TagNameOnly(localRef)) + } + } + return name, nil +} + +func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + options := &types.PluginCreateOptions{ + RepoName: r.FormValue("name")} + + if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { + return err + } + //TODO: send progress bar + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + timeout, err := strconv.Atoi(r.Form.Get("timeout")) + if err != nil { + return err + } + config := &types.PluginEnableConfig{Timeout: timeout} + + return pr.backend.Enable(name, config) +} + +func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginDisableConfig{ + ForceDisable: httputils.BoolValue(r, "force"), + } + + return pr.backend.Disable(name, config) +} + +func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + } + return pr.backend.Remove(name, config) +} + +func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.FormatError(err)) + } + return nil +} + +func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var args []string + if err := json.NewDecoder(r.Body).Decode(&args); err != nil { + return err + } + if err := pr.backend.Set(vars["name"], args); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pluginFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + l, err := pr.backend.List(pluginFilters) + if err != nil { + return err + } + return 
httputils.WriteJSON(w, http.StatusOK, l)
+}
+
+func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	result, err := pr.backend.Inspect(vars["name"])
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, result)
+}
diff --git a/api/server/router/router.go b/api/server/router/router.go
new file mode 100644
index 0000000..2de25c2
--- /dev/null
+++ b/api/server/router/router.go
@@ -0,0 +1,19 @@
+package router
+
+import "github.com/docker/docker/api/server/httputils"
+
+// Router defines an interface to specify a group of routes to add to the docker server.
+type Router interface {
+	// Routes returns the list of routes to add to the docker server.
+	Routes() []Route
+}
+
+// Route defines an individual API route in the docker server.
+type Route interface {
+	// Handler returns the raw function to create the http handler.
+	Handler() httputils.APIFunc
+	// Method returns the http method that the route responds to.
+	Method() string
+	// Path returns the subpath where the route responds to.
+	Path() string
+}
diff --git a/api/server/router/session/backend.go b/api/server/router/session/backend.go
new file mode 100644
index 0000000..ad4cc1b
--- /dev/null
+++ b/api/server/router/session/backend.go
@@ -0,0 +1,12 @@
+package session
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// Backend abstracts a session receiver from an http request.
+type Backend interface {
+	HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error
+}
diff --git a/api/server/router/session/session.go b/api/server/router/session/session.go
new file mode 100644
index 0000000..977a9c4
--- /dev/null
+++ b/api/server/router/session/session.go
@@ -0,0 +1,29 @@
+package session
+
+import "github.com/docker/docker/api/server/router"
+
+// sessionRouter is a router to talk with the session controller
+type sessionRouter struct {
+	backend Backend
+	routes  []router.Route
+}
+
+// NewRouter initializes a new session router
+func NewRouter(b Backend) router.Router {
+	r := &sessionRouter{
+		backend: b,
+	}
+	r.initRoutes()
+	return r
+}
+
+// Routes returns the available routes to the session controller
+func (r *sessionRouter) Routes() []router.Route {
+	return r.routes
+}
+
+func (r *sessionRouter) initRoutes() {
+	r.routes = []router.Route{
+		router.Experimental(router.NewPostRoute("/session", r.startSession)),
+	}
+}
diff --git a/api/server/router/session/session_routes.go b/api/server/router/session/session_routes.go
new file mode 100644
index 0000000..ef9753c
--- /dev/null
+++ b/api/server/router/session/session_routes.go
@@ -0,0 +1,16 @@
+package session
+
+import (
+	"net/http"
+
+	apierrors "github.com/docker/docker/api/errors"
+	"golang.org/x/net/context"
+)
+
+func (sr *sessionRouter) startSession(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	err := sr.backend.HandleHTTPRequest(ctx, w, r)
+	if err != nil {
+		return apierrors.NewBadRequestError(err)
+	}
+	return nil
+}
diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go
new file mode 100644
index 0000000..3b7933d
--- /dev/null
+++ b/api/server/router/swarm/backend.go
@@ -0,0 +1,47 @@
+package swarm
+
+import (
+	basictypes "github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	types "github.com/docker/docker/api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// Backend abstracts a swarm manager.
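+// Its methods mirror the route groups registered in cluster.go: swarm
+// lifecycle, services, nodes, tasks, secrets and configs.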
+type Backend interface {
+	Init(req types.InitRequest) (string, error)
+	Join(req types.JoinRequest) error
+	Leave(force bool) error
+	Inspect() (types.Swarm, error)
+	Update(uint64, types.Spec, types.UpdateFlags) error
+	GetUnlockKey() (string, error)
+	UnlockSwarm(req types.UnlockRequest) error
+
+	GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
+	GetService(idOrName string, insertDefaults bool) (types.Service, error)
+	CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error)
+	UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error)
+	RemoveService(string) error
+
+	ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
+
+	GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
+	GetNode(string) (types.Node, error)
+	UpdateNode(string, uint64, types.NodeSpec) error
+	RemoveNode(string, bool) error
+
+	GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
+	GetTask(string) (types.Task, error)
+
+	GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
+	CreateSecret(s types.SecretSpec) (string, error)
+	RemoveSecret(idOrName string) error
+	GetSecret(id string) (types.Secret, error)
+	UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error
+
+	GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error)
+	CreateConfig(s types.ConfigSpec) (string, error)
+	RemoveConfig(id string) error
+	GetConfig(id string) (types.Config, error)
+	UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error
+}
diff --git a/api/server/router/swarm/cluster.go b/api/server/router/swarm/cluster.go
new file mode 100644
index 0000000..2529250
--- /dev/null
+++ b/api/server/router/swarm/cluster.go
@@ -0,0 +1,63 @@
+package swarm
+
+import "github.com/docker/docker/api/server/router"
+
+// swarmRouter is a router to talk with the swarm controller
+type swarmRouter struct {
+	backend Backend
+	routes  []router.Route
+}
+
+// NewRouter initializes a new swarm router
+func NewRouter(b Backend) router.Router {
+	r := &swarmRouter{
+		backend: b,
+	}
+	r.initRoutes()
+	return r
+}
+
+// Routes returns the available routes to the swarm controller
+func (sr *swarmRouter) Routes() []router.Route {
+	return sr.routes
+}
+
+func (sr *swarmRouter) initRoutes() {
+	sr.routes = []router.Route{
+		router.NewPostRoute("/swarm/init", sr.initCluster),
+		router.NewPostRoute("/swarm/join", sr.joinCluster),
+		router.NewPostRoute("/swarm/leave", sr.leaveCluster),
+		router.NewGetRoute("/swarm", sr.inspectCluster),
+		router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey),
+		router.NewPostRoute("/swarm/update", sr.updateCluster),
+		router.NewPostRoute("/swarm/unlock", sr.unlockCluster),
+
+		router.NewGetRoute("/services", sr.getServices),
+		router.NewGetRoute("/services/{id}", sr.getService),
+		router.NewPostRoute("/services/create", sr.createService),
+		router.NewPostRoute("/services/{id}/update", sr.updateService),
+		router.NewDeleteRoute("/services/{id}", sr.removeService),
+		router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel),
+
+		router.NewGetRoute("/nodes", sr.getNodes),
+		router.NewGetRoute("/nodes/{id}", sr.getNode),
+		router.NewDeleteRoute("/nodes/{id}", sr.removeNode),
+		router.NewPostRoute("/nodes/{id}/update", sr.updateNode),
+
+		router.NewGetRoute("/tasks", sr.getTasks),
+		router.NewGetRoute("/tasks/{id}", sr.getTask),
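+		// Routes constructed with router.WithCancel (here and above) are
+		// long-running streaming endpoints; the wrapper ties the handler's
+		// context to the client connection so the handler is cancelled when
+		// the client goes away.
+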
router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel), + + router.NewGetRoute("/secrets", sr.getSecrets), + router.NewPostRoute("/secrets/create", sr.createSecret), + router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), + router.NewGetRoute("/secrets/{id}", sr.getSecret), + router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), + + router.NewGetRoute("/configs", sr.getConfigs), + router.NewPostRoute("/configs/create", sr.createConfig), + router.NewDeleteRoute("/configs/{id}", sr.removeConfig), + router.NewGetRoute("/configs/{id}", sr.getConfig), + router.NewPostRoute("/configs/{id}/update", sr.updateConfig), + } +} diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go new file mode 100644 index 0000000..91461da --- /dev/null +++ b/api/server/router/swarm/cluster_routes.go @@ -0,0 +1,492 @@ +package swarm + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.InitRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + nodeID, err := sr.backend.Init(req) + if err != nil { + logrus.Errorf("Error initializing swarm: %v", err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, nodeID) +} + +func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.JoinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + return sr.backend.Join(req) +} + +func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + return sr.backend.Leave(force) +} + +func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + swarm, err := sr.backend.Inspect() + if err != nil { + logrus.Errorf("Error getting swarm: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, swarm) +} + +func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var swarm types.Spec + if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid swarm version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags types.UpdateFlags + + if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for rotateWorkerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateWorkerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + 
err := fmt.Errorf("invalid value for rotateManagerToken: %s", value) + return errors.NewBadRequestError(err) + } + + flags.RotateManagerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)) + } + + flags.RotateManagerUnlockKey = rot + } + + if err := sr.backend.Update(version, swarm, flags); err != nil { + logrus.Errorf("Error configuring swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.UnlockRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + if err := sr.backend.UnlockSwarm(req); err != nil { + logrus.Errorf("Error unlocking swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + unlockKey, err := sr.backend.GetUnlockKey() + if err != nil { + logrus.WithError(err).Errorf("Error retrieving swarm unlock key") + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + }) +} + +func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting services: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, services) +} + +func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var insertDefaults bool + if value := r.URL.Query().Get("insertDefaults"); value != "" { + var err error + insertDefaults, err = strconv.ParseBool(value) + if err != nil { + err := fmt.Errorf("invalid value for insertDefaults: %s", value) + return errors.NewBadRequestError(err) + } + } + + service, err := sr.backend.GetService(vars["id"], insertDefaults) + if err != nil { + logrus.Errorf("Error getting service %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, service) +} + +func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } + + resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry) + if err != nil { + logrus.Errorf("Error creating service %s: %v", service.Name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, resp) +} + +func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + 
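// Like the other swarm update endpoints, updateService requires the +
// caller to echo back the object's current version; the backend uses +
// it for optimistic concurrency, rejecting updates made against a +
// stale version of the service. +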
rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid service version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + var flags basictypes.ServiceUpdateOptions + + // Get returns "" if the header does not exist + flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth") + flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom") + flags.Rollback = r.URL.Query().Get("rollback") + cliVersion := r.Header.Get("version") + queryRegistry := false + if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { + queryRegistry = true + } + + resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry) + if err != nil { + logrus.Errorf("Error updating service %s: %v", vars["id"], err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveService(vars["id"]); err != nil { + logrus.Errorf("Error removing service %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTaskLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // make a selector to pass to the helper function + selector := &backend.LogSelector{ + Tasks: []string{vars["id"]}, + } + return sr.swarmLogs(ctx, w, r, selector) +} + +func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // make a selector to pass to the helper function + selector := &backend.LogSelector{ + Services: []string{vars["id"]}, + } + return sr.swarmLogs(ctx, w, r, selector) +} + +func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting nodes: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, nodes) +} + +func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + node, err := sr.backend.GetNode(vars["id"]) + if err != nil { + logrus.Errorf("Error getting node %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, node) +} + +func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var node types.NodeSpec + if err := json.NewDecoder(r.Body).Decode(&node); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + err := fmt.Errorf("invalid node version '%s': %v", rawVersion, err) + return errors.NewBadRequestError(err) + } + + if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { + logrus.Errorf("Error updating node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars 
map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + + if err := sr.backend.RemoveNode(vars["id"], force); err != nil { + logrus.Errorf("Error removing node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting tasks: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, tasks) +} + +func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + task, err := sr.backend.GetTask(vars["id"]) + if err != nil { + logrus.Errorf("Error getting task %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, task) +} + +func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secrets) +} + +func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return err + } + + id, err := sr.backend.CreateSecret(secret) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveSecret(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + secret, err := sr.backend.GetSecret(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secret) +} + +func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid secret version")) + } + + id := vars["id"] + if err := sr.backend.UpdateSecret(id, version, secret); err != nil { + return err + } + + return nil +} + +func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + configs, err := 
sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, configs) +} + +func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return err + } + + id, err := sr.backend.CreateConfig(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveConfig(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + config, err := sr.backend.GetConfig(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, config) +} + +func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config types.ConfigSpec + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid config version")) + } + + id := vars["id"] + if err := sr.backend.UpdateConfig(id, version, config); err != nil { + return err + } + + return nil +} diff --git a/api/server/router/swarm/helpers.go b/api/server/router/swarm/helpers.go new file mode 100644 index 0000000..ea692ea --- /dev/null +++ b/api/server/router/swarm/helpers.go @@ -0,0 +1,65 @@ +package swarm + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// swarmLogs takes an http response, request, and selector, and writes the logs +// specified by the selector to the response +func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, selector *backend.LogSelector) error { + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + // there is probably a neater way to manufacture the ContainerLogsOptions + // struct, probably in the caller, to eliminate the dependency on net/http + logsConfig := &basictypes.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + } + + tty := false + // checking for whether logs are TTY involves iterating over every service + // and task. 
a more efficient approach may exist.
+	for _, service := range selector.Services {
+		s, err := sr.backend.GetService(service, false)
+		if err != nil {
+			// TODO: wrap this error with context about the failing service
+			return err
+		}
+		tty = s.Spec.TaskTemplate.ContainerSpec.TTY || tty
+	}
+	for _, task := range selector.Tasks {
+		t, err := sr.backend.GetTask(task)
+		if err != nil {
+			// TODO: as above, wrap with context about the failing task
+			return err
+		}
+		tty = t.Spec.ContainerSpec.TTY || tty
+	}
+
+	msgs, err := sr.backend.ServiceLogs(ctx, selector, logsConfig)
+	if err != nil {
+		return err
+	}
+
+	httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
+	return nil
+}
diff --git a/api/server/router/system/backend.go b/api/server/router/system/backend.go
new file mode 100644
index 0000000..da1de38
--- /dev/null
+++ b/api/server/router/system/backend.go
@@ -0,0 +1,21 @@
+package system
+
+import (
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	"golang.org/x/net/context"
+)
+
+// Backend defines the methods that need to be implemented to provide
+// system specific functionality.
+type Backend interface {
+	SystemInfo() (*types.Info, error)
+	SystemVersion() types.Version
+	SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error)
+	SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
+	UnsubscribeFromEvents(chan interface{})
+	AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)
+}
diff --git a/api/server/router/system/system.go b/api/server/router/system/system.go
new file mode 100644
index 0000000..a64631e
--- /dev/null
+++ b/api/server/router/system/system.go
@@ -0,0 +1,42 @@
+package system
+
+import (
+	"github.com/docker/docker/api/server/router"
+	"github.com/docker/docker/builder/fscache"
+	"github.com/docker/docker/daemon/cluster"
+)
+
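+// Illustrative sketch (not part of this patch): Backend.SubscribeToEvents,
+// declared in backend.go above, returns a replay buffer plus a live channel.
+// The consumption pattern, mirroring what getEvents in system_routes.go
+// below actually does (emit is a placeholder for the consumer's own logic):
+//
+//	buffered, l := backend.SubscribeToEvents(since, until, ef)
+//	defer backend.UnsubscribeFromEvents(l)
+//	for _, ev := range buffered {
+//		emit(ev) // replay buffered historical events first
+//	}
+//	for ev := range l {
+//		if msg, ok := ev.(events.Message); ok { // channel carries interface{}
+//			emit(msg) // then stream live events as they arrive
+//		}
+//	}
+
+// systemRouter provides information about the Docker system overall.
+// It gathers information about host, daemon and container events.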
+type systemRouter struct { + backend Backend + cluster *cluster.Cluster + routes []router.Route + builder *fscache.FSCache +} + +// NewRouter initializes a new system router +func NewRouter(b Backend, c *cluster.Cluster, fscache *fscache.FSCache) router.Router { + r := &systemRouter{ + backend: b, + cluster: c, + builder: fscache, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.NewGetRoute("/events", r.getEvents, router.WithCancel), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go new file mode 100644 index 0000000..30fb000 --- /dev/null +++ b/api/server/router/system/system_routes.go @@ -0,0 +1,192 @@ +package system + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + pkgerrors "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + if s.cluster != nil { + info.Swarm = s.cluster.Info() + } + + if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { + // TODO: handle this conversion in engine-api + type oldInfo struct { + *types.Info + ExecutionDriver string + } + old := &oldInfo{ + Info: info, + ExecutionDriver: "", + } + nameOnlySecurityOptions := []string{} + kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) + if err != nil { + return err + } + for _, s := range kvSecOpts { + nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) + } + old.SecurityOptions = nameOnlySecurityOptions + return httputils.WriteJSON(w, http.StatusOK, old) + } + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + du, err := s.backend.SystemDiskUsage(ctx) + if err != nil { + return err + } + builderSize, err := s.builder.DiskUsage() + if err != nil { + return 
pkgerrors.Wrap(err, "error getting build cache usage") + } + du.BuilderSize = builderSize + + return httputils.WriteJSON(w, http.StatusOK, du) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + since, err := eventTime(r.Form.Get("since")) + if err != nil { + return err + } + until, err := eventTime(r.Form.Get("until")) + if err != nil { + return err + } + + var ( + timeout <-chan time.Time + onlyPastEvents bool + ) + if !until.IsZero() { + if until.Before(since) { + return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) + } + + now := time.Now() + + onlyPastEvents = until.Before(now) + + if !onlyPastEvents { + dur := until.Sub(now) + timeout = time.NewTimer(dur).C + } + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, until, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + if onlyPastEvents { + return nil + } + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-ctx.Done(): + logrus.Debug("Client context cancelled, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, ®istry.AuthenticateOKBody{ + Status: status, + IdentityToken: token, + }) +} + +func eventTime(formTime string) (time.Time, error) { + t, tNano, err := timetypes.ParseTimestamps(formTime, -1) + if err != nil { + return time.Time{}, err + } + if t == -1 { + return time.Time{}, nil + } + return time.Unix(t, tNano), nil +} diff --git a/api/server/router/volume/backend.go b/api/server/router/volume/backend.go new file mode 100644 index 0000000..b97cb94 --- /dev/null +++ b/api/server/router/volume/backend.go @@ -0,0 +1,19 @@ +package volume + +import ( + "golang.org/x/net/context" + + // TODO return types need to be refactored into pkg + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string, force bool) error + VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) +} diff --git a/api/server/router/volume/volume.go b/api/server/router/volume/volume.go new file mode 100644 index 0000000..b24c8fe --- /dev/null +++ 
b/api/server/router/volume/volume.go @@ -0,0 +1,36 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go new file mode 100644 index 0000000..f0f4901 --- /dev/null +++ b/api/server/router/volume/volume_routes.go @@ -0,0 +1,85 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req volumetypes.VolumesCreateBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + force := httputils.BoolValue(r, "force") + if err := v.backend.VolumeRm(vars["name"], force); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := 
v.backend.VolumesPrune(ctx, pruneFilters)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, pruneReport)
+}
diff --git a/api/server/router_swapper.go b/api/server/router_swapper.go
new file mode 100644
index 0000000..1ecc7a7
--- /dev/null
+++ b/api/server/router_swapper.go
@@ -0,0 +1,30 @@
+package server
+
+import (
+	"net/http"
+	"sync"
+
+	"github.com/gorilla/mux"
+)
+
+// routerSwapper is an http.Handler that allows you to swap
+// mux routers.
+type routerSwapper struct {
+	mu     sync.Mutex
+	router *mux.Router
+}
+
+// Swap replaces the old router with the new one.
+func (rs *routerSwapper) Swap(newRouter *mux.Router) {
+	rs.mu.Lock()
+	rs.router = newRouter
+	rs.mu.Unlock()
+}
+
+// ServeHTTP makes the routerSwapper implement the http.Handler interface.
+func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	rs.mu.Lock()
+	router := rs.router
+	rs.mu.Unlock()
+	router.ServeHTTP(w, r)
+}
diff --git a/api/server/server.go b/api/server/server.go
new file mode 100644
index 0000000..d402019
--- /dev/null
+++ b/api/server/server.go
@@ -0,0 +1,208 @@
+package server
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/server/middleware"
+	"github.com/docker/docker/api/server/router"
+	"github.com/docker/docker/dockerversion"
+	"github.com/gorilla/mux"
+	"golang.org/x/net/context"
+)
+
+// versionMatcher defines a variable matcher to be parsed by the router
+// when a request is about to be served.
+const versionMatcher = "/v{version:[0-9.]+}"
+
+// Config provides the configuration for the API server
+type Config struct {
+	Logging     bool
+	EnableCors  bool
+	CorsHeaders string
+	Version     string
+	SocketGroup string
+	TLSConfig   *tls.Config
+}
+
+// Server contains instance details for the server
+type Server struct {
+	cfg           *Config
+	servers       []*HTTPServer
+	routers       []router.Router
+	routerSwapper *routerSwapper
+	middlewares   []middleware.Middleware
+}
+
+// New returns a new instance of the server based on the specified configuration.
+// It allocates resources that will be needed for ServeAPI(ports, unix-sockets).
+func New(cfg *Config) *Server {
+	return &Server{
+		cfg: cfg,
+	}
+}
+
+// UseMiddleware appends a new middleware to the request chain.
+// This needs to be called before the API routes are configured.
+func (s *Server) UseMiddleware(m middleware.Middleware) {
+	s.middlewares = append(s.middlewares, m)
+}
+
+// Accept registers a listener on which the server accepts connections.
+func (s *Server) Accept(addr string, listeners ...net.Listener) {
+	for _, listener := range listeners {
+		httpServer := &HTTPServer{
+			srv: &http.Server{
+				Addr: addr,
+			},
+			l: listener,
+		}
+		s.servers = append(s.servers, httpServer)
+	}
+}
+
+// Close closes servers and thus stops receiving requests
+func (s *Server) Close() {
+	for _, srv := range s.servers {
+		if err := srv.Close(); err != nil {
+			logrus.Error(err)
+		}
+	}
+}
+
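+// Illustrative sketch (not part of this patch): routerSwapper, defined in
+// router_swapper.go above, is what lets the profiler be toggled at runtime
+// without touching the listeners. The swap pattern, mirroring EnableProfiler
+// at the end of this file:
+//
+//	m := s.createMux()      // rebuild the full route table
+//	profilerSetup(m)        // optionally add the /debug/pprof routes
+//	s.routerSwapper.Swap(m) // new mux becomes visible to subsequent requests
+
+// serveAPI loops through all initialized servers and spawns a goroutine
+// running Serve for each; the shared routerSwapper is installed as each
+// server's Handler.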
+func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for range s.servers { + err := <-chErrors + if err != nil { + return err + } + } + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create an http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.WithValue(context.Background(), dockerversion.UAStringKey, r.Header.Get("User-Agent")) + handlerFunc := s.handlerWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + if statusCode >= 500 { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + } + httputils.MakeErrorHandler(err)(w, r) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { + s.routers = append(s.routers, routers...) + + m := s.createMux() + if enableProfiler { + profilerSetup(m) + } + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. +func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debug("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) + notFoundHandler := httputils.MakeErrorHandler(err) + m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) + m.NotFoundHandler = notFoundHandler + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. 
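+// (Note: serveAPI returns the first listener error, and returns nil only
+// once every listener has exited cleanly, so a nil sent on waitChan
+// signals a clean shutdown of all servers.)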
+func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/api/server/server_test.go b/api/server/server_test.go new file mode 100644 index 0000000..272dc81 --- /dev/null +++ b/api/server/server_test.go @@ -0,0 +1,46 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{ + Version: "0.1omega2", + } + srv := &Server{ + cfg: cfg, + } + + srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatal("Expected version, got empty string") + } + + if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { + t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) + } + + return nil + } + + handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/api/swagger-gen.yaml b/api/swagger-gen.yaml new file mode 100644 index 0000000..f07a027 --- /dev/null +++ b/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/api/swagger.yaml b/api/swagger.yaml new file mode 100644 index 0000000..f27aabf --- /dev/null +++ b/api/swagger.yaml @@ -0,0 +1,8584 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.31" +info: + title: "Docker Engine API" + version: "1.31" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. 
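+
+    As an illustrative aside (not part of the upstream description): the daemon listens on a local Unix socket by default, so any plain HTTP client can talk to the Engine API. A minimal Go sketch, assuming the default `/var/run/docker.sock` path:
+
+    ```
+    tr := &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            // Dial the daemon's Unix socket instead of a TCP host.
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }
+    client := &http.Client{Transport: tr}
+    // The host segment is a placeholder; routing happens in DialContext.
+    resp, err := client.Get("http://docker/v1.31/containers/json")
+    ```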
+
+    Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+
+    # Errors
+
+    The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+
+    ```
+    {
+      "message": "page not found"
+    }
+    ```
+
+    # Versioning
+
+    The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break.
+
+    For Docker Engine 17.06, the API version is 1.30. To lock to this version, you prefix the URL with `/v1.30`. For example, calling `/info` is the same as calling `/v1.30/info`.
+
+    Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine.
+
+    In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker.
+
+    The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.
+
+    This documentation is for version 1.31 of the API. Use this table to find documentation for previous versions of the API:
+
+    Docker version  | API version | Changes
+    ----------------|-------------|---------
+    17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes)
+    17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes)
+    17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes)
+    17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
+    1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
+    1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
+    1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
+    1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
+    1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
+    1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
+    1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
+    1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
+    1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)
+
+    # 
Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. 
+ # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." 
+ type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." 
+ type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." 
+ type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + StartPeriod: + description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." 
+ items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." 
+ type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriverData: + description: "Information about a container's graph driver." 
+ type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Time volume was created." + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. 
+ + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver-specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + CreatedAt: "2016-06-07T20:31:11.853781916Z" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map."
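+ # NOTE (editor, illustrative only, hypothetical subnets): a single IPAM Config
+ # entry as used in the Network example above:
+ #   {"Subnet": "172.20.0.0/16", "IPRange": "172.20.10.0/24", "Gateway": "172.20.10.1"}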
+ type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + 
Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + IpcHost: + type: "boolean" + x-nullable: false + PidHost: + type: "boolean" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + 
Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. + As a result, two separate update requests that happen at the same time will not + unintentionally overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "int64" + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + TLSInfo: + $ref: "#/definitions/TLSInfo" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "17.04.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw +
EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + TLSInfo: + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + type: "object" + properties: + TrustRoot: + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + type: "string" + CertIssuerSubject: + description: "The base64-url-safe-encoded raw subject bytes of the issuer" + type: "string" + CertIssuerPublicKey: + description: "The base64-url-safe-encoded raw public key bytes of the issuer" + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. 
Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + CACert: + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be the current swarm root CA certificate if not provided)." + type: "string" + SigningCACert: + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + type: "string" + SigningCAKey: + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." + type: "string" + ForceRotate: + description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an effect on new tasks. Old tasks will continue to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm."
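+ # NOTE (editor, illustrative arithmetic): with the example values above
+ # (HeartbeatTick: 1, ElectionTick: 3) and the default ~1s tick, the leader
+ # heartbeats roughly every second and followers start an election after
+ # roughly three seconds without one.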
+ type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: "Whether there is currently a root CA rotation in progress for the swarm" + type: "boolean" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. + The format of extra hosts on swarmkit is specified in: + http://man7.org/linux/man-pages/man5/hosts.5.html + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Secrets: + description: "Secrets contains references to zero or more secrets that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." 
+ type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: "SecretID represents the ID of the specific secret that we're referencing." + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, but this is just provided for + lookup/display purposes. The secret in the reference will be identified by its ID. + type: "string" + + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." + type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + Preferences: + description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: "label descriptor, such as engine.labels.az" + type: "string" + Platforms: + description: "An array of supported platforms." + type: "array" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Runtime: + description: "Runtime is the type of runtime specified for the task executor." + type: "string" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." 
+ type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." 
+ type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between rollback iterations, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an rolled back task fails to run, or stops running during the rollback." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." 
+ type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + 
Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded secret data" + type: "array" + items: + type: "string" + Secret: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SecretSpec" +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container than inspecting a single container. For example, + the list of linked containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers.
Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" +
NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: 
"object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: | + The status of the container. For example, `"running"` or `"exited"`. + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the cgroups freezer is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." 
+ type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: 
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. 
+ operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used + for calculating the CPU usage percentage. It is not the same as the + `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." 
+ type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
+ + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has already started, when you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." + type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. 
Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
+ schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter.
[See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:<name|id>`. Any other value is taken as a custom network's + name to which this container should connect." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import.
The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + 
parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." + operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. 
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/delta: + post: + summary: "Create a delta" + description: "Create a binary delta between two images." + operationId: "DeltaCreate" + produces: [ "application/json" ] + responses: + 201: + description: "The delta was created successfully" + schema: + $ref: "#/definitions/ImageSummary" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "src" + in: "query" + required: true + description: "Name of the image to use as a base for the delta. The name may include a tag or digest." + type: "string" + - name: "dest" + in: "query" + required: true + description: "Name of the image to use as a target for the delta. The name may include a tag or digest." + type: "string" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
+ 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + Log: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + 
InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "17.04.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.7.5" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.27" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor 
events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `plugin`= plugin name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=` volume name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+ Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. + - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." + type: "boolean" + Ingress: + description: "Ingress network is the network which provides the routing-mesh in swarm mode." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." 
+ type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." 
+ type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." + type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + RootRotationInProgress: false + TLSInfo: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." 
+ type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." 
+ type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." 
+ type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "rollback" + in: "query" + type: "string" + description: "Set to this parameter to `previous` to cause a + server-side rollback to the previous service spec. The supplied spec will be + ignored in this case." + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. 
+ operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
+ type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
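The `filters` parameter on the task list endpoint is a JSON-encoded `map[string][]string`; the vendored `api/types/filters` package produces exactly that encoding. A sketch listing the running tasks of a hypothetical service:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// filters.Args serializes to the JSON-encoded map[string][]string
	// form that the `filters` query parameter expects.
	f := filters.NewArgs()
	f.Add("service", "web") // hypothetical service name
	f.Add("desired-state", "running")

	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{Filters: f})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tasks {
		fmt.Println(t.ID, t.Status.State)
	}
}
```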
+ type: "string" + default: "all" + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." + type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: 
"#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: "Return image digest and platform information by contacting the registry." + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: "A descriptor struct containing digest, media type, and size" + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: "An array containing all platforms supported by the image" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. + + > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental + > features enabled. The specifications for this endpoint may still change in a future version of the API. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. 
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, followed by the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session (experimental)"] diff --git a/api/templates/server/operation.gotmpl b/api/templates/server/operation.gotmpl new file mode 100644 index 0000000..3ff63ef --- /dev/null +++ b/api/templates/server/operation.gotmpl @@ -0,0 +1,26 @@ +package {{ .Package }} + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import ( + "net/http" + + context "golang.org/x/net/context" + + {{ range .DefaultImports }}{{ printf "%q" . }} + {{ end }} + {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} + {{ end }} +) + + +{{ range .ExtraSchemas }} +// {{ .Name }} {{ template "docstring" . }} +// swagger:model {{ .Name }} +{{ template "schema" . }} +{{ end }} diff --git a/api/types/auth.go b/api/types/auth.go new file mode 100644 index 0000000..056af6b --- /dev/null +++ b/api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/api/types/backend/backend.go b/api/types/backend/backend.go new file mode 100644 index 0000000..368ad7b --- /dev/null +++ b/api/types/backend/backend.go @@ -0,0 +1,104 @@ +// Package backend includes types to send information to server backends. +package backend + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change...
+ // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// LogMessage is a data structure that represents a piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// Changes to this struct need to be reflected in the reset method in +// daemon/logger/logger.go. +type LogMessage struct { + Line []byte + Source string + Timestamp time.Time + Attrs LogAttributes + Partial bool + + // Err is an error associated with a message. Completeness of a message + // with Err is not expected, though it may be partially complete (fields may + // be missing, gibberish, or nil) + Err error +} + +// LogAttributes is used to hold the extra attributes available in the log message. +// Primarily used for converting the map type to string and sorting. +type LogAttributes map[string]string + +// LogSelector is a list of services and tasks that should be returned as part +// of a log stream. It is similar to swarmapi.LogSelector, with the difference +// that the names don't have to be resolved to IDs; this is mostly to avoid +// accidents later where a swarmapi LogSelector might have been incorrectly +// used verbatim (and to avoid the handler having to import swarmapi types) +type LogSelector struct { + Services []string + Tasks []string +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// ContainerCommitConfig is a wrapper around +// types.ContainerCommitConfig that also +// transports configuration changes for a container.
+type ContainerCommitConfig struct { + types.ContainerCommitConfig + Changes []string + // TODO: ContainerConfig is only used by the dockerfile Builder, so remove it + // once the Builder has been updated to use a different interface + ContainerConfig *container.Config +} diff --git a/api/types/backend/build.go b/api/types/backend/build.go new file mode 100644 index 0000000..300d358 --- /dev/null +++ b/api/types/backend/build.go @@ -0,0 +1,44 @@ +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// PullOption defines different modes for accessing images +type PullOption int + +const ( + // PullOptionNoPull only returns local images + PullOptionNoPull PullOption = iota + // PullOptionForcePull always tries to pull a ref from the registry first + PullOptionForcePull + // PullOptionPreferLocal uses local image if it exists, otherwise pulls + PullOptionPreferLocal +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + PullOption PullOption + AuthConfig map[string]types.AuthConfig + Output io.Writer + Platform string +} diff --git a/api/types/blkiodev/blkio.go b/api/types/blkiodev/blkio.go new file mode 100644 index 0000000..931ae10 --- /dev/null +++ b/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/api/types/client.go b/api/types/client.go new file mode 100644 index 0000000..fc26a1c --- /dev/null +++ b/api/types/client.go @@ -0,0 +1,390 @@ +package types + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. 
+type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface implemented by types that can close +// the write side of a stream to prevent further writes. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes the write side of the hijacked connection. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache.
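The `HijackedResponse` type above is what interactive endpoints such as container attach hand back to callers; `CloseWrite` half-closes the connection so the daemon sees EOF on stdin while output can still be read. A hedged sketch using the vendored Go client (the container name is hypothetical):

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	hijack, err := cli.ContainerAttach(context.Background(), "mycontainer", // hypothetical name
		types.ContainerAttachOptions{Stream: true, Stdin: true, Stdout: true})
	if err != nil {
		log.Fatal(err)
	}
	defer hijack.Close()

	// Write to the container's stdin, then half-close so the daemon sees
	// EOF while output can still be read from hijack.Reader.
	fmt.Fprintln(hijack.Conn, "exit")
	if err := hijack.CloseWrite(); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 4096)
	n, _ := hijack.Reader.Read(buf)
	fmt.Printf("%s", buf[:n])
}
```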
+ CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Volumes []string + + // TODO @jhowardmsft LCOW Support: This will require extending to include + // `Platform string`, but is omitted for now as it's hard-coded temporarily + // to avoid API changes. +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether creating the service requires + // contacting a registry.
A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions holds all options for plugin create.
+type PluginCreateOptions struct { + RepoName string +} diff --git a/api/types/configs.go b/api/types/configs.go new file mode 100644 index 0000000..e4d2ce6 --- /dev/null +++ b/api/types/configs.go @@ -0,0 +1,70 @@ +package types + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool + Platform string +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable. +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/api/types/container/config.go b/api/types/container/config.go new file mode 100644 index 0000000..55a03fc --- /dev/null +++ b/api/types/container/config.go @@ -0,0 +1,69 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API errors caused by ambiguous time units. For example, +// the API may receive 3 as a healthcheck interval with the intention of +// 3 seconds, but Docker would interpret it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also supports user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g.
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that was defined in the image's Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/api/types/container/container_changes.go b/api/types/container/container_changes.go new file mode 100644 index 0000000..767945a --- /dev/null +++ b/api/types/container/container_changes.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem container change response item +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/api/types/container/container_create.go b/api/types/container/container_create.go new file mode 100644 index 0000000..c95023b --- /dev/null +++ b/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/api/types/container/container_top.go b/api/types/container/container_top.go new file mode 100644 index 0000000..78bc37e --- /dev/null +++ b/api/types/container/container_top.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody container top o k body +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each is represented as an array of values corresponding to the titles + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/api/types/container/container_update.go b/api/types/container/container_update.go new file mode 100644 index 0000000..2339366 --- 
/dev/null +++ b/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/api/types/container/container_wait.go b/api/types/container/container_wait.go new file mode 100644 index 0000000..77ecdba --- /dev/null +++ b/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/api/types/container/host_config.go b/api/types/container/host_config.go new file mode 100644 index 0000000..9fea9eb --- /dev/null +++ b/api/types/container/host_config.go @@ -0,0 +1,380 @@ +package container + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether the container isn't using a network stack. 
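// A minimal sketch of how the IpcMode helpers above behave for the supported
// forms ("", "host", "container:<name>"); it assumes only the
// api/types/container package added by this patch, and the values are
// illustrative:
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	for _, m := range []container.IpcMode{"", "host", "container:db", "container:"} {
		// Valid rejects "container:" with an empty name; Container extracts the name.
		fmt.Printf("%-16q valid=%-5v host=%-5v shares-with=%q\n",
			string(m), m.Valid(), m.IsHost(), m.Container())
	}
}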
+func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether the container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether the container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether the container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined indicates a user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container's cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. 
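// CgroupSpec, defined above, accepts only the empty value or the
// "container:<id>" form; a short sketch (same assumed import path,
// illustrative container name):
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	for _, c := range []container.CgroupSpec{"", "container:redis", "host"} {
		// Note that "host" is not a valid CgroupSpec, unlike most of the other *Mode types.
		fmt.Printf("%-18q valid=%-5v container=%q\n", string(c), c.Valid(), c.Container())
	}
}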
+func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart when exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless the user has put it into a stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy values to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging. +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. 
other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rules to be added to the device cgroup + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Sets the pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig is the non-portable Config structure of a container. +// Here, "non-portable" means "dependent on the host we are running on". +// Portable information *should* appear in Config. 
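// How the mutable pieces fit together: UpdateConfig embeds Resources and adds
// a RestartPolicy. A hedged sketch with illustrative limit values:
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	update := container.UpdateConfig{
		Resources: container.Resources{
			Memory:   512 * 1024 * 1024, // hard limit: 512 MiB, in bytes
			NanoCPUs: 1500000000,        // 1.5 CPUs, in units of 10^-9 CPUs
		},
		RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
	}
	// Embedded Resources fields are promoted onto UpdateConfig.
	fmt.Println(update.Memory, update.RestartPolicy.IsOnFailure())
}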
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other containers + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS servers to look up + DNSOptions []string `json:"DnsOptions"` // List of DNS options to look for + DNSSearch []string `json:"DnsSearch"` // List of DNS search domains to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed ports for the container + ReadonlyRootfs bool // Is the container root filesystem read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // Run a custom init inside the container; if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} diff --git a/api/types/container/hostconfig_unix.go b/api/types/container/hostconfig_unix.go new file mode 100644 index 0000000..2d664d1 --- /dev/null +++ b/api/types/container/hostconfig_unix.go @@ -0,0 +1,41 @@ +// +build !windows + +package container + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack. 
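// A hedged sketch tying several HostConfig fields together (bind mount, port
// mapping, restart policy); the paths, ports, and limits are illustrative, and
// nat comes from the github.com/docker/go-connections/nat import shown above:
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
)

func main() {
	hc := container.HostConfig{
		Binds:       []string{"/srv/data:/data:ro"}, // hostPath:containerPath[:options]
		NetworkMode: "bridge",
		PortBindings: nat.PortMap{
			"80/tcp": {{HostIP: "0.0.0.0", HostPort: "8080"}},
		},
		RestartPolicy: container.RestartPolicy{Name: "unless-stopped"},
		Resources:     container.Resources{Memory: 256 * 1024 * 1024},
	}
	fmt.Println(hc.NetworkMode.NetworkName(), hc.RestartPolicy.IsUnlessStopped())
}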
+func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether the container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether the container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates a user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/api/types/container/hostconfig_windows.go b/api/types/container/hostconfig_windows.go new file mode 100644 index 0000000..469923f --- /dev/null +++ b/api/types/container/hostconfig_windows.go @@ -0,0 +1,54 @@ +package container + +import ( + "strings" +) + +// IsBridge indicates whether the container uses the bridge network stack; +// on Windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether the container uses the host network stack. +// It returns false as this is not supported by Windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates a user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/api/types/container/waitcondition.go b/api/types/container/waitcondition.go new file mode 100644 index 0000000..64820fe --- /dev/null +++ b/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. 
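// How a WaitCondition is typically consumed: the engine's Go client exposes a
// three-argument ContainerWait alongside these conditions. A sketch; the
// client wiring and the "app" container name are assumptions, not part of
// this hunk:
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Block until the container reaches any non-running state.
	bodyCh, errCh := cli.ContainerWait(context.Background(), "app", container.WaitConditionNotRunning)
	select {
	case body := <-bodyCh:
		fmt.Println("exit status:", body.StatusCode)
	case err := <-errCh:
		fmt.Println("wait error:", err)
	}
}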
+const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/api/types/error_response.go b/api/types/error_response.go new file mode 100644 index 0000000..dc942d9 --- /dev/null +++ b/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/api/types/events/events.go b/api/types/events/events.go new file mode 100644 index 0000000..5f5f540 --- /dev/null +++ b/api/types/events/events.go @@ -0,0 +1,50 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that the daemon generates + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = "secret" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. +// With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + // Engine events are local scope. Cluster events are swarm scope. + Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/api/types/filters/parse.go b/api/types/filters/parse.go new file mode 100644 index 0000000..beec3d4 --- /dev/null +++ b/api/types/filters/parse.go @@ -0,0 +1,310 @@ +// Package filters provides helper functions to parse and handle command line +// filters, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. 
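// The wire form of events.Message is plain JSON; a small decoding sketch with
// a hypothetical payload (field names follow the struct tags above):
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/events"
)

func main() {
	payload := []byte(`{"Type":"container","Action":"start",` +
		`"Actor":{"ID":"abc123","Attributes":{"image":"ubuntu","name":"web"}},` +
		`"scope":"local","time":1500000000}`)
	var msg events.Message
	if err := json.Unmarshal(payload, &msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.Type, msg.Action, msg.Actor.Attributes["name"], msg.Scope)
}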
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + var buf []byte + var err error + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associated with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. 
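// Building filter Args programmatically and packing them for the wire with
// the functions above; the "1.21" version string is illustrative:
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("status", "running")

	// Modern form: map[string]map[string]bool.
	modern, _ := filters.ToParam(args)
	// Legacy form for pre-1.22 daemons: map[string][]string.
	legacy, _ := filters.ToParamWithVersion("1.21", args)
	fmt.Println(modern)
	fmt.Println(legacy)
}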
+func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) + } + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field match the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field match the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + // do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. +func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(filters.fields[field]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. 
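// The matching helpers above differ in strictness; a sketch contrasting them
// on the same Args value:
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("name", "web")

	fmt.Println(args.ExactMatch("name", "web"))   // true: exact value match
	fmt.Println(args.ExactMatch("name", "web-1")) // false
	fmt.Println(args.FuzzyMatch("name", "web-1")) // true: "web" is a prefix of the source
	fmt.Println(args.Match("name", "web-1"))      // true: "web" matches as a regexp
}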
+func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. +func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/api/types/filters/parse_test.go b/api/types/filters/parse_test.go new file mode 100644 index 0000000..8198f89 --- /dev/null +++ b/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "errors" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Error("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Error("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the filters: %s", str2) + } +} + +func TestFromParam(t 
*testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, got %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Error("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got false", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key4": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "today", 
&Args{map[string]map[string]bool{ + "created": map[string]bool{"to*": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tod": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"anyting": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tomorrow": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(day": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today1": true}, + "labels": map[string]bool{"today": true}}, + }: "created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatal("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatal("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatal("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatal("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatal("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatal("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatal("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatal("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatal("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatal("Expected to include a status key, got false") + } +} + +func TestValidate(t *testing.T) { + f := NewArgs() + 
f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + t.Fatal("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return errors.New("return") + }) + if err == nil { + t.Fatal("Expected to get an error, got nil") + } + + err = f.WalkValues("foo", func(value string) error { + return errors.New("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/api/types/graph_driver_data.go b/api/types/graph_driver_data.go new file mode 100644 index 0000000..4d9bf1c --- /dev/null +++ b/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/api/types/id_response.go b/api/types/id_response.go new file mode 100644 index 0000000..7592d2f --- /dev/null +++ b/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. 
+ // Required: true + ID string `json:"Id"` +} diff --git a/api/types/image/image_history.go b/api/types/image/image_history.go new file mode 100644 index 0000000..0dd30c7 --- /dev/null +++ b/api/types/image/image_history.go @@ -0,0 +1,37 @@ +package image + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem history response item +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/api/types/image_delete_response_item.go b/api/types/image_delete_response_item.go new file mode 100644 index 0000000..b9a65a0 --- /dev/null +++ b/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/api/types/image_summary.go b/api/types/image_summary.go new file mode 100644 index 0000000..e145b3d --- /dev/null +++ b/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/api/types/mount/mount.go b/api/types/mount/mount.go new file mode 100644 index 0000000..2744f85 --- /dev/null +++ b/api/types/mount/mount.go @@ -0,0 +1,128 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). 
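// A hedged sketch of the three mount types this file defines, using
// illustrative paths and a hypothetical volume name (the option structs are
// declared just below):
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	mounts := []mount.Mount{
		{Type: mount.TypeBind, Source: "/srv/config", Target: "/etc/app", ReadOnly: true,
			BindOptions: &mount.BindOptions{Propagation: mount.PropagationRPrivate}},
		{Type: mount.TypeVolume, Source: "appdata", Target: "/var/lib/app",
			VolumeOptions: &mount.VolumeOptions{Labels: map[string]string{"backup": "daily"}}},
		// tmpfs mounts must leave Source empty.
		{Type: mount.TypeTmpfs, Target: "/run/scratch",
			TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 64 * 1024 * 1024, Mode: 0700}},
	}
	for _, m := range mounts {
		fmt.Printf("%-6s -> %s\n", m.Type, m.Target)
	}
}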
+type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind-mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. 
+} diff --git a/api/types/network/network.go b/api/types/network/network.go new file mode 100644 index 0000000..7c7dbac --- /dev/null +++ b/api/types/network/network.go @@ -0,0 +1,108 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) + } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} diff --git a/api/types/plugin.go b/api/types/plugin.go new file mode 100644 index 0000000..ed3c2c2 --- /dev/null +++ b/api/types/plugin.go @@ -0,0 +1,200 @@ +package types + +// This file was generated by the swagger tool. 
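// How a NetworkingConfig is assembled from per-network EndpointSettings when
// connecting a container; the network name, alias, and address are
// illustrative:
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	cfg := network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"backend": {
				Aliases:    []string{"db"},
				IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: "172.20.0.10"},
			},
		},
	}
	ep := cfg.EndpointsConfig["backend"]
	// Copy is a deep copy: mutating the clone leaves the original intact.
	clone := ep.Copy()
	clone.Aliases[0] = "replica"
	fmt.Println(ep.Aliases[0], clone.Aliases[0], ep.IPAMConfig.IPv4Address)
}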
+// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. +// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// 
swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/api/types/plugin_device.go b/api/types/plugin_device.go new file mode 100644 index 0000000..5699010 --- /dev/null +++ b/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/api/types/plugin_env.go b/api/types/plugin_env.go new file mode 100644 index 0000000..32962dc --- /dev/null +++ b/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/api/types/plugin_interface_type.go b/api/types/plugin_interface_type.go new file mode 100644 index 0000000..c82f204 --- /dev/null +++ b/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/api/types/plugin_mount.go b/api/types/plugin_mount.go new file mode 100644 index 0000000..5c031cf --- /dev/null +++ b/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/api/types/plugin_responses.go b/api/types/plugin_responses.go new file mode 100644 index 0000000..1c6461f --- /dev/null +++ b/api/types/plugin_responses.go @@ -0,0 +1,79 @@ +package types + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/api/types/plugins/logdriver/entry.pb.go b/api/types/plugins/logdriver/entry.pb.go new file mode 100644 index 0000000..5d7d8b4 --- /dev/null +++ b/api/types/plugins/logdriver/entry.pb.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-gogo. +// source: entry.proto +// DO NOT EDIT! + +/* + Package logdriver is a generated protocol buffer package. + + It is generated from these files: + entry.proto + + It has these top-level messages: + LogEntry +*/ +package logdriver + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
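+// (Editorial note: LogEntry below is not exchanged as bare protobuf; the
+// encoder added in api/types/plugins/logdriver/io.go by this same patch
+// frames each message as [4-byte big-endian length][protobuf bytes], and the
+// decoder reads the length prefix first. The blank assignments that follow
+// only keep the imports referenced.)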
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type LogEntry struct { + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` + Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` + Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } + +func (m *LogEntry) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *LogEntry) GetTimeNano() int64 { + if m != nil { + return m.TimeNano + } + return 0 +} + +func (m *LogEntry) GetLine() []byte { + if m != nil { + return m.Line + } + return nil +} + +func (m *LogEntry) GetPartial() bool { + if m != nil { + return m.Partial + } + return false +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "LogEntry") +} +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Source) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if m.TimeNano != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) + } + if len(m.Line) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) + } + if m.Partial { + dAtA[i] = 0x20 + i++ + if m.Partial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func encodeFixed64Entry(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LogEntry) Size() (n int) { + var l int + _ = l + l = len(m.Source) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.TimeNano != 0 { + n += 1 + sovEntry(uint64(m.TimeNano)) + } + l = len(m.Line) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.Partial { + n += 2 + } + return n +} + +func sovEntry(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEntry(x uint64) (n int) { + return sovEntry(uint64((x << 1) ^ uint64((int64(x) 
>> 63)))) +} +func (m *LogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNano", wireType) + } + m.TimeNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNano |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Line == nil { + m.Line = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partial = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntry(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEntry + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEntry(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } + +var fileDescriptorEntry = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2b, 0x29, + 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0xca, 0xe5, 0xe2, 0xf0, 0xc9, 0x4f, 0x77, 0x05, + 0x89, 0x08, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x41, 0x79, 0x42, 0xd2, 0x5c, 0x9c, 0x25, 0x99, 0xb9, 0xa9, 0xf1, 0x79, 0x89, 0x79, + 0xf9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x1c, 0x20, 0x01, 0xbf, 0xc4, 0xbc, 0x7c, 0x21, + 0x21, 0x2e, 0x96, 0x9c, 0xcc, 0xbc, 0x54, 0x09, 0x66, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, + 0x48, 0x82, 0x8b, 0xbd, 0x20, 0xb1, 0xa8, 0x24, 0x33, 0x31, 0x47, 0x82, 0x45, 0x81, 0x51, 0x83, + 0x23, 0x08, 0xc6, 0x75, 
0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, + 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0x6e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x24, 0x5a, + 0xd4, 0x92, 0x00, 0x00, 0x00, +} diff --git a/api/types/plugins/logdriver/entry.proto b/api/types/plugins/logdriver/entry.proto new file mode 100644 index 0000000..a4e96ea --- /dev/null +++ b/api/types/plugins/logdriver/entry.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +message LogEntry { + string source = 1; + int64 time_nano = 2; + bytes line = 3; + bool partial = 4; +} diff --git a/api/types/plugins/logdriver/gen.go b/api/types/plugins/logdriver/gen.go new file mode 100644 index 0000000..068f987 --- /dev/null +++ b/api/types/plugins/logdriver/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto + +package logdriver diff --git a/api/types/plugins/logdriver/io.go b/api/types/plugins/logdriver/io.go new file mode 100644 index 0000000..8c1ed49 --- /dev/null +++ b/api/types/plugins/logdriver/io.go @@ -0,0 +1,87 @@ +package logdriver + +import ( + "encoding/binary" + "io" +) + +const binaryEncodeLen = 4 + +// LogEntryEncoder encodes a LogEntry to a protobuf stream +// The stream should look like: +// +// [uint32 binary encoded message size][protobuf message] +// +// To decode an entry, read the first 4 bytes to get the size of the entry, +// then read `size` bytes from the stream. +type LogEntryEncoder interface { + Encode(*LogEntry) error +} + +// NewLogEntryEncoder creates a protobuf stream encoder for log entries. +// This is used to write out log entries to a stream. +func NewLogEntryEncoder(w io.Writer) LogEntryEncoder { + return &logEntryEncoder{ + w: w, + buf: make([]byte, 1024), + } +} + +type logEntryEncoder struct { + buf []byte + w io.Writer +} + +func (e *logEntryEncoder) Encode(l *LogEntry) error { + n := l.Size() + + total := n + binaryEncodeLen + if total > len(e.buf) { + e.buf = make([]byte, total) + } + binary.BigEndian.PutUint32(e.buf, uint32(n)) + + if _, err := l.MarshalTo(e.buf[binaryEncodeLen:]); err != nil { + return err + } + _, err := e.w.Write(e.buf[:total]) + return err +} + +// LogEntryDecoder decodes log entries from a stream +// It is expected that the wire format is as defined by LogEntryEncoder. +type LogEntryDecoder interface { + Decode(*LogEntry) error +} + +// NewLogEntryDecoder creates a new stream decoder for log entries +func NewLogEntryDecoder(r io.Reader) LogEntryDecoder { + return &logEntryDecoder{ + lenBuf: make([]byte, binaryEncodeLen), + buf: make([]byte, 1024), + r: r, + } +} + +type logEntryDecoder struct { + r io.Reader + lenBuf []byte + buf []byte +} + +func (d *logEntryDecoder) Decode(l *LogEntry) error { + _, err := io.ReadFull(d.r, d.lenBuf) + if err != nil { + return err + } + + size := int(binary.BigEndian.Uint32(d.lenBuf)) + if len(d.buf) < size { + d.buf = make([]byte, size) + } + + if _, err := io.ReadFull(d.r, d.buf[:size]); err != nil { + return err + } + return l.Unmarshal(d.buf[:size]) +} diff --git a/api/types/port.go b/api/types/port.go new file mode 100644 index 0000000..ad52d46 --- /dev/null +++ b/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
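+// (Editorial illustration with invented values.) Mapping container port
+// 80/tcp to host port 8080 would be represented by the Port model below as:
+//
+//   {"IP": "0.0.0.0", "PrivatePort": 80, "PublicPort": 8080, "Type": "tcp"}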
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/api/types/registry/authenticate.go b/api/types/registry/authenticate.go new file mode 100644 index 0000000..42cac44 --- /dev/null +++ b/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/api/types/registry/registry.go b/api/types/registry/registry.go new file mode 100644 index 0000000..b98a943 --- /dev/null +++ b/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry + +import ( + "encoding/json" + "net" + + "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as 
URIs
+ Mirrors []string
+ // Secure is set to false if the registry is part of the list of
+ // insecure registries. Insecure registries accept HTTP and/or accept
+ // HTTPS with certificates from unknown CAs.
+ Secure bool
+ // Official indicates whether this is an official registry
+ Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+ // StarCount indicates the number of stars this repository has
+ StarCount int `json:"star_count"`
+ // IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
diff --git a/api/types/seccomp.go b/api/types/seccomp.go
new file mode 100644
index 0000000..7d62c9a
--- /dev/null
+++ b/api/types/seccomp.go
@@ -0,0 +1,93 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
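+ // ArchMap below supersedes it in newer profiles. As an editorial
+ // sketch (not a profile shipped with this patch), a minimal profile
+ // using these types could look like:
+ //
+ //   {"defaultAction": "SCMP_ACT_ERRNO",
+ //    "syscalls": [{"names": ["accept4"], "action": "SCMP_ACT_ALLOW"}]}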
+ Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent a specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/api/types/service_update_response.go b/api/types/service_update_response.go new file mode 100644 index 0000000..74ea64b --- /dev/null +++ b/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/api/types/stats.go b/api/types/stats.go new file mode 100644 index 0000000..7ca76a5 --- /dev/null +++ b/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. 
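+// (Editorial note: from these fields a throttling ratio can be derived as
+// ThrottledPeriods / Periods, and ThrottledTime gives the total time spent
+// throttled, in nanoseconds. The derivation is an illustration, not an API
+// guarantee.)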
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of a container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux-specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that attempting a
+// combined structure makes little sense.
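+// (Editorial assumption based on the field names, not stated by this file:
+// on Linux these entries mirror the cgroup blkio controller counters, e.g.
+// IoServiceBytesRecursive corresponds to io_service_bytes_recursive.)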
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+ // Received errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
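+ // (Editorial sketch of the usual client-side calculation, not a contract
+ // of this struct: CPU percent is commonly derived from two samples as
+ //   cpuDelta := CPUStats.CPUUsage.TotalUsage - PreCPUStats.CPUUsage.TotalUsage
+ //   sysDelta := CPUStats.SystemUsage - PreCPUStats.SystemUsage
+ //   percent := float64(cpuDelta) / float64(sysDelta) * float64(CPUStats.OnlineCPUs) * 100.0
+ // using the shared fields declared further down.)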
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container name, ID, and per-network stats (API >= v1.21)
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/api/types/strslice/strslice.go b/api/types/strslice/strslice.go
new file mode 100644
index 0000000..bad493f
--- /dev/null
+++ b/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/api/types/strslice/strslice_test.go b/api/types/strslice/strslice_test.go
new file mode 100644
index 0000000..1163b36
--- /dev/null
+++ b/api/types/strslice/strslice_test.go
@@ -0,0 +1,86 @@
+package strslice
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestStrSliceMarshalJSON(t *testing.T) {
+ for _, testcase := range []struct {
+ input StrSlice
+ expected string
+ }{
+ // MADNESS(stevvooe): No clue why nil would be "" but empty would be
+ // "null". Had to make a change here that may affect compatibility.
+ {input: nil, expected: "null"},
+ {StrSlice{}, "[]"},
+ {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
+ } {
+ data, err := json.Marshal(testcase.input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(data) != testcase.expected {
+ t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data))
+ }
+ }
+}
+
+func TestStrSliceUnmarshalJSON(t *testing.T) {
+ parts := map[string][]string{
+ "": {"default", "values"},
+ "[]": {},
+ `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
+ }
+ for json, expectedParts := range parts {
+ strs := StrSlice{"default", "values"}
+ if err := strs.UnmarshalJSON([]byte(json)); err != nil {
+ t.Fatal(err)
+ }
+
+ actualParts := []string(strs)
+ if !reflect.DeepEqual(actualParts, expectedParts) {
+ t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
+ }
+
+ }
+}
+
+func TestStrSliceUnmarshalString(t *testing.T) {
+ var e StrSlice
+ echo, err := json.Marshal("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := json.Unmarshal(echo, &e); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(e) != 1 {
+ t.Fatalf("expected 1 element after unmarshal: %q", e)
+ }
+
+ if e[0] != "echo" {
+ t.Fatalf("expected `echo`, got: %q", e[0])
+ }
+}
+
+func TestStrSliceUnmarshalSlice(t *testing.T) {
+ var e StrSlice
+ echo, err := json.Marshal([]string{"echo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := json.Unmarshal(echo, &e); err != nil {
+ t.Fatal(err)
+ }
+
+ if len(e) != 1 {
+ t.Fatalf("expected 1 element after unmarshal: %q", e)
+ }
+
+ if e[0] != "echo" {
+ t.Fatalf("expected `echo`, got: %q", e[0])
+ }
+}
diff --git a/api/types/swarm/common.go b/api/types/swarm/common.go
new file mode 100644
index 0000000..54af82b
--- /dev/null
+++ b/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuerSubject is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/api/types/swarm/config.go b/api/types/swarm/config.go
new file mode 100644
index 0000000..0fb021c
--- /dev/null
+++ b/api/types/swarm/config.go
@@ -0,0 +1,31 @@
+package swarm
+
+import "os"
+
+// Config represents a config.
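+// (Editorial note: Data in ConfigSpec below is a []byte, so encoding/json
+// transports it base64-encoded, as it does for any byte slice.)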
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget
+ ConfigID string
+ ConfigName string
+}
diff --git a/api/types/swarm/container.go b/api/types/swarm/container.go
new file mode 100644
index 0000000..6f8b45f
--- /dev/null
+++ b/api/types/swarm/container.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf)
+// Detailed documentation is available at:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, and `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+}
diff --git a/api/types/swarm/network.go b/api/types/swarm/network.go
new file mode 100644
index 0000000..97c484e
--- /dev/null
+++ b/api/types/swarm/network.go
@@ -0,0 +1,119 @@
+package swarm
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
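+// (Editorial illustration with invented values: publishing container port 80
+// as swarm port 8080 over TCP through the routing mesh corresponds to a
+// PortConfig of {Protocol: "tcp", TargetPort: 80, PublishedPort: 8080,
+// PublishMode: "ingress"}; see the PortConfig type below.)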
+type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. 
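+// (Editorial example, values invented: {Subnet: "10.0.9.0/24", Gateway:
+// "10.0.9.1"}; Range, when set, is read as narrowing the allocation pool
+// within Subnet.)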
+type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/api/types/swarm/node.go b/api/types/swarm/node.go new file mode 100644 index 0000000..28c6851 --- /dev/null +++ b/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. 
+type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/api/types/swarm/runtime.go b/api/types/swarm/runtime.go new file mode 100644 index 0000000..c4c731d --- /dev/null +++ b/api/types/swarm/runtime.go @@ -0,0 +1,19 @@ +package swarm + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) diff --git a/api/types/swarm/secret.go b/api/types/swarm/secret.go new file mode 100644 index 0000000..fdb2388 --- /dev/null +++ b/api/types/swarm/secret.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/api/types/swarm/service.go b/api/types/swarm/service.go new file mode 100644 index 0000000..fa31a7e --- /dev/null +++ b/api/types/swarm/service.go @@ -0,0 +1,124 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. 
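+ // (Editorial note on the progression: an update is "updating" until it
+ // ends up "paused" or "completed"; the rollback_* states below mirror
+ // the same progression for rollbacks.)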
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a rollback paused.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a rollback completed.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ // (For example, with 10 tasks being updated and MaxFailureRatio 0.1,
+ // a second failed task makes the ratio 2/10 = 0.2 > 0.1 and invokes
+ // the action; a single failure, at exactly 0.1, does not.)
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+
+ // Order indicates the order of operations when rolling out an updated
+ // task. Either the old task is shut down before the new task is
+ // started, or the new task is started before the old task is shut down.
+ Order string
+}
diff --git a/api/types/swarm/swarm.go b/api/types/swarm/swarm.go
new file mode 100644
index 0000000..5b74f14
--- /dev/null
+++ b/api/types/swarm/swarm.go
@@ -0,0 +1,217 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info";
+// it contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+ TLSInfo TLSInfo
+ RootRotationInProgress bool
+}
+
+// Swarm represents a swarm.
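+// (Editorial note: Swarm below is ClusterInfo plus the JoinTokens, which is
+// why the "info" output modeled by ClusterInfo above never carries the
+// tokens.)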
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often the agent should send heartbeats to
+ // the dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
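+ // (Editorial illustration, values invented: an entry might be
+ // &ExternalCA{Protocol: ExternalCAProtocolCFSSL,
+ // URL: "https://ca.example.com/api/v1/cfssl/sign"}.)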
+	ExternalCAs []*ExternalCA `json:",omitempty"`
+
+	// SigningCACert and SigningCAKey specify the desired signing root CA and
+	// root CA key for the swarm. When inspecting the cluster, the key will
+	// be redacted.
+	SigningCACert string `json:",omitempty"`
+	SigningCAKey string `json:",omitempty"`
+
+	// If this value changes, and there is no specified signing cert and key,
+	// then the swarm is forced to generate a new root certificate and key.
+	ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents the type of an external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines an external CA to be used by the cluster.
+type ExternalCA struct {
+	// Protocol is the protocol used by this external CA.
+	Protocol ExternalCAProtocol
+
+	// URL is the URL where the external CA can be reached.
+	URL string
+
+	// Options is a set of additional key/value pairs whose interpretation
+	// depends on the specified CA type.
+	Options map[string]string `json:",omitempty"`
+
+	// CACert specifies which root CA is used by this external CA. This certificate must
+	// be in PEM format.
+	CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+	ListenAddr string
+	AdvertiseAddr string
+	DataPathAddr string
+	ForceNewCluster bool
+	Spec Spec
+	AutoLockManagers bool
+	Availability NodeAvailability
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+	ListenAddr string
+	AdvertiseAddr string
+	DataPathAddr string
+	RemoteAddrs []string
+	JoinToken string // accept by secret
+	Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+	// UnlockKey is the unlock key in ASCII-armored format.
+	UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+	// LocalNodeStateInactive INACTIVE
+	LocalNodeStateInactive LocalNodeState = "inactive"
+	// LocalNodeStatePending PENDING
+	LocalNodeStatePending LocalNodeState = "pending"
+	// LocalNodeStateActive ACTIVE
+	LocalNodeStateActive LocalNodeState = "active"
+	// LocalNodeStateError ERROR
+	LocalNodeStateError LocalNodeState = "error"
+	// LocalNodeStateLocked LOCKED
+	LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+	NodeID string
+	NodeAddr string
+
+	LocalNodeState LocalNodeState
+	ControlAvailable bool
+	Error string
+
+	RemoteManagers []Peer
+	Nodes int `json:",omitempty"`
+	Managers int `json:",omitempty"`
+
+	Cluster *ClusterInfo `json:",omitempty"`
+}
+
+// Peer represents a peer.
+type Peer struct {
+	NodeID string
+	Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
+type UpdateFlags struct {
+	RotateWorkerToken bool
+	RotateManagerToken bool
+	RotateManagerUnlockKey bool
+}
diff --git a/api/types/swarm/task.go b/api/types/swarm/task.go
new file mode 100644
index 0000000..a598a79
--- /dev/null
+++ b/api/types/swarm/task.go
@@ -0,0 +1,149 @@
+package swarm
+
+import "time"
+
+// TaskState represents the state of a task.
+type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + ContainerSpec ContainerSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. 
+type RestartPolicy struct {
+	Condition RestartPolicyCondition `json:",omitempty"`
+	Delay *time.Duration `json:",omitempty"`
+	MaxAttempts *uint64 `json:",omitempty"`
+	Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+	// RestartPolicyConditionNone NONE
+	RestartPolicyConditionNone RestartPolicyCondition = "none"
+	// RestartPolicyConditionOnFailure ON_FAILURE
+	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+	// RestartPolicyConditionAny ANY
+	RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+	Timestamp time.Time `json:",omitempty"`
+	State TaskState `json:",omitempty"`
+	Message string `json:",omitempty"`
+	Err string `json:",omitempty"`
+	ContainerStatus ContainerStatus `json:",omitempty"`
+	PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+	ContainerID string `json:",omitempty"`
+	PID int `json:",omitempty"`
+	ExitCode int `json:",omitempty"`
+}
+
+// PortStatus represents the port status of a task's host ports whose
+// service has published host ports.
+type PortStatus struct {
+	Ports []PortConfig `json:",omitempty"`
+}
diff --git a/api/types/time/duration_convert.go b/api/types/time/duration_convert.go
new file mode 100644
index 0000000..63e1eec
--- /dev/null
+++ b/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+	"strconv"
+	"time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+	return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/api/types/time/duration_convert_test.go b/api/types/time/duration_convert_test.go
new file mode 100644
index 0000000..869c08f
--- /dev/null
+++ b/api/types/time/duration_convert_test.go
@@ -0,0 +1,26 @@
+package time
+
+import (
+	"testing"
+	"time"
+)
+
+func TestDurationToSecondsString(t *testing.T) {
+	cases := []struct {
+		in time.Duration
+		expected string
+	}{
+		{0 * time.Second, "0"},
+		{1 * time.Second, "1"},
+		{1 * time.Minute, "60"},
+		{24 * time.Hour, "86400"},
+	}
+
+	for _, c := range cases {
+		s := DurationToSecondsString(c.in)
+		if s != c.expected {
+			t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s)
+			t.Fail()
+		}
+	}
+}
diff --git a/api/types/time/timestamp.go b/api/types/time/timestamp.go
new file mode 100644
index 0000000..9aa9702
--- /dev/null
+++ b/api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with the --since and --until parameters for `docker logs` and `docker events`.
+const (
+	rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+	dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+	dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string first as a Go duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If any of
+// these succeeds, it returns a Unix timestamp as a string; otherwise it
+// returns the given value back.
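+// For example (editor's illustration; the expected values are taken from
+// the cases in timestamp_test.go below):
+//
+//	GetTimestamp("2006-01-02T15:04:05Z", ref) // "1136214245.000000000", nil
+//	GetTimestamp("1m", ref)                   // Unix time of ref minus one minute, nil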
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+	}
+
+	var format string
+	var parseInLocation bool
+
+	// If the string has a Z, a +, or three dashes, use Parse; otherwise use ParseInLocation.
+	parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+	if strings.Contains(value, ".") {
+		if parseInLocation {
+			format = rFC3339NanoLocal
+		} else {
+			format = time.RFC3339Nano
+		}
+	} else if strings.Contains(value, "T") {
+		// we want the number of colons in the T portion of the timestamp
+		tcolons := strings.Count(value, ":")
+		// if parseInLocation is off and we have a +/- zone offset (not Z) then
+		// there will be an extra colon in the input for the tz offset; subtract
+		// that colon from the tcolons count
+		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+			tcolons--
+		}
+		if parseInLocation {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15"
+			case 1:
+				format = "2006-01-02T15:04"
+			default:
+				format = rFC3339Local
+			}
+		} else {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15Z07:00"
+			case 1:
+				format = "2006-01-02T15:04Z07:00"
+			default:
+				format = time.RFC3339
+			}
+		}
+	} else if parseInLocation {
+		format = dateLocal
+	} else {
+		format = dateWithZone
+	}
+
+	var t time.Time
+	var err error
+
+	if parseInLocation {
+		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+	} else {
+		t, err = time.Parse(format, value)
+	}
+
+	if err != nil {
+		// If there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp.
+		if strings.Contains(value, "-") {
+			return "", err // was probably an RFC3339-like timestamp but the parser failed with an error
+		}
+		return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+	}
+
+	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns the seconds and nanoseconds from a timestamp that
+// has the format "%d.%09d" (i.e. the output of
+// fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond()))).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//
+//	seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+//	if err == nil {
+//		since := time.Unix(seconds, nanoseconds)
+//	}
+//
+// If value is empty, it returns def (the default seconds) and zero nanoseconds.
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+	if value == "" {
+		return def, 0, nil
+	}
+	sa := strings.SplitN(value, ".", 2)
+	s, err := strconv.ParseInt(sa[0], 10, 64)
+	if err != nil {
+		return s, 0, err
+	}
+	if len(sa) != 2 {
+		return s, 0, nil
+	}
+	n, err := strconv.ParseInt(sa[1], 10, 64)
+	if err != nil {
+		return s, n, err
+	}
+	// should already be in nanoseconds but just in case convert n to nanoseconds
+	n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+	return s, n, nil
+}
diff --git a/api/types/time/timestamp_test.go b/api/types/time/timestamp_test.go
new file mode 100644
index 0000000..a165130
--- /dev/null
+++ b/api/types/time/timestamp_test.go
@@ -0,0 +1,93 @@
+package time
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+func TestGetTimestamp(t *testing.T) {
+	now := time.Now().In(time.UTC)
+	cases := []struct {
+		in, expected string
+		expectedErr bool
+	}{
+		// Partial RFC3339 strings get parsed with second precision
+		{"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false},
+		{"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false},
+		{"2006-01-02T15:04:05.999999999", "1136214245.999999999", false},
+		{"2006-01-02T15:04:05Z", "1136214245.000000000", false},
+		{"2006-01-02T15:04:05", "1136214245.000000000", false},
+		{"2006-01-02T15:04:0Z", "", true},
+		{"2006-01-02T15:04:0", "", true},
+		{"2006-01-02T15:04Z", "1136214240.000000000", false},
+		{"2006-01-02T15:04+00:00", "1136214240.000000000", false},
+		{"2006-01-02T15:04-00:00", "1136214240.000000000", false},
+		{"2006-01-02T15:04", "1136214240.000000000", false},
+		{"2006-01-02T15:0Z", "", true},
+		{"2006-01-02T15:0", "", true},
+		{"2006-01-02T15Z", "1136214000.000000000", false},
+		{"2006-01-02T15+00:00", "1136214000.000000000", false},
+		{"2006-01-02T15-00:00", "1136214000.000000000", false},
+		{"2006-01-02T15", "1136214000.000000000", false},
+		{"2006-01-02T1Z", "1136163600.000000000", false},
+		{"2006-01-02T1", "1136163600.000000000", false},
+		{"2006-01-02TZ", "", true},
+		{"2006-01-02T", "", true},
+		{"2006-01-02+00:00", "1136160000.000000000", false},
+		{"2006-01-02-00:00", "1136160000.000000000", false},
+		{"2006-01-02-00:01", "1136160060.000000000", false},
+		{"2006-01-02Z", "1136160000.000000000", false},
+		{"2006-01-02", "1136160000.000000000", false},
+		{"2015-05-13T20:39:09Z", "1431549549.000000000", false},
+
+		// unix timestamps returned as is
+		{"1136073600", "1136073600", false},
+		{"1136073600.000000001", "1136073600.000000001", false},
+		// Durations
+		{"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false},
+		{"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false},
+		{"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false},
+
+		// String fallback
+		{"invalid", "invalid", false},
+	}
+
+	for _, c := range cases {
+		o, err := GetTimestamp(c.in, now)
+		if o != c.expected ||
+			(err == nil && c.expectedErr) ||
+			(err != nil && !c.expectedErr) {
+			t.Errorf("wrong value for '%s'.
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/api/types/types.go b/api/types/types.go new file mode 100644 index 0000000..b08bcb9 --- /dev/null +++ b/api/types/types.go @@ -0,0 +1,575 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
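+// Editor's note: this struct is JSON-encoded and then base64-encoded into
+// the X-Docker-Container-Path-Stat response header; see the archive endpoint
+// handlers for the authoritative encoding.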
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Engine string + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, 
fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
+type MountPoint struct {
+	Type mount.Type `json:",omitempty"`
+	Name string `json:",omitempty"`
+	Source string
+	Destination string
+	Driver string `json:",omitempty"`
+	Mode string
+	RW bool
+	Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+	Name string // Name is the requested name of the network
+	ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+	Created time.Time // Created is the time the network was created
+	Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+	Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+	EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+	IPAM network.IPAM // IPAM is the network's IP Address Management
+	Internal bool // Internal represents if the network is for internal use only
+	Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+	Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+	ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
+	ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+	Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+	Options map[string]string // Options holds the network specific options to use when creating the network
+	Labels map[string]string // Labels holds metadata specific to the network being created
+	Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+	Services map[string]network.ServiceInfo `json:",omitempty"`
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+	Name string
+	EndpointID string
+	MacAddress string
+	IPv4Address string
+	IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+	// Check for networks with duplicate names.
+	// Network is primarily keyed based on a random ID and not on the name.
+	// The network name is strictly a user-friendly alias to the network,
+	// which is uniquely identified using its ID, so there is no guaranteed
+	// way to check for duplicates. Option CheckDuplicate provides a best
+	// effort check for any networks with the same name, but it is not
+	// guaranteed to catch all name collisions.
+	CheckDuplicate bool
+	Driver string
+	Scope string
+	EnableIPv6 bool
+	IPAM *network.IPAM
+	Internal bool
+	Attachable bool
+	Ingress bool
+	ConfigOnly bool
+	ConfigFrom *network.ConfigReference
+	Options map[string]string
+	Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for the network create call.
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuilderSize int64 +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} diff --git a/api/types/versions/README.md b/api/types/versions/README.md new file mode 100644 index 0000000..1ef911e --- /dev/null +++ b/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. 
These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.

## Package name conventions

The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:

1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code would actually be valid, but it arguably looks more awkward: `v1_20.CallFunction`.

For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
diff --git a/api/types/versions/compare.go b/api/types/versions/compare.go
new file mode 100644
index 0000000..611d4fe
--- /dev/null
+++ b/api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+	"strconv"
+	"strings"
+)
+
+// compare compares two version strings.
+// It returns -1 if v1 < v2, 1 if v1 > v2, and 0 otherwise.
+func compare(v1, v2 string) int {
+	var (
+		currTab = strings.Split(v1, ".")
+		otherTab = strings.Split(v2, ".")
+	)
+
+	max := len(currTab)
+	if len(otherTab) > max {
+		max = len(otherTab)
+	}
+	for i := 0; i < max; i++ {
+		var currInt, otherInt int
+
+		if len(currTab) > i {
+			currInt, _ = strconv.Atoi(currTab[i])
+		}
+		if len(otherTab) > i {
+			otherInt, _ = strconv.Atoi(otherTab[i])
+		}
+		if currInt > otherInt {
+			return 1
+		}
+		if otherInt > currInt {
+			return -1
+		}
+	}
+	return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+	return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+	return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+	return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+	return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+	return compare(v, other) == 0
+}
diff --git a/api/types/versions/compare_test.go b/api/types/versions/compare_test.go
new file mode 100644
index 0000000..c2b9686
--- /dev/null
+++ b/api/types/versions/compare_test.go
@@ -0,0 +1,26 @@
+package versions
+
+import (
+	"testing"
+)
+
+func assertVersion(t *testing.T, a, b string, result int) {
+	if r := compare(a, b); r != result {
+		t.Fatalf("Unexpected version comparison result.
Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/api/types/versions/v1p19/types.go b/api/types/versions/v1p19/types.go new file mode 100644 index 0000000..dc13150 --- /dev/null +++ b/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/api/types/versions/v1p20/types.go b/api/types/versions/v1p20/types.go new file mode 100644 index 0000000..94a06d7 --- /dev/null +++ b/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/api/types/volume.go b/api/types/volume.go new file mode 100644 index 0000000..a69b0cf --- /dev/null +++ b/api/types/volume.go @@ -0,0 +1,61 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Time volume was created. 
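+	// (Editor's note: a timestamp string, e.g. "2017-07-19T12:00:26Z";
+	// the example value is illustrative only.)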
+ CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. + // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/api/types/volume/volumes_create.go b/api/types/volume/volumes_create.go new file mode 100644 index 0000000..9f70e43 --- /dev/null +++ b/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. 
+	// Required: true
+	Name string `json:"Name"`
+}
diff --git a/api/types/volume/volumes_list.go b/api/types/volume/volumes_list.go
new file mode 100644
index 0000000..833dad9
--- /dev/null
+++ b/api/types/volume/volumes_list.go
@@ -0,0 +1,23 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+import "github.com/docker/docker/api/types"
+
+// VolumesListOKBody volumes list o k body
+// swagger:model VolumesListOKBody
+type VolumesListOKBody struct {
+
+	// List of volumes
+	// Required: true
+	Volumes []*types.Volume `json:"Volumes"`
+
+	// Warnings that occurred when fetching the list of volumes
+	// Required: true
+	Warnings []string `json:"Warnings"`
+}
diff --git a/build-allarch.sh b/build-allarch.sh
new file mode 100755
index 0000000..f24abee
--- /dev/null
+++ b/build-allarch.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -o errexit
+
+#echo "Building x86_64.."
+#docker build -f Dockerfile.build.x86_64 -t docker-build-x86_64 .
+#docker run --rm -v "$(pwd):/docker" docker-build-x86_64 ./build.sh
+
+#echo "Building i386.."
+#docker build -f Dockerfile.build.i386 -t docker-build-i386 .
+#docker run --rm -v "$(pwd):/docker" docker-build-i386 ./build.sh
+
+echo "Building armv7.."
+# Build the builder image first, mirroring the other arch sections above;
+# without this step the docker run below would fail unless the image
+# already exists locally.
+docker build -f Dockerfile.build.arm -t docker-build-arm .
+docker run --rm -e GOARM=7 -v "$(pwd):/docker" docker-build-arm /bin/sh build.sh
+
+#echo "Building aarch64.."
+#docker build -f Dockerfile.build.aarch64 -t docker-build-aarch64 .
+#docker run --rm -v "$(pwd):/docker" docker-build-aarch64 /bin/sh build.sh
diff --git a/build-arm.sh b/build-arm.sh
new file mode 100755
index 0000000..e8d2122
--- /dev/null
+++ b/build-arm.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+set -o errexit
+
+arch="armv7"
+
+version=$(git describe --tags --always)
+
+AUTO_GOPATH=1 \
+CC=arm-linux-gnueabi-gcc GOOS=linux GOARCH=arm GOARM=7 CGO_ENABLED=1 \
+DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' \
+./hack/make.sh binary-docker
+
+src="bundles/latest/binary-docker"
+dst="docker"
+
+rm -rf "$dst"
+mkdir "$dst"
+
+cp -L "$src/docker" "$dst/docker"
+#strip --strip-program=arm-linux-gnueabi-strip "$dst/docker"
+
+ln -s docker "$dst/dockerd"
+ln -s docker "$dst/docker-containerd"
+ln -s docker "$dst/docker-containerd-ctr"
+ln -s docker "$dst/docker-containerd-shim"
+ln -s docker "$dst/docker-proxy"
+ln -s docker "$dst/docker-runc"
+
+tar czfv "docker-$version-$arch.tar.gz" "$dst"
diff --git a/build.sh b/build.sh
new file mode 100755
index 0000000..79c848e
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+set -o errexit
+
+case "$(go env GOARCH)" in
+	"arm")
+		arch="armv$(go env GOARM)"
+		;;
+	"arm64")
+		arch="aarch64"
+		;;
+	"386")
+		arch="i386"
+		;;
+	"amd64")
+		arch="x86_64"
+		;;
+esac
+
+version=$(git describe --tags --always)
+
+AUTO_GOPATH=1 \
+DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' \
+./hack/make.sh binary-docker
+
+src="bundles/latest/binary-docker"
+dst="docker"
+
+rm -rf "$dst"
+mkdir "$dst"
+
+cp -L "$src/docker" "$dst/docker"
+strip "$dst/docker"
+
+ln -s docker "$dst/dockerd"
+ln -s docker "$dst/docker-containerd"
+ln -s docker "$dst/docker-containerd-ctr"
+ln -s docker "$dst/docker-containerd-shim"
+ln -s docker "$dst/docker-proxy"
+ln -s docker "$dst/docker-runc"
+
+tar czfv "docker-$version-$arch.tar.gz" "$dst"
diff --git a/builder/builder.go b/builder/builder.go
new file mode 100644
index 0000000..e480601
--- /dev/null
+++ b/builder/builder.go
@@ -0,0 +1,105 @@
+// Package builder defines interfaces for any Docker builder to implement.
+//
+// Historically, only server-side Dockerfile interpreters existed.
+// This package allows for other implementations of Docker builders.
+package builder
+
+import (
+	"io"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/container"
+	containerpkg "github.com/docker/docker/container"
+	"github.com/docker/docker/layer"
+	"golang.org/x/net/context"
+)
+
+const (
+	// DefaultDockerfileName is the default filename with Docker commands, read by docker build
+	DefaultDockerfileName string = "Dockerfile"
+)
+
+// Source defines a location that can be used as a source for the ADD/COPY
+// instructions in the builder.
+type Source interface {
+	// Root returns the root path for accessing the source
+	Root() string
+	// Close signals that the filesystem tree won't be used anymore.
+	// For Context implementations using a temporary directory, it is recommended to
+	// delete the temporary directory in Close().
+	Close() error
+	// Hash returns a checksum for a file
+	Hash(path string) (string, error)
+}
+
+// Backend abstracts calls to a Docker Daemon.
+type Backend interface {
+	ImageBackend
+	ExecBackend
+
+	// Commit creates a new Docker image from an existing Docker container.
+	Commit(string, *backend.ContainerCommitConfig) (string, error)
+	// ContainerCreateWorkdir creates the workdir
+	ContainerCreateWorkdir(containerID string) error
+
+	CreateImage(config []byte, parent string, platform string) (Image, error)
+
+	ImageCacheBuilder
+}
+
+// ImageBackend are the interface methods required from an image component
+type ImageBackend interface {
+	GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
+}
+
+// ExecBackend contains the interface methods required for executing containers
+type ExecBackend interface {
+	// ContainerAttachRaw attaches to a container.
+	ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
+	// ContainerCreate creates a new Docker container and returns potential warnings
+	ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
+	// ContainerRm removes a container specified by `id`.
+	ContainerRm(name string, config *types.ContainerRmConfig) error
+	// ContainerKill stops the container execution abruptly.
+	ContainerKill(containerID string, sig uint64) error
+	// ContainerStart starts a new container
+	ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
+	// ContainerWait stops processing until the given container is stopped.
+	ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
+}
+
+// Result is the output produced by a Builder
+type Result struct {
+	ImageID string
+	FromImage Image
+}
+
+// ImageCacheBuilder represents a generator for stateful image cache.
+type ImageCacheBuilder interface {
+	// MakeImageCache creates a stateful image cache.
+	MakeImageCache(cacheFrom []string, platform string) ImageCache
+}
+
+// ImageCache abstracts an image cache.
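+// Conceptually it is a lookup: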
+// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCache returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config + MarshalJSON() ([]byte, error) +} + +// ReleaseableLayer is an image layer that can be mounted and released +type ReleaseableLayer interface { + Release() error + Mount() (string, error) + Commit(platform string) (ReleaseableLayer, error) + DiffID() layer.DiffID +} diff --git a/builder/dockerfile/bflag.go b/builder/dockerfile/bflag.go new file mode 100644 index 0000000..d849661 --- /dev/null +++ b/builder/dockerfile/bflag.go @@ -0,0 +1,183 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags returns the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// NewBFlagsWithArgs returns the new BFlags struct with Args set to args +func NewBFlagsWithArgs(args []string) *BFlags { + flags := NewBFlags() + flags.Args = args + return flags +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. 
We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic("No idea what kind of flag we have! Should never get here!") + } + + } + + return nil +} diff --git a/builder/dockerfile/bflag_test.go b/builder/dockerfile/bflag_test.go new file mode 100644 index 0000000..ac07e48 --- /dev/null +++ b/builder/dockerfile/bflag_test.go @@ -0,0 +1,187 @@ +package dockerfile + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatal("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatal("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatal("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatal("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatal("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatal("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + 
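+	// --- (editor's illustrative addition, not in the upstream tests: per
+	// the Parse implementation above, a leading "--" terminates parsing,
+	// so following flags are ignored and defaults are kept)
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--", "--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+	if flStr1.Value != "HI" {
+		t.Fatalf("Test %q, str1 should keep its default", bf.Args)
+	}
+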
// --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatal("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatal("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatal("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool2"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1", "--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "BYE" { + t.Fatalf("Test %s, str1 should be BYE", bf.Args) + } + if !flBool1.IsTrue() { + t.Fatalf("Test %s, bool1 should be true", bf.Args) + } +} diff --git a/builder/dockerfile/buildargs.go b/builder/dockerfile/buildargs.go new file mode 100644 index 0000000..e0daf9a --- /dev/null +++ b/builder/dockerfile/buildargs.go @@ -0,0 +1,148 @@ +package dockerfile + +import ( + "fmt" + "io" + + "github.com/docker/docker/runconfig/opts" +) + +// builtinAllowedBuildArgs is list of built-in allowed build args +// these args are considered transparent and are excluded from the image history. +// Filtering from history is implemented in dispatchers.go +var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// buildArgs manages arguments used by the builder +type buildArgs struct { + // args that are allowed for expansion/substitution and passing to commands in 'run'. 
+ allowedBuildArgs map[string]*string + // args defined before the first `FROM` in a Dockerfile + allowedMetaArgs map[string]*string + // args referenced by the Dockerfile + referencedArgs map[string]struct{} + // args provided by the user on the command line + argsFromOptions map[string]*string +} + +func newBuildArgs(argsFromOptions map[string]*string) *buildArgs { + return &buildArgs{ + allowedBuildArgs: make(map[string]*string), + allowedMetaArgs: make(map[string]*string), + referencedArgs: make(map[string]struct{}), + argsFromOptions: argsFromOptions, + } +} + +// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were +// passed but not consumed during build. Print a warning, if there are any. +func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) { + leftoverArgs := []string{} + for arg := range b.argsFromOptions { + _, isReferenced := b.referencedArgs[arg] + _, isBuiltin := builtinAllowedBuildArgs[arg] + if !isBuiltin && !isReferenced { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } +} + +// ResetAllowed clears the list of args that are allowed to be used by a +// directive +func (b *buildArgs) ResetAllowed() { + b.allowedBuildArgs = make(map[string]*string) +} + +// AddMetaArg adds a new meta arg that can be used by FROM directives +func (b *buildArgs) AddMetaArg(key string, value *string) { + b.allowedMetaArgs[key] = value +} + +// AddArg adds a new arg that can be used by directives +func (b *buildArgs) AddArg(key string, value *string) { + b.allowedBuildArgs[key] = value + b.referencedArgs[key] = struct{}{} +} + +// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been +// referenced by the Dockerfile. Returns true if the arg is not a builtin or +// if the builtin has been referenced in the Dockerfile. 
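+//
+// For example, `--build-arg HTTP_PROXY=...` with no `ARG HTTP_PROXY` in the
+// Dockerfile returns false here, so the unreferenced builtin is kept out of
+// the cache-busting environment assembled by prependEnvOnCmd in dispatchers.go.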
+func (b *buildArgs) IsReferencedOrNotBuiltin(key string) bool {
+	_, isBuiltin := builtinAllowedBuildArgs[key]
+	_, isAllowed := b.allowedBuildArgs[key]
+	return isAllowed || !isBuiltin
+}
+
+// GetAllAllowed returns a mapping with all the allowed args
+func (b *buildArgs) GetAllAllowed() map[string]string {
+	return b.getAllFromMapping(b.allowedBuildArgs)
+}
+
+// GetAllMeta returns a mapping with all the meta args
+func (b *buildArgs) GetAllMeta() map[string]string {
+	return b.getAllFromMapping(b.allowedMetaArgs)
+}
+
+func (b *buildArgs) getAllFromMapping(source map[string]*string) map[string]string {
+	m := make(map[string]string)
+
+	keys := keysFromMaps(source, builtinAllowedBuildArgs)
+	for _, key := range keys {
+		v, ok := b.getBuildArg(key, source)
+		if ok {
+			m[key] = v
+		}
+	}
+	return m
+}
+
+// FilterAllowed returns all allowed args without the filtered args
+func (b *buildArgs) FilterAllowed(filter []string) []string {
+	envs := []string{}
+	configEnv := opts.ConvertKVStringsToMap(filter)
+
+	for key, val := range b.GetAllAllowed() {
+		if _, ok := configEnv[key]; !ok {
+			envs = append(envs, fmt.Sprintf("%s=%s", key, val))
+		}
+	}
+	return envs
+}
+
+func (b *buildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) {
+	defaultValue, exists := mapping[key]
+	// Return override from options if one is defined
+	if v, ok := b.argsFromOptions[key]; ok && v != nil {
+		return *v, ok
+	}
+
+	if defaultValue == nil {
+		if v, ok := b.allowedMetaArgs[key]; ok && v != nil {
+			return *v, ok
+		}
+		return "", false
+	}
+	return *defaultValue, exists
+}
+
+func keysFromMaps(source map[string]*string, builtin map[string]bool) []string {
+	keys := []string{}
+	for key := range source {
+		keys = append(keys, key)
+	}
+	for key := range builtin {
+		keys = append(keys, key)
+	}
+	return keys
+}
diff --git a/builder/dockerfile/buildargs_test.go b/builder/dockerfile/buildargs_test.go
new file mode 100644
index 0000000..77113ea
--- /dev/null
+++ b/builder/dockerfile/buildargs_test.go
@@ -0,0 +1,100 @@
+package dockerfile
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func strPtr(source string) *string {
+	return &source
+}
+
+func TestGetAllAllowed(t *testing.T) {
+	buildArgs := newBuildArgs(map[string]*string{
+		"ArgNotUsedInDockerfile":
strPtr("fromopt1"), + "ArgOverriddenByOptions": strPtr("fromopt2"), + "ArgNoDefaultInMetaFromOptions": strPtr("fromopt3"), + "HTTP_PROXY": strPtr("theproxy"), + }) + + buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1")) + buildArgs.AddMetaArg("ArgOverriddenByOptions", strPtr("frommeta2")) + buildArgs.AddMetaArg("ArgNoDefaultInMetaFromOptions", nil) + + all := buildArgs.GetAllMeta() + expected := map[string]string{ + "HTTP_PROXY": "theproxy", + "ArgFromMeta": "frommeta1", + "ArgOverriddenByOptions": "fromopt2", + "ArgNoDefaultInMetaFromOptions": "fromopt3", + } + assert.Equal(t, expected, all) +} + +func TestWarnOnUnusedBuildArgs(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + buffer := new(bytes.Buffer) + buildArgs.WarnOnUnusedBuildArgs(buffer) + out := buffer.String() + assert.NotContains(t, out, "ThisArgIsUsed") + assert.NotContains(t, out, "HTTPS_PROXY") + assert.NotContains(t, out, "HTTP_PROXY") + assert.Contains(t, out, "ThisArgIsNotUsed") +} + +func TestIsUnreferencedBuiltin(t *testing.T) { + buildArgs := newBuildArgs(map[string]*string{ + "ThisArgIsUsed": strPtr("fromopt1"), + "ThisArgIsNotUsed": strPtr("fromopt2"), + "HTTPS_PROXY": strPtr("referenced builtin"), + "HTTP_PROXY": strPtr("unreferenced builtin"), + }) + buildArgs.AddArg("ThisArgIsUsed", nil) + buildArgs.AddArg("HTTPS_PROXY", nil) + + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed")) + assert.True(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY")) + assert.False(t, buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY")) +} diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go new file mode 100644 index 0000000..cee1436 --- /dev/null +++ b/builder/dockerfile/builder.go @@ -0,0 +1,421 @@ +package dockerfile + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sync/syncmap" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// SessionGetter is object used to get access to a session by uuid +type SessionGetter interface { + Get(ctx context.Context, uuid string) (session.Caller, error) +} + +// BuildManager is shared across all Builder objects +type BuildManager struct { + archiver *archive.Archiver + backend builder.Backend + pathCache pathCache // 
TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache +} + +// NewBuildManager creates a BuildManager +func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) { + bm := &BuildManager{ + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + archiver: chrootarchive.NewArchiver(idMappings), + fsCache: fsCache, + } + if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { + return nil, err + } + return bm, nil +} + +// Build starts a new build from a BuildConfig +func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { + buildsTriggered.Inc() + if config.Options.Dockerfile == "" { + config.Options.Dockerfile = builder.DefaultDockerfileName + } + + source, dockerfile, err := remotecontext.Detect(config) + if err != nil { + return nil, err + } + defer func() { + if source != nil { + if err := source.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + } + }() + + // TODO @jhowardmsft LCOW support - this will require rework to allow both linux and Windows simultaneously. + // This is an interim solution to hardcode to linux if LCOW is turned on. + if dockerfile.Platform == "" { + dockerfile.Platform = runtime.GOOS + if dockerfile.Platform == "windows" && system.LCOWSupported() { + dockerfile.Platform = "linux" + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil { + return nil, err + } else if src != nil { + source = src + } + + builderOptions := builderOptions{ + Options: config.Options, + ProgressWriter: config.ProgressWriter, + Backend: bm.backend, + PathCache: bm.pathCache, + Archiver: bm.archiver, + Platform: dockerfile.Platform, + } + + return newBuilder(ctx, builderOptions).build(source, dockerfile) +} + +func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) { + if options.SessionID == "" || bm.sg == nil { + return nil, nil + } + logrus.Debug("client is session enabled") + + ctx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout) + defer cancelCtx() + + c, err := bm.sg.Get(ctx, options.SessionID) + if err != nil { + return nil, err + } + go func() { + <-c.Context().Done() + cancel() + }() + if options.RemoteContext == remotecontext.ClientSessionRemote { + st := time.Now() + csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, + options.SessionID, []string{"/"}) + if err != nil { + return nil, err + } + src, err := bm.fsCache.SyncFrom(ctx, csi) + if err != nil { + return nil, err + } + logrus.Debugf("sync-time: %v", time.Since(st)) + return src, nil + } + return nil, nil +} + +// builderOptions are the dependencies required by the builder +type builderOptions struct { + Options *types.ImageBuildOptions + Backend builder.Backend + ProgressWriter backend.ProgressWriter + PathCache pathCache + Archiver *archive.Archiver + Platform string +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. 
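+//
+// A Builder is created per build request by newBuilder (below) and is driven
+// by build(), which dispatches the parsed Dockerfile one instruction at a time.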
+type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Aux *streamformatter.AuxFormatter + Output io.Writer + + docker builder.Backend + clientCtx context.Context + + archiver *archive.Archiver + buildStages *buildStages + disableCommit bool + buildArgs *buildArgs + imageSources *imageSources + pathCache pathCache + containerManager *containerManager + imageProber ImageProber + + // TODO @jhowardmft LCOW Support. This will be moved to options at a later + // stage, however that cannot be done now as it affects the public API + // if it were. + platform string +} + +// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options. +// TODO @jhowardmsft LCOW support: Eventually platform can be moved into the builder +// options, however, that would be an API change as it shares types.ImageBuildOptions. +func newBuilder(clientCtx context.Context, options builderOptions) *Builder { + config := options.Options + if config == nil { + config = new(types.ImageBuildOptions) + } + + // @jhowardmsft LCOW Support. For the time being, this is interim. Eventually + // will be moved to types.ImageBuildOptions, but it can't for now as that would + // be an API change. + if options.Platform == "" { + options.Platform = runtime.GOOS + } + if options.Platform == "windows" && system.LCOWSupported() { + options.Platform = "linux" + } + + b := &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: options.ProgressWriter.StdoutFormatter, + Stderr: options.ProgressWriter.StderrFormatter, + Aux: options.ProgressWriter.AuxFormatter, + Output: options.ProgressWriter.Output, + docker: options.Backend, + archiver: options.Archiver, + buildArgs: newBuildArgs(config.BuildArgs), + buildStages: newBuildStages(), + imageSources: newImageSources(clientCtx, options), + pathCache: options.PathCache, + imageProber: newImageProber(options.Backend, config.CacheFrom, options.Platform, config.NoCache), + containerManager: newContainerManager(options.Backend), + platform: options.Platform, + } + + return b +} + +// Build runs the Dockerfile builder by parsing the Dockerfile and executing +// the instructions from the file. +func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { + defer b.imageSources.Unmount() + + addNodesForLabelOption(dockerfile.AST, b.options.Labels) + + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc() + return nil, err + } + + dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source) + if err != nil { + return nil, err + } + + if b.options.Target != "" && !dispatchState.isCurrentStage(b.options.Target) { + buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() + return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target) + } + + dockerfile.PrintWarnings(b.Stderr) + b.buildArgs.WarnOnUnusedBuildArgs(b.Stderr) + + if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated. 
Is your Dockerfile empty?") + } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} + +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil + } + return aux.Emit(types.BuildResult{ID: state.imageID}) +} + +func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) { + shlex := NewShellLex(dockerfile.EscapeToken) + state := newDispatchState() + total := len(dockerfile.AST.Children) + var err error + for i, n := range dockerfile.AST.Children { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprint(b.Stdout, "Build cancelled") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + // If this is a FROM and we have a previous image then + // emit an aux message for that image since it is the + // end of the previous stage + if n.Value == command.From { + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + } + + if n.Value == command.From && state.isCurrentStage(b.options.Target) { + break + } + + opts := dispatchOptions{ + state: state, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if state, err = b.dispatch(opts); err != nil { + if b.options.ForceRemove { + b.containerManager.RemoveAll(b.Stdout) + } + return nil, err + } + + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(state.imageID)) + if b.options.Remove { + b.containerManager.RemoveAll(b.Stdout) + } + } + + // Emit a final aux message for the final image + if err := emitImageID(b.Aux, state); err != nil { + return nil, err + } + + return state, nil +} + +func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) { + if len(labels) == 0 { + return + } + + node := parser.NodeFromLabels(labels) + dockerfile.Children = append(dockerfile.Children, node) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + if len(changes) == 0 { + return config, nil + } + + b := newBuilder(context.Background(), builderOptions{ + Options: &types.ImageBuildOptions{NoCache: true}, + }) + + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, err + } + + // TODO @jhowardmsft LCOW support. For now, if LCOW enabled, switch to linux. + // Also explicitly set the platform. Ultimately this will be in the builder + // options, but we can't do that yet as it would change the API. 
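+	//
+	// A sketch of the defaulting below: on a Linux daemon dockerfile.Platform
+	// becomes "linux"; on a Windows daemon it becomes "windows" unless LCOW is
+	// enabled, in which case it is forced to "linux".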
+ if dockerfile.Platform == "" { + dockerfile.Platform = runtime.GOOS + } + if dockerfile.Platform == "windows" && system.LCOWSupported() { + dockerfile.Platform = "linux" + } + b.platform = dockerfile.Platform + + // ensure that the commands are valid + for _, n := range dockerfile.AST.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + if err := checkDispatchDockerfile(dockerfile.AST); err != nil { + return nil, err + } + dispatchState := newDispatchState() + dispatchState.runConfig = config + return dispatchFromDockerfile(b, dockerfile, dispatchState, nil) +} + +func checkDispatchDockerfile(dockerfile *parser.Node) error { + for _, n := range dockerfile.Children { + if err := checkDispatch(n); err != nil { + return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine) + } + } + return nil +} + +func dispatchFromDockerfile(b *Builder, result *parser.Result, dispatchState *dispatchState, source builder.Source) (*container.Config, error) { + shlex := NewShellLex(result.EscapeToken) + ast := result.AST + total := len(ast.Children) + + for i, n := range ast.Children { + opts := dispatchOptions{ + state: dispatchState, + stepMsg: formatStep(i, total), + node: n, + shlex: shlex, + source: source, + } + if _, err := b.dispatch(opts); err != nil { + return nil, err + } + } + return dispatchState.runConfig, nil +} diff --git a/builder/dockerfile/builder_test.go b/builder/dockerfile/builder_test.go new file mode 100644 index 0000000..5fedca0 --- /dev/null +++ b/builder/dockerfile/builder_test.go @@ -0,0 +1,34 @@ +package dockerfile + +import ( + "strings" + "testing" + + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/stretchr/testify/assert" +) + +func TestAddNodesForLabelOption(t *testing.T) { + dockerfile := "FROM scratch" + result, err := parser.Parse(strings.NewReader(dockerfile)) + assert.NoError(t, err) + + labels := map[string]string{ + "org.e": "cli-e", + "org.d": "cli-d", + "org.c": "cli-c", + "org.b": "cli-b", + "org.a": "cli-a", + } + nodes := result.AST + addNodesForLabelOption(nodes, labels) + + expected := []string{ + "FROM scratch", + `LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`, + } + assert.Len(t, nodes.Children, 2) + for i, v := range nodes.Children { + assert.Equal(t, expected[i], v.Original) + } +} diff --git a/builder/dockerfile/builder_unix.go b/builder/dockerfile/builder_unix.go new file mode 100644 index 0000000..5ea63da --- /dev/null +++ b/builder/dockerfile/builder_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package dockerfile + +func defaultShellForPlatform(platform string) []string { + return []string{"/bin/sh", "-c"} +} diff --git a/builder/dockerfile/builder_windows.go b/builder/dockerfile/builder_windows.go new file mode 100644 index 0000000..7bfef32 --- /dev/null +++ b/builder/dockerfile/builder_windows.go @@ -0,0 +1,8 @@ +package dockerfile + +func defaultShellForPlatform(platform string) []string { + if platform == "linux" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/builder/dockerfile/clientsession.go b/builder/dockerfile/clientsession.go new file mode 100644 index 0000000..647e453 --- /dev/null +++ b/builder/dockerfile/clientsession.go @@ -0,0 +1,78 @@ +package dockerfile + +import ( + "time" + + "github.com/docker/docker/builder/fscache" + 
"github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/filesync" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +const sessionConnectTimeout = 5 * time.Second + +// ClientSessionTransport is a transport for copying files from docker client +// to the daemon. +type ClientSessionTransport struct{} + +// NewClientSessionTransport returns new ClientSessionTransport instance +func NewClientSessionTransport() *ClientSessionTransport { + return &ClientSessionTransport{} +} + +// Copy data from a remote to a destination directory. +func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { + csi, ok := id.(*ClientSessionSourceIdentifier) + if !ok { + return errors.New("invalid identifier for client session") + } + + return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ + SrcPaths: csi.srcPaths, + DestDir: dest, + CacheUpdater: cu, + }) +} + +// ClientSessionSourceIdentifier is an identifier that can be used for requesting +// files from remote client +type ClientSessionSourceIdentifier struct { + srcPaths []string + caller session.Caller + sharedKey string + uuid string +} + +// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance +func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string, sources []string) (*ClientSessionSourceIdentifier, error) { + csi := &ClientSessionSourceIdentifier{ + uuid: uuid, + srcPaths: sources, + } + caller, err := sg.Get(ctx, uuid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get session for %s", uuid) + } + + csi.caller = caller + return csi, nil +} + +// Transport returns transport identifier for remote identifier +func (csi *ClientSessionSourceIdentifier) Transport() string { + return remotecontext.ClientSessionRemote +} + +// SharedKey returns shared key for remote identifier. Shared key is used +// for finding the base for a repeated transfer. +func (csi *ClientSessionSourceIdentifier) SharedKey() string { + return csi.caller.SharedKey() +} + +// Key returns unique key for remote identifier. Requests with same key return +// same data. +func (csi *ClientSessionSourceIdentifier) Key() string { + return csi.uuid +} diff --git a/builder/dockerfile/command/command.go b/builder/dockerfile/command/command.go new file mode 100644 index 0000000..f23c687 --- /dev/null +++ b/builder/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands. 
+package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go new file mode 100644 index 0000000..7b241f3 --- /dev/null +++ b/builder/dockerfile/containerbackend.go @@ -0,0 +1,144 @@ +package dockerfile + +import ( + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type containerManager struct { + tmpContainers map[string]struct{} + backend builder.ExecBackend +} + +// newContainerManager creates a new container backend +func newContainerManager(docker builder.ExecBackend) *containerManager { + return &containerManager{ + backend: docker, + tmpContainers: make(map[string]struct{}), + } +} + +// Create a container +func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig, platform string) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + Platform: platform, + }) + if err != nil { + return container, err + } + c.tmpContainers[container.ID] = struct{}{} + return container, nil +} + +var errCancelled = errors.New("build cancelled") + +// Run a container by ID +func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) + }() + select { + case err := <-errCh: + return err + case <-attached: + } + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-ctx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + c.backend.ContainerKill(cID, 0) + c.removeContainer(cID, stdout) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) + return err + } + + waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + close(finished) + logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err)) + return err + } + + if status := <-waitC; 
status.ExitCode() != 0 {
+		close(finished)
+		logCancellationError(cancelErrCh,
+			fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode()))
+		return &statusCodeError{code: status.ExitCode(), err: status.Err()}
+	}
+
+	close(finished)
+	return <-cancelErrCh
+}
+
+func logCancellationError(cancelErrCh chan error, msg string) {
+	if cancelErr := <-cancelErrCh; cancelErr != nil {
+		logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg)
+	}
+}
+
+type statusCodeError struct {
+	code int
+	err  error
+}
+
+func (e *statusCodeError) Error() string {
+	return e.err.Error()
+}
+
+func (e *statusCodeError) StatusCode() int {
+	return e.code
+}
+
+func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error {
+	rmConfig := &types.ContainerRmConfig{
+		ForceRemove:  true,
+		RemoveVolume: true,
+	}
+	if err := c.backend.ContainerRm(containerID, rmConfig); err != nil {
+		fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err)
+		return err
+	}
+	return nil
+}
+
+// RemoveAll containers managed by this container manager
+func (c *containerManager) RemoveAll(stdout io.Writer) {
+	for containerID := range c.tmpContainers {
+		if err := c.removeContainer(containerID, stdout); err != nil {
+			return
+		}
+		delete(c.tmpContainers, containerID)
+		fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID))
+	}
+}
diff --git a/builder/dockerfile/copy.go b/builder/dockerfile/copy.go
new file mode 100644
index 0000000..c7db943
--- /dev/null
+++ b/builder/dockerfile/copy.go
@@ -0,0 +1,444 @@
+package dockerfile
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/urlutil"
+	"github.com/pkg/errors"
+)
+
+type pathCache interface {
+	Load(key interface{}) (value interface{}, ok bool)
+	Store(key, value interface{})
+}
+
+// copyInfo is a data object which stores the metadata about each source file in
+// a copyInstruction
+type copyInfo struct {
+	root         string
+	path         string
+	hash         string
+	noDecompress bool
+}
+
+func (c copyInfo) fullPath() (string, error) {
+	return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root)
+}
+
+func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
+	return copyInfo{root: source.Root(), path: path, hash: hash}
+}
+
+func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
+	return copyInfos
+}
+
+// copyInstruction is a fully parsed COPY or ADD command that is passed to
+// Builder.performCopy to copy files into the image filesystem
+type copyInstruction struct {
+	cmdName                 string
+	infos                   []copyInfo
+	dest                    string
+	allowLocalDecompression bool
+}
+
+// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
+// and creates a copyInstruction
+type copier struct {
+	imageSource *imageMount
+	source      builder.Source
+	pathCache   pathCache
+	download    sourceDownloader
+	tmpPaths    []string
+}
+
+func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
+	return copier{
+		source: req.source,
+		pathCache:   req.builder.pathCache,
+		download:    download,
+		imageSource: imageSource,
+	}
+}
+
+func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
+	inst := copyInstruction{cmdName: cmdName}
+	last := len(args) - 1
+
+	// Work in daemon-specific filepath semantics
+	inst.dest = filepath.FromSlash(args[last])
+
+	infos, err := o.getCopyInfosForSourcePaths(args[0:last])
+	if err != nil {
+		return inst, errors.Wrapf(err, "%s failed", cmdName)
+	}
+	if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
+		return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
+	}
+	inst.infos = infos
+	return inst, nil
+}
+
+// getCopyInfosForSourcePaths iterates over the source files and calculates the
+// info needed to copy (e.g. hash value if cached)
+func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) {
+	var infos []copyInfo
+	for _, orig := range sources {
+		subinfos, err := o.getCopyInfoForSourcePath(orig)
+		if err != nil {
+			return nil, err
+		}
+		infos = append(infos, subinfos...)
+	}
+
+	if len(infos) == 0 {
+		return nil, errors.New("no source files were specified")
+	}
+	return infos, nil
+}
+
+func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) {
+	if !urlutil.IsURL(orig) {
+		return o.calcCopyInfo(orig, true)
+	}
+	remote, path, err := o.download(orig)
+	if err != nil {
+		return nil, err
+	}
+	o.tmpPaths = append(o.tmpPaths, remote.Root())
+
+	hash, err := remote.Hash(path)
+	ci := newCopyInfoFromSource(remote, path, hash)
+	ci.noDecompress = true // data from http shouldn't be extracted even on ADD
+	return newCopyInfos(ci), err
+}
+
+// Cleanup removes any temporary directories created as part of downloading
+// remote files.
+func (o *copier) Cleanup() {
+	for _, path := range o.tmpPaths {
+		os.RemoveAll(path)
+	}
+	o.tmpPaths = []string{}
+}
+
+// TODO: allowWildcards can probably be removed by refactoring this function further.
+func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
+	imageSource := o.imageSource
+	if err := validateCopySourcePath(imageSource, origPath); err != nil {
+		return nil, err
+	}
+
+	// Work in daemon-specific OS filepath semantics
+	origPath = filepath.FromSlash(origPath)
+	origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
+	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
+
+	// TODO: do this when creating copier. Requires validateCopySourcePath
+	// (and other below) to be aware of the different sources. Why is it only
+	// done on image Source?
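+	//
+	// When a --from image is set, the copy source is the mounted image
+	// filesystem; otherwise it falls back to the build context sent by the
+	// client.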
+ if imageSource != nil { + var err error + o.source, err = imageSource.Source() + if err != nil { + return nil, errors.Wrapf(err, "failed to copy from %s", imageSource.ImageID()) + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + var copyInfos []copyInfo + if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(o.source.Root(), path) + if err != nil { + return err + } + + if rel == "." { + return nil + } + if match, _ := filepath.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return nil + } + // we already checked handleHash above + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + u, err := url.Parse(srcURL) + if err != nil { + return + } + filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics + if filename == "" { + err = errors.Errorf("cannot determine filename from url: %s", u) + return + } + + resp, err := remotecontext.GetWithStatusError(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how important is this random blank line to the output? 
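+	// (Presumably cosmetic: it separates the JSON progress output above from
+	// the output of the next build step.)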
+ fmt.Fprintln(stdout) + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + lc, err := remotecontext.NewLazySource(tmpDir) + return lc, filename, err +} + +type copyFileOptions struct { + decompress bool + archiver *archive.Archiver +} + +func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { + srcPath, err := source.fullPath() + if err != nil { + return err + } + destPath, err := dest.fullPath() + if err != nil { + return err + } + + archiver := options.archiver + + src, err := os.Stat(srcPath) + if err != nil { + return errors.Wrapf(err, "source path not found") + } + if src.IsDir() { + return copyDirectory(archiver, srcPath, destPath) + } + if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress { + return archiver.UntarPath(srcPath, destPath) + } + + destExistsAsDir, err := isExistingDirectory(destPath) + if err != nil { + return err + } + // dest.path must be used because destPath has already been cleaned of any + // trailing slash + if endsInSlash(dest.path) || destExistsAsDir { + // source.path must be used to get the correct filename when the source + // is a symlink + destPath = filepath.Join(destPath, filepath.Base(source.path)) + } + return copyFile(archiver, srcPath, destPath) +} + +func copyDirectory(archiver *archive.Archiver, source, dest string) error { + if err := archiver.CopyWithTar(source, dest); err != nil { + return errors.Wrapf(err, "failed to copy directory") + } + return fixPermissions(source, dest, archiver.IDMappings.RootPair()) +} + +func copyFile(archiver *archive.Archiver, source, dest string) error { + rootIDs := archiver.IDMappings.RootPair() + + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, rootIDs); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + if err := archiver.CopyFileWithTar(source, dest); err != nil { + return errors.Wrapf(err, "failed to copy file") + } + return fixPermissions(source, dest, rootIDs) +} + +func endsInSlash(path string) bool { + return strings.HasSuffix(path, string(os.PathSeparator)) +} + +// isExistingDirectory returns true if the path exists and is a directory +func isExistingDirectory(path string) (bool, error) { + destStat, err := os.Stat(path) + switch { + case os.IsNotExist(err): + return false, nil + case err != nil: + return false, err + } + return destStat.IsDir(), nil +} diff --git a/builder/dockerfile/copy_test.go b/builder/dockerfile/copy_test.go new file mode 100644 index 0000000..aee225b --- /dev/null +++ b/builder/dockerfile/copy_test.go @@ -0,0 +1,45 @@ +package dockerfile + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/stretchr/testify/assert" +) + +func TestIsExistingDirectory(t *testing.T) { + tmpfile := tempfile.NewTempFile(t, "file-exists-test", "something") + defer tmpfile.Remove() + tmpdir := tempfile.NewTempDir(t, "dir-exists-test") + defer tmpdir.Remove() + + var testcases = []struct { + doc string + path string + expected bool + }{ + { + doc: "directory exists", + path: tmpdir.Path, + expected: true, + }, + { + doc: 
"path doesn't exist", + path: "/bogus/path/does/not/exist", + expected: false, + }, + { + doc: "file exists", + path: tmpfile.Name(), + expected: false, + }, + } + + for _, testcase := range testcases { + result, err := isExistingDirectory(testcase.path) + if !assert.NoError(t, err) { + continue + } + assert.Equal(t, testcase.expected, result, testcase.doc) + } +} diff --git a/builder/dockerfile/copy_unix.go b/builder/dockerfile/copy_unix.go new file mode 100644 index 0000000..326d95b --- /dev/null +++ b/builder/dockerfile/copy_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" +) + +func fixPermissions(source, destination string, rootIDs idtools.IDPair) error { + skipChownRoot, err := isExistingDirectory(destination) + if err != nil { + return err + } + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if skipChownRoot && source == fullpath { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) + }) +} diff --git a/builder/dockerfile/copy_windows.go b/builder/dockerfile/copy_windows.go new file mode 100644 index 0000000..78f5b09 --- /dev/null +++ b/builder/dockerfile/copy_windows.go @@ -0,0 +1,8 @@ +package dockerfile + +import "github.com/docker/docker/pkg/idtools" + +func fixPermissions(source, destination string, rootIDs idtools.IDPair) error { + // chown is not supported on Windows + return nil +} diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go new file mode 100644 index 0000000..1f74241 --- /dev/null +++ b/builder/dockerfile/dispatchers.go @@ -0,0 +1,884 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "bytes" + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. 
+// +func env(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(req.args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + runConfig := req.state.runConfig + commitMessage := bytes.NewBufferString("ENV") + + for j := 0; j < len(req.args); j += 2 { + if len(req.args[j]) == 0 { + return errBlankCommandNames("ENV") + } + name := req.args[j] + value := req.args[j+1] + newVar := name + "=" + value + commitMessage.WriteString(" " + newVar) + + gotOne := false + for i, envVar := range runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + if equalEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + runConfig.Env = append(runConfig.Env, newVar) + } + } + + return req.builder.commit(req.state, commitMessage.String()) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + maintainer := req.args[0] + req.state.maintainer = maintainer + return req.builder.commit(req.state, "MAINTAINER "+maintainer) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(req.args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + runConfig := req.state.runConfig + + if runConfig.Labels == nil { + runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(req.args); j++ { + name := req.args[j] + if name == "" { + return errBlankCommandNames("LABEL") + } + + value := req.args[j+1] + commitStr += " " + name + "=" + value + + runConfig.Labels[name] = value + j++ + } + return req.builder.commit(req.state, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(req dispatchRequest) error { + if len(req.args) < 2 { + return errAtLeastTwoArguments("ADD") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + downloader := newRemoteSourceDownloader(req.builder.Output, req.builder.Stdout) + copier := copierFromDispatchRequest(req, downloader, nil) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "ADD") + if err != nil { + return err + } + copyInstruction.allowLocalDecompression = true + + return req.builder.performCopy(req.state, copyInstruction) +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. 
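+//
+// COPY also accepts a --from flag for multi-stage builds, e.g.
+//
+//	COPY --from=builder /go/bin/app /usr/local/bin/app
+//
+// where "builder" is either a named build stage or any image reference.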
+// +func dispatchCopy(req dispatchRequest) error { + if len(req.args) < 2 { + return errAtLeastTwoArguments("COPY") + } + + flFrom := req.flags.AddString("from", "") + if err := req.flags.Parse(); err != nil { + return err + } + + im, err := req.builder.getImageMount(flFrom) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", flFrom.Value) + } + + copier := copierFromDispatchRequest(req, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(req.args, "COPY") + if err != nil { + return err + } + + return req.builder.performCopy(req.state, copyInstruction) +} + +func (b *Builder) getImageMount(fromFlag *Flag) (*imageMount, error) { + if !fromFlag.IsUsed() { + // TODO: this could return the source in the default case as well? + return nil, nil + } + + var localOnly bool + imageRefOrID := fromFlag.Value + stage, err := b.buildStages.get(fromFlag.Value) + if err != nil { + return nil, err + } + if stage != nil { + imageRefOrID = stage.ImageID() + localOnly = true + } + return b.imageSources.Get(imageRefOrID, localOnly) +} + +// FROM imagename[:tag | @digest] [AS build-stage-name] +// +func from(req dispatchRequest) error { + stageName, err := parseBuildStageName(req.args) + if err != nil { + return err + } + + if err := req.flags.Parse(); err != nil { + return err + } + + req.builder.imageProber.Reset() + image, err := req.builder.getFromImage(req.shlex, req.args[0]) + if err != nil { + return err + } + if err := req.builder.buildStages.add(stageName, image); err != nil { + return err + } + req.state.beginStage(stageName, image) + req.builder.buildArgs.ResetAllowed() + if image.ImageID() == "" { + // Typically this means they used "FROM scratch" + return nil + } + + return processOnBuild(req) +} + +func parseBuildStageName(args []string) (string, error) { + stageName := "" + switch { + case len(args) == 3 && strings.EqualFold(args[1], "as"): + stageName = strings.ToLower(args[2]) + if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName) + } + case len(args) != 1: + return "", errors.New("FROM requires either one or three arguments") + } + + return stageName, nil +} + +// scratchImage is used as a token for the empty base image. +var scratchImage builder.Image = &image.Image{} + +func (b *Builder) getFromImage(shlex *ShellLex, name string) (builder.Image, error) { + substitutionArgs := []string{} + for key, value := range b.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) + } + + name, err := shlex.ProcessWord(name, substitutionArgs) + if err != nil { + return nil, err + } + + var localOnly bool + if stage, ok := b.buildStages.getByName(name); ok { + name = stage.ImageID() + localOnly = true + } + + // Windows cannot support a container with no base image unless it is LCOW. 
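+	// (api.NoBaseImageSpecifier is the literal "scratch".)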
+ if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + if b.platform == "windows" || (b.platform != "windows" && !system.LCOWSupported()) { + return nil, errors.New("Windows does not support FROM scratch") + } + } + return scratchImage, nil + } + imageMount, err := b.imageSources.Get(name, localOnly) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} + +func processOnBuild(req dispatchRequest) error { + dispatchState := req.state + // Process ONBUILD triggers if they exist + if nTriggers := len(dispatchState.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(req.builder.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := dispatchState.runConfig.OnBuild + dispatchState.runConfig.OnBuild = []string{} + + // Reset stdin settings as all build actions run without stdin + dispatchState.runConfig.OpenStdin = false + dispatchState.runConfig.StdinOnce = false + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + dockerfile, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for _, n := range dockerfile.AST.Children { + if err := checkDispatch(n); err != nil { + return err + } + + upperCasedCmd := strings.ToUpper(n.Value) + switch upperCasedCmd { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return errors.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, err := dispatchFromDockerfile(req.builder, dockerfile, dispatchState, req.source); err != nil { + return err + } + } + return nil +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(req dispatchRequest) error { + if len(req.args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := req.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) + switch triggerInstruction { + case "ONBUILD": + return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + runConfig := req.state.runConfig + original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") + runConfig.OnBuild = append(runConfig.OnBuild, original) + return req.builder.commit(req.state, "ONBUILD "+original) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(req dispatchRequest) error { + if len(req.args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + err := req.flags.Parse() + if err != nil { + return err + } + + runConfig := req.state.runConfig + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. 
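+	// For example, on Windows `WORKDIR /tmp` is normalised to `C:\tmp`; on
+	// Linux it stays `/tmp`.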
+	runConfig.WorkingDir, err = normaliseWorkdir(runConfig.WorkingDir, req.args[0])
+	if err != nil {
+		return err
+	}
+
+	// For performance reasons, we explicitly do a create/mkdir now
+	// This avoids unnecessary and expensive mount/unmount calls
+	// (on Windows in particular) during each container create.
+	// Prior to 1.13, the mkdir was deferred and not executed at this step.
+	if req.builder.disableCommit {
+		// Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo".
+		// We've already updated the runConfig and that's enough.
+		return nil
+	}
+
+	comment := "WORKDIR " + runConfig.WorkingDir
+	runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, req.builder.platform))
+	containerID, err := req.builder.probeAndCreate(req.state, runConfigWithCommentCmd)
+	if err != nil || containerID == "" {
+		return err
+	}
+	if err := req.builder.docker.ContainerCreateWorkdir(containerID); err != nil {
+		return err
+	}
+
+	return req.builder.commitContainer(req.state, containerID, runConfigWithCommentCmd)
+}
+
+// RUN some command yo
+//
+// run a command and commit the image. Args are automatically prepended with
+// the current SHELL which defaults to 'sh -c' under Linux or 'cmd /S /C' under
+// Windows, in the event there is only one argument. The difference in processing:
+//
+// RUN echo hi          # sh -c echo hi       (Linux and LCOW)
+// RUN echo hi          # cmd /S /C echo hi   (Windows)
+// RUN [ "echo", "hi" ] # echo hi
+//
+func run(req dispatchRequest) error {
+	if !req.state.hasFromImage() {
+		return errors.New("Please provide a source image with `from` prior to run")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return err
+	}
+
+	stateRunConfig := req.state.runConfig
+	args := handleJSONArgs(req.args, req.attributes)
+	if !req.attributes["json"] {
+		args = append(getShell(stateRunConfig, req.builder.platform), args...)
+	}
+	cmdFromArgs := strslice.StrSlice(args)
+	buildArgs := req.builder.buildArgs.FilterAllowed(stateRunConfig.Env)
+
+	saveCmd := cmdFromArgs
+	if len(buildArgs) > 0 {
+		saveCmd = prependEnvOnCmd(req.builder.buildArgs, buildArgs, cmdFromArgs)
+	}
+
+	runConfigForCacheProbe := copyRunConfig(stateRunConfig,
+		withCmd(saveCmd),
+		withEntrypointOverride(saveCmd, nil))
+	hit, err := req.builder.probeCache(req.state, runConfigForCacheProbe)
+	if err != nil || hit {
+		return err
+	}
+
+	runConfig := copyRunConfig(stateRunConfig,
+		withCmd(cmdFromArgs),
+		withEnv(append(stateRunConfig.Env, buildArgs...)),
+		withEntrypointOverride(saveCmd, strslice.StrSlice{""}))
+
+	// set config as already being escaped, this prevents double escaping on windows
+	runConfig.ArgsEscaped = true
+
+	logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)
+	cID, err := req.builder.create(runConfig)
+	if err != nil {
+		return err
+	}
+	if err := req.builder.containerManager.Run(req.builder.clientCtx, cID, req.builder.Stdout, req.builder.Stderr); err != nil {
+		if err, ok := err.(*statusCodeError); ok {
+			// TODO: change error type, because jsonmessage.JSONError assumes HTTP
+			return &jsonmessage.JSONError{
+				Message: fmt.Sprintf(
+					"The command '%s' returned a non-zero code: %d",
+					strings.Join(runConfig.Cmd, " "), err.StatusCode()),
+				Code: err.StatusCode(),
+			}
+		}
+		return err
+	}
+
+	return req.builder.commitContainer(req.state, cID, runConfigForCacheProbe)
+}
+
+// Derive the command to use for probeCache() and to commit in this container.
+// Note that we only do this if there are any build-time env vars. Also, we
+// use the special argument "|#" at the start of the args array. This will
+// avoid conflicts with any RUN command since commands cannot
+// start with | (vertical bar). The "#" (number of build envs) is there to
+// help ensure proper cache matches. We don't want a RUN command
+// that starts with "foo=abc" to be considered part of a build-time env var.
+//
+// remove any unreferenced built-in args from the environment variables.
+// These args are transparent so the resulting image should be the same regardless
+// of the value.
+func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice {
+	var tmpBuildEnv []string
+	for _, env := range buildArgVars {
+		key := strings.SplitN(env, "=", 2)[0]
+		if buildArgs.IsReferencedOrNotBuiltin(key) {
+			tmpBuildEnv = append(tmpBuildEnv, env)
+		}
+	}
+
+	sort.Strings(tmpBuildEnv)
+	tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
+	return strslice.StrSlice(append(tmpEnv, cmd...))
+}
+
+// CMD foo
+//
+// Set the default command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+//
+func cmd(req dispatchRequest) error {
+	if err := req.flags.Parse(); err != nil {
+		return err
+	}
+
+	runConfig := req.state.runConfig
+	cmdSlice := handleJSONArgs(req.args, req.attributes)
+	if !req.attributes["json"] {
+		cmdSlice = append(getShell(runConfig, req.builder.platform), cmdSlice...)
+	}
+
+	runConfig.Cmd = strslice.StrSlice(cmdSlice)
+	// set config as already being escaped, this prevents double escaping on windows
+	runConfig.ArgsEscaped = true
+
+	if err := req.builder.commit(req.state, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
+		return err
+	}
+
+	if len(req.args) != 0 {
+		req.state.cmdSet = true
+	}
+
+	return nil
+}
+
+// parseOptInterval(flag) is the duration of flag.Value, or 0 if
+// empty. An error is reported if the value is given and less than the minimum duration.
+func parseOptInterval(f *Flag) (time.Duration, error) {
+	s := f.Value
+	if s == "" {
+		return 0, nil
+	}
+	d, err := time.ParseDuration(s)
+	if err != nil {
+		return 0, err
+	}
+	if d < time.Duration(container.MinimumDuration) {
+		return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration)
+	}
+	return d, nil
+}
+
+// HEALTHCHECK foo
+//
+// Set the default healthcheck command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
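+// For example: `HEALTHCHECK --interval=30s --timeout=3s CMD curl -f http://localhost/`
+// sets a command-based probe, while `HEALTHCHECK NONE` disables any healthcheck
+// inherited from the base image.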
+//
+func healthcheck(req dispatchRequest) error {
+	if len(req.args) == 0 {
+		return errAtLeastOneArgument("HEALTHCHECK")
+	}
+	runConfig := req.state.runConfig
+	typ := strings.ToUpper(req.args[0])
+	args := req.args[1:]
+	if typ == "NONE" {
+		if len(args) != 0 {
+			return errors.New("HEALTHCHECK NONE takes no arguments")
+		}
+		test := strslice.StrSlice{typ}
+		runConfig.Healthcheck = &container.HealthConfig{
+			Test: test,
+		}
+	} else {
+		if runConfig.Healthcheck != nil {
+			oldCmd := runConfig.Healthcheck.Test
+			if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
+				fmt.Fprintf(req.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd)
+			}
+		}
+
+		healthcheck := container.HealthConfig{}
+
+		flInterval := req.flags.AddString("interval", "")
+		flTimeout := req.flags.AddString("timeout", "")
+		flStartPeriod := req.flags.AddString("start-period", "")
+		flRetries := req.flags.AddString("retries", "")
+
+		if err := req.flags.Parse(); err != nil {
+			return err
+		}
+
+		switch typ {
+		case "CMD":
+			cmdSlice := handleJSONArgs(args, req.attributes)
+			if len(cmdSlice) == 0 {
+				return errors.New("Missing command after HEALTHCHECK CMD")
+			}
+
+			if !req.attributes["json"] {
+				typ = "CMD-SHELL"
+			}
+
+			healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
+		default:
+			return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
+		}
+
+		interval, err := parseOptInterval(flInterval)
+		if err != nil {
+			return err
+		}
+		healthcheck.Interval = interval
+
+		timeout, err := parseOptInterval(flTimeout)
+		if err != nil {
+			return err
+		}
+		healthcheck.Timeout = timeout
+
+		startPeriod, err := parseOptInterval(flStartPeriod)
+		if err != nil {
+			return err
+		}
+		healthcheck.StartPeriod = startPeriod
+
+		if flRetries.Value != "" {
+			retries, err := strconv.ParseInt(flRetries.Value, 10, 32)
+			if err != nil {
+				return err
+			}
+			if retries < 1 {
+				return fmt.Errorf("--retries must be at least 1 (not %d)", retries)
+			}
+			healthcheck.Retries = int(retries)
+		} else {
+			healthcheck.Retries = 0
+		}
+
+		runConfig.Healthcheck = &healthcheck
+	}
+
+	return req.builder.commit(req.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck))
+}
+
+// ENTRYPOINT /usr/sbin/nginx
+//
+// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
+// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
+//
+// Handles command processing similarly to CMD and RUN, except that
+// req.runConfig.Entrypoint is initialized at newBuilder time instead of
+// through argument parsing.
+//
+func entrypoint(req dispatchRequest) error {
+	if err := req.flags.Parse(); err != nil {
+		return err
+	}
+
+	runConfig := req.state.runConfig
+	parsed := handleJSONArgs(req.args, req.attributes)
+
+	switch {
+	case req.attributes["json"]:
+		// ENTRYPOINT ["echo", "hi"]
+		runConfig.Entrypoint = strslice.StrSlice(parsed)
+	case len(parsed) == 0:
+		// ENTRYPOINT []
+		runConfig.Entrypoint = nil
+	default:
+		// ENTRYPOINT echo hi
+		runConfig.Entrypoint = strslice.StrSlice(append(getShell(runConfig, req.builder.platform), parsed[0]))
+	}
+
+	// When setting the entrypoint, if a CMD was not explicitly set then
+	// set the command to nil.
+	if !req.state.cmdSet {
+		runConfig.Cmd = nil
+	}
+
+	return req.builder.commit(req.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint))
+}
+
+// EXPOSE 6667/tcp 7000/tcp
+//
+// Expose ports for links and port mappings. This all ends up in
+// req.runConfig.ExposedPorts for runconfig.
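+// For example: `EXPOSE 80 53/udp` records both 80/tcp (tcp being the default
+// protocol applied by nat.ParsePortSpecs) and 53/udp in runConfig.ExposedPorts.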
+//
+func expose(req dispatchRequest) error {
+	portsTab := req.args
+
+	if len(req.args) == 0 {
+		return errAtLeastOneArgument("EXPOSE")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return err
+	}
+
+	runConfig := req.state.runConfig
+	if runConfig.ExposedPorts == nil {
+		runConfig.ExposedPorts = make(nat.PortSet)
+	}
+
+	ports, _, err := nat.ParsePortSpecs(portsTab)
+	if err != nil {
+		return err
+	}
+
+	// instead of using ports directly, we build a list of ports and sort it so
+	// the order is consistent. This prevents cache busting where map ordering
+	// changes between builds
+	portList := make([]string, len(ports))
+	var i int
+	for port := range ports {
+		if _, exists := runConfig.ExposedPorts[port]; !exists {
+			runConfig.ExposedPorts[port] = struct{}{}
+		}
+		portList[i] = string(port)
+		i++
+	}
+	sort.Strings(portList)
+	return req.builder.commit(req.state, "EXPOSE "+strings.Join(portList, " "))
+}
+
+// USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+//
+func user(req dispatchRequest) error {
+	if len(req.args) != 1 {
+		return errExactlyOneArgument("USER")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return err
+	}
+
+	req.state.runConfig.User = req.args[0]
+	return req.builder.commit(req.state, fmt.Sprintf("USER %v", req.args))
+}
+
+// VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON array form.
+//
+func volume(req dispatchRequest) error {
+	if len(req.args) == 0 {
+		return errAtLeastOneArgument("VOLUME")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return err
+	}
+
+	runConfig := req.state.runConfig
+	if runConfig.Volumes == nil {
+		runConfig.Volumes = map[string]struct{}{}
+	}
+	for _, v := range req.args {
+		v = strings.TrimSpace(v)
+		if v == "" {
+			return errors.New("VOLUME specified can not be an empty string")
+		}
+		runConfig.Volumes[v] = struct{}{}
+	}
+	return req.builder.commit(req.state, fmt.Sprintf("VOLUME %v", req.args))
+}
+
+// STOPSIGNAL signal
+//
+// Set the signal that will be used to kill the container.
+func stopSignal(req dispatchRequest) error {
+	if len(req.args) != 1 {
+		return errExactlyOneArgument("STOPSIGNAL")
+	}
+
+	sig := req.args[0]
+	_, err := signal.ParseSignal(sig)
+	if err != nil {
+		return err
+	}
+
+	req.state.runConfig.StopSignal = sig
+	return req.builder.commit(req.state, fmt.Sprintf("STOPSIGNAL %v", req.args))
+}
+
+// ARG name[=value]
+//
+// Adds the variable foo to the trusted list of variables that can be passed
+// to the builder using the --build-arg flag for expansion/substitution or
+// passing to 'run'. The Dockerfile author may optionally set a default value
+// for this variable.
+func arg(req dispatchRequest) error {
+	if len(req.args) != 1 {
+		return errExactlyOneArgument("ARG")
+	}
+
+	var (
+		name       string
+		newValue   string
+		hasDefault bool
+	)
+
+	arg := req.args[0]
+	// 'arg' can just be a name or name-value pair. Note that this is different
+	// from 'env' that handles the split of name and value at the parser level.
+	// The reason for doing it differently for 'arg' is that we support just
+	// defining an arg and not assigning it a value (while 'env' always expects a
+	// name-value pair). If possible, it would be good to harmonize the two.
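+	// e.g. `ARG FOO=bar` yields name="FOO" with default value "bar", while
+	// `ARG FOO` yields name="FOO" with no default (hasDefault stays false).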
+ if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return errBlankCommandNames("ARG") + } + + name = parts[0] + newValue = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + + var value *string + if hasDefault { + value = &newValue + } + req.builder.buildArgs.AddArg(name, value) + + // Arg before FROM doesn't add a layer + if !req.state.hasFromImage() { + req.builder.buildArgs.AddMetaArg(name, value) + return nil + } + return req.builder.commit(req.state, "ARG "+arg) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func shell(req dispatchRequest) error { + if err := req.flags.Parse(); err != nil { + return err + } + shellSlice := handleJSONArgs(req.args, req.attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return errAtLeastOneArgument("SHELL") + case req.attributes["json"]: + // SHELL ["powershell", "-command"] + req.state.runConfig.Shell = strslice.StrSlice(shellSlice) + default: + // SHELL powershell -command - not JSON + return errNotJSON("SHELL", req.original) + } + return req.builder.commit(req.state, fmt.Sprintf("SHELL %v", shellSlice)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errAtLeastTwoArguments(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + +func errBlankCommandNames(command string) error { + return fmt.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go new file mode 100644 index 0000000..b3672fc --- /dev/null +++ b/builder/dockerfile/dispatchers_test.go @@ -0,0 +1,525 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "testing" + + "bytes" + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type commandWithFunction struct { + name string + function func(args []string) error +} + +func withArgs(f dispatcher) func([]string) error { + return func(args []string) error { + return f(dispatchRequest{args: args}) + } +} + +func withBuilderAndArgs(builder *Builder, f dispatcher) func([]string) error { + return func(args []string) error { + return f(defaultDispatchReq(builder, args...)) + } +} + +func defaultDispatchReq(builder *Builder, args ...string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + flags: NewBFlags(), + shlex: NewShellLex(parser.DefaultEscapeToken), + state: &dispatchState{runConfig: &container.Config{}}, + } +} + +func newBuilderWithMockBackend() *Builder { + mockBackend := &MockBackend{} + ctx := context.Background() + b := &Builder{ + options: &types.ImageBuildOptions{}, + docker: mockBackend, + buildArgs: newBuildArgs(make(map[string]*string)), + Stdout: new(bytes.Buffer), + clientCtx: ctx, + 
disableCommit: true,
+		imageSources: newImageSources(ctx, builderOptions{
+			Options: &types.ImageBuildOptions{},
+			Backend: mockBackend,
+		}),
+		buildStages:      newBuildStages(),
+		imageProber:      newImageProber(mockBackend, nil, runtime.GOOS, false),
+		containerManager: newContainerManager(mockBackend),
+	}
+	return b
+}
+
+func TestCommandsExactlyOneArgument(t *testing.T) {
+	commands := []commandWithFunction{
+		{"MAINTAINER", withArgs(maintainer)},
+		{"WORKDIR", withArgs(workdir)},
+		{"USER", withArgs(user)},
+		{"STOPSIGNAL", withArgs(stopSignal)},
+	}
+
+	for _, command := range commands {
+		err := command.function([]string{})
+		assert.EqualError(t, err, errExactlyOneArgument(command.name).Error())
+	}
+}
+
+func TestCommandsAtLeastOneArgument(t *testing.T) {
+	commands := []commandWithFunction{
+		{"ENV", withArgs(env)},
+		{"LABEL", withArgs(label)},
+		{"ONBUILD", withArgs(onbuild)},
+		{"HEALTHCHECK", withArgs(healthcheck)},
+		{"EXPOSE", withArgs(expose)},
+		{"VOLUME", withArgs(volume)},
+	}
+
+	for _, command := range commands {
+		err := command.function([]string{})
+		assert.EqualError(t, err, errAtLeastOneArgument(command.name).Error())
+	}
+}
+
+func TestCommandsAtLeastTwoArguments(t *testing.T) {
+	commands := []commandWithFunction{
+		{"ADD", withArgs(add)},
+		{"COPY", withArgs(dispatchCopy)}}
+
+	for _, command := range commands {
+		err := command.function([]string{"arg1"})
+		assert.EqualError(t, err, errAtLeastTwoArguments(command.name).Error())
+	}
+}
+
+func TestCommandsTooManyArguments(t *testing.T) {
+	commands := []commandWithFunction{
+		{"ENV", withArgs(env)},
+		{"LABEL", withArgs(label)}}
+
+	for _, command := range commands {
+		err := command.function([]string{"arg1", "arg2", "arg3"})
+		assert.EqualError(t, err, errTooManyArguments(command.name).Error())
+	}
+}
+
+func TestCommandsBlankNames(t *testing.T) {
+	builder := newBuilderWithMockBackend()
+	commands := []commandWithFunction{
+		{"ENV", withBuilderAndArgs(builder, env)},
+		{"LABEL", withBuilderAndArgs(builder, label)},
+	}
+
+	for _, command := range commands {
+		err := command.function([]string{"", ""})
+		assert.EqualError(t, err, errBlankCommandNames(command.name).Error())
+	}
+}
+
+func TestEnv2Variables(t *testing.T) {
+	b := newBuilderWithMockBackend()
+
+	args := []string{"var1", "val1", "var2", "val2"}
+	req := defaultDispatchReq(b, args...)
+	err := env(req)
+	require.NoError(t, err)
+
+	expected := []string{
+		fmt.Sprintf("%s=%s", args[0], args[1]),
+		fmt.Sprintf("%s=%s", args[2], args[3]),
+	}
+	assert.Equal(t, expected, req.state.runConfig.Env)
+}
+
+func TestEnvValueWithExistingRunConfigEnv(t *testing.T) {
+	b := newBuilderWithMockBackend()
+
+	args := []string{"var1", "val1"}
+	req := defaultDispatchReq(b, args...)
+	req.state.runConfig.Env = []string{"var1=old", "var2=fromenv"}
+	err := env(req)
+	require.NoError(t, err)
+
+	expected := []string{
+		fmt.Sprintf("%s=%s", args[0], args[1]),
+		"var2=fromenv",
+	}
+	assert.Equal(t, expected, req.state.runConfig.Env)
+}
+
+func TestMaintainer(t *testing.T) {
+	maintainerEntry := "Some Maintainer <maintainer@example.com>"
+
+	b := newBuilderWithMockBackend()
+	req := defaultDispatchReq(b, maintainerEntry)
+	err := maintainer(req)
+	require.NoError(t, err)
+	assert.Equal(t, maintainerEntry, req.state.maintainer)
+}
+
+func TestLabel(t *testing.T) {
+	labelName := "label"
+	labelValue := "value"
+
+	labelEntry := []string{labelName, labelValue}
+	b := newBuilderWithMockBackend()
+	req := defaultDispatchReq(b, labelEntry...)
+ err := label(req) + require.NoError(t, err) + + require.Contains(t, req.state.runConfig.Labels, labelName) + assert.Equal(t, req.state.runConfig.Labels[labelName], labelValue) +} + +func TestFromScratch(t *testing.T) { + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, "scratch") + err := from(req) + + if runtime.GOOS == "windows" && !system.LCOWSupported() { + assert.EqualError(t, err, "Windows does not support FROM scratch") + return + } + + require.NoError(t, err) + assert.True(t, req.state.hasFromImage()) + assert.Equal(t, "", req.state.imageID) + // Windows does not set the default path. TODO @jhowardmsft LCOW support. This will need revisiting as we get further into the implementation + expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS) + if runtime.GOOS == "windows" { + expected = "" + } + assert.Equal(t, []string{expected}, req.state.runConfig.Env) +} + +func TestFromWithArg(t *testing.T) { + tag, expected := ":sometag", "expectedthisid" + + getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) { + assert.Equal(t, "alpine"+tag, name) + return &mockImage{id: "expectedthisid"}, nil, nil + } + b := newBuilderWithMockBackend() + b.docker.(*MockBackend).getImageFunc = getImage + + require.NoError(t, arg(defaultDispatchReq(b, "THETAG="+tag))) + req := defaultDispatchReq(b, "alpine${THETAG}") + err := from(req) + + require.NoError(t, err) + assert.Equal(t, expected, req.state.imageID) + assert.Equal(t, expected, req.state.baseImage.ImageID()) + assert.Len(t, b.buildArgs.GetAllAllowed(), 0) + assert.Len(t, b.buildArgs.GetAllMeta(), 1) +} + +func TestFromWithUndefinedArg(t *testing.T) { + tag, expected := "sometag", "expectedthisid" + + getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) { + assert.Equal(t, "alpine", name) + return &mockImage{id: "expectedthisid"}, nil, nil + } + b := newBuilderWithMockBackend() + b.docker.(*MockBackend).getImageFunc = getImage + b.options.BuildArgs = map[string]*string{"THETAG": &tag} + + req := defaultDispatchReq(b, "alpine${THETAG}") + err := from(req) + require.NoError(t, err) + assert.Equal(t, expected, req.state.imageID) +} + +func TestFromMultiStageWithScratchNamedStage(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Windows does not support scratch") + } + b := newBuilderWithMockBackend() + req := defaultDispatchReq(b, "scratch", "AS", "base") + + require.NoError(t, from(req)) + assert.True(t, req.state.hasFromImage()) + + req.args = []string{"base"} + require.NoError(t, from(req)) + assert.True(t, req.state.hasFromImage()) +} + +func TestOnbuildIllegalTriggers(t *testing.T) { + triggers := []struct{ command, expectedError string }{ + {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, + {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, + {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} + + for _, trigger := range triggers { + b := newBuilderWithMockBackend() + + err := onbuild(defaultDispatchReq(b, trigger.command)) + testutil.ErrorContains(t, err, trigger.expectedError) + } +} + +func TestOnbuild(t *testing.T) { + b := newBuilderWithMockBackend() + + req := defaultDispatchReq(b, "ADD", ".", "/app/src") + req.original = "ONBUILD ADD . /app/src" + req.state.runConfig = &container.Config{} + + err := onbuild(req) + require.NoError(t, err) + assert.Equal(t, "ADD . 
/app/src", req.state.runConfig.OnBuild[0]) +} + +func TestWorkdir(t *testing.T) { + b := newBuilderWithMockBackend() + workingDir := "/app" + if runtime.GOOS == "windows" { + workingDir = "C:\app" + } + + req := defaultDispatchReq(b, workingDir) + err := workdir(req) + require.NoError(t, err) + assert.Equal(t, workingDir, req.state.runConfig.WorkingDir) +} + +func TestCmd(t *testing.T) { + b := newBuilderWithMockBackend() + command := "./executable" + + req := defaultDispatchReq(b, command) + err := cmd(req) + require.NoError(t, err) + + var expectedCommand strslice.StrSlice + if runtime.GOOS == "windows" { + expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) + } else { + expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) + } + + assert.Equal(t, expectedCommand, req.state.runConfig.Cmd) + assert.True(t, req.state.cmdSet) +} + +func TestHealthcheckNone(t *testing.T) { + b := newBuilderWithMockBackend() + + req := defaultDispatchReq(b, "NONE") + err := healthcheck(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Healthcheck) + assert.Equal(t, []string{"NONE"}, req.state.runConfig.Healthcheck.Test) +} + +func TestHealthcheckCmd(t *testing.T) { + b := newBuilderWithMockBackend() + + args := []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"} + req := defaultDispatchReq(b, args...) + err := healthcheck(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Healthcheck) + expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"} + assert.Equal(t, expectedTest, req.state.runConfig.Healthcheck.Test) +} + +func TestEntrypoint(t *testing.T) { + b := newBuilderWithMockBackend() + entrypointCmd := "/usr/sbin/nginx" + + req := defaultDispatchReq(b, entrypointCmd) + err := entrypoint(req) + require.NoError(t, err) + require.NotNil(t, req.state.runConfig.Entrypoint) + + var expectedEntrypoint strslice.StrSlice + if runtime.GOOS == "windows" { + expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) + } else { + expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) + } + assert.Equal(t, expectedEntrypoint, req.state.runConfig.Entrypoint) +} + +func TestExpose(t *testing.T) { + b := newBuilderWithMockBackend() + + exposedPort := "80" + req := defaultDispatchReq(b, exposedPort) + err := expose(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.ExposedPorts) + require.Len(t, req.state.runConfig.ExposedPorts, 1) + + portsMapping, err := nat.ParsePortSpec(exposedPort) + require.NoError(t, err) + assert.Contains(t, req.state.runConfig.ExposedPorts, portsMapping[0].Port) +} + +func TestUser(t *testing.T) { + b := newBuilderWithMockBackend() + userCommand := "foo" + + req := defaultDispatchReq(b, userCommand) + err := user(req) + require.NoError(t, err) + assert.Equal(t, userCommand, req.state.runConfig.User) +} + +func TestVolume(t *testing.T) { + b := newBuilderWithMockBackend() + + exposedVolume := "/foo" + + req := defaultDispatchReq(b, exposedVolume) + err := volume(req) + require.NoError(t, err) + + require.NotNil(t, req.state.runConfig.Volumes) + assert.Len(t, req.state.runConfig.Volumes, 1) + assert.Contains(t, req.state.runConfig.Volumes, exposedVolume) +} + +func TestStopSignal(t *testing.T) { + b := newBuilderWithMockBackend() + signal := "SIGKILL" + + req := defaultDispatchReq(b, signal) + err := stopSignal(req) + require.NoError(t, err) + assert.Equal(t, signal, 
req.state.runConfig.StopSignal) +} + +func TestArg(t *testing.T) { + b := newBuilderWithMockBackend() + + argName := "foo" + argVal := "bar" + argDef := fmt.Sprintf("%s=%s", argName, argVal) + + err := arg(defaultDispatchReq(b, argDef)) + require.NoError(t, err) + + expected := map[string]string{argName: argVal} + assert.Equal(t, expected, b.buildArgs.GetAllAllowed()) +} + +func TestShell(t *testing.T) { + b := newBuilderWithMockBackend() + + shellCmd := "powershell" + req := defaultDispatchReq(b, shellCmd) + req.attributes = map[string]bool{"json": true} + + err := shell(req) + require.NoError(t, err) + + expectedShell := strslice.StrSlice([]string{shellCmd}) + assert.Equal(t, expectedShell, req.state.runConfig.Shell) +} + +func TestParseOptInterval(t *testing.T) { + flInterval := &Flag{ + name: "interval", + flagType: stringType, + Value: "50ns", + } + _, err := parseOptInterval(flInterval) + testutil.ErrorContains(t, err, "cannot be less than 1ms") + + flInterval.Value = "1ms" + _, err = parseOptInterval(flInterval) + require.NoError(t, err) +} + +func TestPrependEnvOnCmd(t *testing.T) { + buildArgs := newBuildArgs(nil) + buildArgs.AddArg("NO_PROXY", nil) + + args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"} + cmd := []string{"foo", "bar"} + cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd) + expected := strslice.StrSlice([]string{ + "|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"}) + assert.Equal(t, expected, cmdWithEnv) +} + +func TestRunWithBuildArgs(t *testing.T) { + b := newBuilderWithMockBackend() + b.buildArgs.argsFromOptions["HTTP_PROXY"] = strPtr("FOO") + b.disableCommit = false + + runConfig := &container.Config{} + origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) + cmdWithShell := strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo")) + envVars := []string{"|1", "one=two"} + cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...)) + + imageCache := &mockImageCache{ + getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { + // Check the runConfig.Cmd sent to probeCache() + assert.Equal(t, cachedCmd, cfg.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Entrypoint) + return "", nil + }, + } + + mockBackend := b.docker.(*MockBackend) + mockBackend.makeImageCacheFunc = func(_ []string, _ string) builder.ImageCache { + return imageCache + } + b.imageProber = newImageProber(mockBackend, nil, runtime.GOOS, false) + mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) { + return &mockImage{ + id: "abcdef", + config: &container.Config{Cmd: origCmd}, + }, nil, nil + } + mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + // Check the runConfig.Cmd sent to create() + assert.Equal(t, cmdWithShell, config.Config.Cmd) + assert.Contains(t, config.Config.Env, "one=two") + assert.Equal(t, strslice.StrSlice{""}, config.Config.Entrypoint) + return container.ContainerCreateCreatedBody{ID: "12345"}, nil + } + mockBackend.commitFunc = func(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + // Check the runConfig.Cmd sent to commit() + assert.Equal(t, origCmd, cfg.Config.Cmd) + assert.Equal(t, cachedCmd, cfg.ContainerConfig.Cmd) + assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint) + return "", nil + } + + req := defaultDispatchReq(b, "abcdef") + require.NoError(t, from(req)) + b.buildArgs.AddArg("one", strPtr("two")) + + req.args = []string{"echo foo"} 
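+	// Dispatch RUN; the mock backend assertions above check the configs used
+	// for the cache probe, container create, and commit.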
+	require.NoError(t, run(req))
+
+	// Check that runConfig.Cmd has not been modified by run
+	assert.Equal(t, origCmd, req.state.runConfig.Cmd)
+}
diff --git a/builder/dockerfile/dispatchers_unix.go b/builder/dockerfile/dispatchers_unix.go
new file mode 100644
index 0000000..62ee371
--- /dev/null
+++ b/builder/dockerfile/dispatchers_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package dockerfile
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// normaliseWorkdir normalises a user-requested working directory in a
+// platform semantically consistent way.
+func normaliseWorkdir(current string, requested string) (string, error) {
+	if requested == "" {
+		return "", errors.New("cannot normalise nothing")
+	}
+	current = filepath.FromSlash(current)
+	requested = filepath.FromSlash(requested)
+	if !filepath.IsAbs(requested) {
+		return filepath.Join(string(os.PathSeparator), current, requested), nil
+	}
+	return requested, nil
+}
+
+func errNotJSON(command, _ string) error {
+	return fmt.Errorf("%s requires the arguments to be in JSON form", command)
+}
+
+// equalEnvKeys compares two strings and returns true if they are equal. On
+// Windows this comparison is case insensitive.
+func equalEnvKeys(from, to string) bool {
+	return from == to
+}
diff --git a/builder/dockerfile/dispatchers_unix_test.go b/builder/dockerfile/dispatchers_unix_test.go
new file mode 100644
index 0000000..4aae6b4
--- /dev/null
+++ b/builder/dockerfile/dispatchers_unix_test.go
@@ -0,0 +1,33 @@
+// +build !windows
+
+package dockerfile
+
+import (
+	"testing"
+)
+
+func TestNormaliseWorkdir(t *testing.T) {
+	testCases := []struct{ current, requested, expected, expectedError string }{
+		{``, ``, ``, `cannot normalise nothing`},
+		{``, `foo`, `/foo`, ``},
+		{``, `/foo`, `/foo`, ``},
+		{`/foo`, `bar`, `/foo/bar`, ``},
+		{`/foo`, `/bar`, `/bar`, ``},
+	}
+
+	for _, test := range testCases {
+		normalised, err := normaliseWorkdir(test.current, test.requested)
+
+		if test.expectedError != "" && err == nil {
+			t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError)
+		}
+
+		if test.expectedError != "" && err.Error() != test.expectedError {
+			t.Fatalf("NormaliseWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error())
+		}
+
+		if normalised != test.expected {
+			t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised)
+		}
+	}
+}
diff --git a/builder/dockerfile/dispatchers_windows.go b/builder/dockerfile/dispatchers_windows.go
new file mode 100644
index 0000000..71f7c92
--- /dev/null
+++ b/builder/dockerfile/dispatchers_windows.go
@@ -0,0 +1,93 @@
+package dockerfile
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`)
+
+// normaliseWorkdir normalises a user-requested working directory in a
+// platform semantically consistent way.
+func normaliseWorkdir(current string, requested string) (string, error) {
+	if requested == "" {
+		return "", errors.New("cannot normalise nothing")
+	}
+
+	// `filepath.Clean` will replace "" with "." so skip in that case
+	if current != "" {
+		current = filepath.Clean(current)
+	}
+	if requested != "" {
+		requested = filepath.Clean(requested)
+	}
+
+	// If either current or requested in Windows is:
+	// C:
+	// C:.
+	// then an error will be thrown as the definition for the above
+	// refers to `current directory on drive C:`
+	// Since filepath.Clean() will automatically normalize the above
+	// to `C:.`, we only need to check the last format
+	if pattern.MatchString(current) {
+		return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current)
+	}
+	if pattern.MatchString(requested) {
+		return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested)
+	}
+
+	// Target semantics is C:\somefolder, specifically in the format:
+	// UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already
+	// guaranteed that `current`, if set, is consistent. This allows us to
+	// cope correctly with any of the following in a Dockerfile:
+	//	WORKDIR a                      --> C:\a
+	//	WORKDIR c:\\foo                --> C:\foo
+	//	WORKDIR \\foo                  --> C:\foo
+	//	WORKDIR /foo                   --> C:\foo
+	//	WORKDIR c:\\foo \ WORKDIR bar  --> C:\foo --> C:\foo\bar
+	//	WORKDIR C:/foo \ WORKDIR bar   --> C:\foo --> C:\foo\bar
+	//	WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar
+	//	WORKDIR /foo \ WORKDIR c:/bar  --> C:\foo --> C:\bar
+	if len(current) == 0 || system.IsAbs(requested) {
+		if (requested[0] == os.PathSeparator) ||
+			(len(requested) > 1 && string(requested[1]) != ":") ||
+			(len(requested) == 1) {
+			requested = filepath.Join(`C:\`, requested)
+		}
+	} else {
+		requested = filepath.Join(current, requested)
+	}
+	// Upper-case drive letter
+	return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
+}
+
+func errNotJSON(command, original string) error {
+	// For Windows users, give a hint if it looks like it might contain
+	// a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"],
+	// as JSON must be escaped. Unfortunate...
+	//
+	// Specifically looking for quote-driveletter-colon-backslash, there's no
+	// double backslash and a [] pair. No, this is not perfect, but it doesn't
+	// have to be. It's simply a hint to make life a little easier.
+	extra := ""
+	original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1)))
+	if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 &&
+		!strings.Contains(original, `\\`) &&
+		strings.Contains(original, "[") &&
+		strings.Contains(original, "]") {
+		extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
+	}
+	return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
+}
+
+// equalEnvKeys compares two strings and returns true if they are equal. On
+// Windows this comparison is case insensitive.
+func equalEnvKeys(from, to string) bool {
+	return strings.ToUpper(from) == strings.ToUpper(to)
+}
diff --git a/builder/dockerfile/dispatchers_windows_test.go b/builder/dockerfile/dispatchers_windows_test.go
new file mode 100644
index 0000000..3319c06
--- /dev/null
+++ b/builder/dockerfile/dispatchers_windows_test.go
@@ -0,0 +1,40 @@
+// +build windows
+
+package dockerfile
+
+import "testing"
+
+func TestNormaliseWorkdir(t *testing.T) {
+	tests := []struct{ current, requested, expected, etext string }{
+		{``, ``, ``, `cannot normalise nothing`},
+		{``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`},
+		{``, `C:.`, ``, `C:. is not a directory. 
If you are specifying a drive letter, please add a trailing '\'`}, + {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `a`, `C:\a`, ``}, + {``, `c:\foo`, `C:\foo`, ``}, + {``, `c:\\foo`, `C:\foo`, ``}, + {``, `\foo`, `C:\foo`, ``}, + {``, `\\foo`, `C:\foo`, ``}, + {``, `/foo`, `C:\foo`, ``}, + {``, `C:/foo`, `C:\foo`, ``}, + {`C:\foo`, `bar`, `C:\foo\bar`, ``}, + {`C:\foo`, `/bar`, `C:\bar`, ``}, + {`C:\foo`, `\bar`, `C:\bar`, ``}, + } + for _, i := range tests { + r, e := normaliseWorkdir(i.current, i.requested) + + if i.etext != "" && e == nil { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + } + + if i.etext != "" && e.Error() != i.etext { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + } + + if r != i.expected { + t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + } + } +} diff --git a/builder/dockerfile/envVarTest b/builder/dockerfile/envVarTest new file mode 100644 index 0000000..946b278 --- /dev/null +++ b/builder/dockerfile/envVarTest @@ -0,0 +1,121 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | error +A|he\'llo | he'llo +A|he\\'llo | error +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|"abc\\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | error +A|"hello\" | error +A|"hel'lo" | hel'lo +A|'hello | error +A|'hello\' | hello\ +A|'hello\there' | hello\there +A|'hello\\there' | hello\\there +A|"''" | '' +A|$. | $. +A|$1 | +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. +A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|"he\$PWD" | he$PWD +A|"he\\$PWD" | he\/home +A|he\${} | he${} +A|he\${}xx | he${}xx +A|he${} | he +A|he${}xx | hexx +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | error +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | error +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | error +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | error +A|"안녕하세요\" | error +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | error +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. 
+A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | 안녕 +A|안녕${}xx | 안녕xx +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go new file mode 100644 index 0000000..ba43159 --- /dev/null +++ b/builder/dockerfile/evaluator.go @@ -0,0 +1,327 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling newBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "bytes" + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/pkg/errors" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. 
+var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +type dispatchRequest struct { + builder *Builder // TODO: replace this with a smaller interface + args []string + attributes map[string]bool + flags *BFlags + original string + shlex *ShellLex + state *dispatchState + source builder.Source +} + +func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest { + return dispatchRequest{ + builder: builder, + args: args, + attributes: options.node.Attributes, + original: options.node.Original, + flags: NewBFlagsWithArgs(options.node.Flags), + shlex: options.shlex, + state: options.state, + source: options.source, + } +} + +type dispatcher func(dispatchRequest) error + +var evaluateTable map[string]dispatcher + +func init() { + evaluateTable = map[string]dispatcher{ + command.Add: add, + command.Arg: arg, + command.Cmd: cmd, + command.Copy: dispatchCopy, // copy() is a go builtin + command.Entrypoint: entrypoint, + command.Env: env, + command.Expose: expose, + command.From: from, + command.Healthcheck: healthcheck, + command.Label: label, + command.Maintainer: maintainer, + command.Onbuild: onbuild, + command.Run: run, + command.Shell: shell, + command.StopSignal: stopSignal, + command.User: user, + command.Volume: volume, + command.Workdir: workdir, + } +} + +func formatStep(stepN int, stepTotal int) string { + return fmt.Sprintf("%d/%d", stepN+1, stepTotal) +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(options dispatchOptions) (*dispatchState, error) { + node := options.node + cmd := node.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + buildsFailed.WithValues(metricsCommandNotSupportedError).Inc() + return nil, err + } + + msg := bytes.NewBufferString(fmt.Sprintf("Step %s : %s%s", + options.stepMsg, upperCasedCmd, formatFlags(node.Flags))) + + args := []string{} + ast := node + if cmd == command.Onbuild { + var err error + ast, args, err = handleOnBuildNode(node, msg) + if err != nil { + return nil, err + } + } + + runConfigEnv := options.state.runConfig.Env + envs := append(runConfigEnv, b.buildArgs.FilterAllowed(runConfigEnv)...) + processFunc := createProcessWordFunc(options.shlex, cmd, envs) + words, err := getDispatchArgsFromNode(ast, processFunc, msg) + if err != nil { + buildsFailed.WithValues(metricsErrorProcessingCommandsError).Inc() + return nil, err + } + args = append(args, words...) 
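+	// msg now holds the full step header, e.g. "Step 1/4 : RUN echo hi".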
+
+	fmt.Fprintln(b.Stdout, msg.String())
+
+	f, ok := evaluateTable[cmd]
+	if !ok {
+		buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
+		return nil, fmt.Errorf("unknown instruction: %s", upperCasedCmd)
+	}
+	options.state.updateRunConfig()
+	err = f(newDispatchRequestFromOptions(options, b, args))
+	return options.state, err
+}
+
+type dispatchOptions struct {
+	state   *dispatchState
+	stepMsg string
+	node    *parser.Node
+	shlex   *ShellLex
+	source  builder.Source
+}
+
+// dispatchState is a data object which is modified by dispatchers
+type dispatchState struct {
+	runConfig  *container.Config
+	maintainer string
+	cmdSet     bool
+	imageID    string
+	baseImage  builder.Image
+	stageName  string
+}
+
+func newDispatchState() *dispatchState {
+	return &dispatchState{runConfig: &container.Config{}}
+}
+
+func (s *dispatchState) updateRunConfig() {
+	s.runConfig.Image = s.imageID
+}
+
+// hasFromImage returns true if the builder has processed a `FROM <image>` line
+func (s *dispatchState) hasFromImage() bool {
+	return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "")
+}
+
+func (s *dispatchState) isCurrentStage(target string) bool {
+	if target == "" {
+		return false
+	}
+	return strings.EqualFold(s.stageName, target)
+}
+
+func (s *dispatchState) beginStage(stageName string, image builder.Image) {
+	s.stageName = stageName
+	s.imageID = image.ImageID()
+
+	if image.RunConfig() != nil {
+		s.runConfig = image.RunConfig()
+	} else {
+		s.runConfig = &container.Config{}
+	}
+	s.baseImage = image
+	s.setDefaultPath()
+}
+
+// Add the default PATH to runConfig.Env if one exists for the platform and there
+// is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS
+func (s *dispatchState) setDefaultPath() {
+	// TODO @jhowardmsft LCOW Support - This will need revisiting later
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+	if system.DefaultPathEnv(platform) == "" {
+		return
+	}
+	envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
+	if _, ok := envMap["PATH"]; !ok {
+		s.runConfig.Env = append(s.runConfig.Env, "PATH="+system.DefaultPathEnv(platform))
+	}
+}
+
+func handleOnBuildNode(ast *parser.Node, msg *bytes.Buffer) (*parser.Node, []string, error) {
+	if ast.Next == nil {
+		return nil, nil, errors.New("ONBUILD requires at least one argument")
+	}
+	ast = ast.Next.Children[0]
+	msg.WriteString(" " + ast.Value + formatFlags(ast.Flags))
+	return ast, []string{ast.Value}, nil
+}
+
+func formatFlags(flags []string) string {
+	if len(flags) > 0 {
+		return " " + strings.Join(flags, " ")
+	}
+	return ""
+}
+
+func getDispatchArgsFromNode(ast *parser.Node, processFunc processWordFunc, msg *bytes.Buffer) ([]string, error) {
+	args := []string{}
+	for i := 0; ast.Next != nil; i++ {
+		ast = ast.Next
+		words, err := processFunc(ast.Value)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, words...)
+		msg.WriteString(" " + ast.Value)
+	}
+	return args, nil
+}
+
+type processWordFunc func(string) ([]string, error)
+
+func createProcessWordFunc(shlex *ShellLex, cmd string, envs []string) processWordFunc {
+	switch {
+	case !replaceEnvAllowed[cmd]:
+		return func(word string) ([]string, error) {
+			return []string{word}, nil
+		}
+	case allowWordExpansion[cmd]:
+		return func(word string) ([]string, error) {
+			return shlex.ProcessWords(word, envs)
+		}
+	default:
+		return func(word string) ([]string, error) {
+			word, err := shlex.ProcessWord(word, envs)
+			return []string{word}, err
+		}
+	}
+}
+
+// checkDispatch does a simple check for syntax errors of the Dockerfile.
+// Because some of the instructions can only be validated through runtime,
+// arg, env, etc., this syntax check will not be complete and cannot replace
+// the runtime check. Instead, this function is only a helper that allows
+// the user to find obvious errors in the Dockerfile earlier on.
+func checkDispatch(ast *parser.Node) error {
+	cmd := ast.Value
+	upperCasedCmd := strings.ToUpper(cmd)
+
+	// To ensure the user is given a decent error message if the platform
+	// on which the daemon is running does not support a builder command.
+	if err := platformSupports(strings.ToLower(cmd)); err != nil {
+		return err
+	}
+
+	// The instruction itself is ONBUILD, we will make sure it follows with at
+	// least one argument
+	if upperCasedCmd == "ONBUILD" {
+		if ast.Next == nil {
+			buildsFailed.WithValues(metricsMissingOnbuildArgumentsError).Inc()
+			return errors.New("ONBUILD requires at least one argument")
+		}
+	}
+
+	if _, ok := evaluateTable[cmd]; ok {
+		return nil
+	}
+	buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
+	return errors.Errorf("unknown instruction: %s", upperCasedCmd)
+}
diff --git a/builder/dockerfile/evaluator_test.go b/builder/dockerfile/evaluator_test.go
new file mode 100644
index 0000000..72d7ce1
--- /dev/null
+++ b/builder/dockerfile/evaluator_test.go
@@ -0,0 +1,210 @@
+package dockerfile
+
+import (
+	"io/ioutil"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+type dispatchTestCase struct {
+	name, dockerfile, expectedError string
+	files                           map[string]string
+}
+
+func init() {
+	reexec.Init()
+}
+
+func initDispatchTestCases() []dispatchTestCase {
+	dispatchTestCases := []dispatchTestCase{{
+		name: "copyEmptyWhitespace",
+		dockerfile: `COPY
+	quux \
+	bar`,
+		expectedError: "COPY requires at least two arguments",
+	},
+		{
+			name:          "ONBUILD forbidden FROM",
+			dockerfile:    "ONBUILD FROM scratch",
+			expectedError: "FROM isn't allowed as an ONBUILD trigger",
+			files:         nil,
+		},
+		{
+			name:          "ONBUILD forbidden MAINTAINER",
+			dockerfile:    "ONBUILD MAINTAINER docker.io",
+			expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
+			files:         nil,
+		},
+		{
+			name:          "ARG two arguments",
+			dockerfile:    "ARG foo bar",
+			expectedError: "ARG requires exactly one argument",
+			files:         nil,
+		},
+		{
+			name:          "MAINTAINER unknown flag",
+			dockerfile:    "MAINTAINER --boo joe@example.com",
+			expectedError: "Unknown flag: boo",
+			files:         nil,
+		},
+		{
+			name:          "ADD multiple files to file",
+			dockerfile:    "ADD file1.txt file2.txt test",
+			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
+			files:
map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON ADD multiple files to file", + dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard ADD multiple files to file", + dockerfile: "ADD file*.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard JSON ADD multiple files to file", + dockerfile: `ADD ["file*.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file", + dockerfile: "COPY file1.txt file2.txt test", + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "COPY failed: no source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "unknown instruction: FOO", + files: nil, + }} + + return dispatchTestCases +} + +func TestDispatch(t *testing.T) { + testCases := initDispatchTestCases() + + for _, testCase := range testCases { + executeTestCase(t, testCase) + } +} + +func executeTestCase(t *testing.T, testCase dispatchTestCase) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() 
+
+ context, err := remotecontext.FromArchive(tarStream)
+
+ if err != nil {
+ t.Fatalf("Error when creating tar context: %s", err)
+ }
+
+ defer func() {
+ if err = context.Close(); err != nil {
+ t.Fatalf("Error when closing tar context: %s", err)
+ }
+ }()
+
+ r := strings.NewReader(testCase.dockerfile)
+ result, err := parser.Parse(r)
+
+ if err != nil {
+ t.Fatalf("Error when parsing Dockerfile: %s", err)
+ }
+
+ options := &types.ImageBuildOptions{
+ BuildArgs: make(map[string]*string),
+ }
+
+ b := &Builder{
+ options: options,
+ Stdout: ioutil.Discard,
+ buildArgs: newBuildArgs(options.BuildArgs),
+ }
+
+ shlex := NewShellLex(parser.DefaultEscapeToken)
+ n := result.AST
+ state := &dispatchState{runConfig: &container.Config{}}
+ opts := dispatchOptions{
+ state: state,
+ stepMsg: formatStep(0, len(n.Children)),
+ node: n.Children[0],
+ shlex: shlex,
+ source: context,
+ }
+ state, err = b.dispatch(opts)
+
+ if err == nil {
+ t.Fatalf("No error when executing test %s", testCase.name)
+ }
+
+ if !strings.Contains(err.Error(), testCase.expectedError) {
+ t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error())
+ }
+
+}
diff --git a/builder/dockerfile/evaluator_unix.go b/builder/dockerfile/evaluator_unix.go
new file mode 100644
index 0000000..28fd5b1
--- /dev/null
+++ b/builder/dockerfile/evaluator_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package dockerfile
+
+// platformSupports is a short-term function to give users a quality error
+// message if a Dockerfile uses a command not supported on the platform.
+func platformSupports(command string) error {
+ return nil
+}
diff --git a/builder/dockerfile/evaluator_windows.go b/builder/dockerfile/evaluator_windows.go
new file mode 100644
index 0000000..72483a2
--- /dev/null
+++ b/builder/dockerfile/evaluator_windows.go
@@ -0,0 +1,13 @@
+package dockerfile
+
+import "fmt"
+
+// platformSupports gives users a quality error message if a Dockerfile uses
+// a command not supported on the platform.
+func platformSupports(command string) error {
+ switch command {
+ case "stopsignal":
+ return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
+ }
+ return nil
+}
diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go
new file mode 100644
index 0000000..64b2572
--- /dev/null
+++ b/builder/dockerfile/imagecontext.go
@@ -0,0 +1,211 @@
+package dockerfile
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/types/backend"
+ "github.com/docker/docker/builder"
+ "github.com/docker/docker/builder/remotecontext"
+ dockerimage "github.com/docker/docker/image"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+type buildStage struct {
+ id string
+}
+
+func newBuildStage(imageID string) *buildStage {
+ return &buildStage{id: imageID}
+}
+
+func (b *buildStage) ImageID() string {
+ return b.id
+}
+
+func (b *buildStage) update(imageID string) {
+ b.id = imageID
+}
+
+// buildStages tracks each stage of a build so they can be retrieved by index
+// or by name.
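+//
+// In a multi-stage Dockerfile a stage can be referenced either way: for
+// example, `COPY --from=0` resolves by index, while `FROM busybox AS builder`
+// followed by `COPY --from=builder` resolves by (lowercased) name.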
+type buildStages struct { + sequence []*buildStage + byName map[string]*buildStage +} + +func newBuildStages() *buildStages { + return &buildStages{byName: make(map[string]*buildStage)} +} + +func (s *buildStages) getByName(name string) (*buildStage, bool) { + stage, ok := s.byName[strings.ToLower(name)] + return stage, ok +} + +func (s *buildStages) get(indexOrName string) (*buildStage, error) { + index, err := strconv.Atoi(indexOrName) + if err == nil { + if err := s.validateIndex(index); err != nil { + return nil, err + } + return s.sequence[index], nil + } + if im, ok := s.byName[strings.ToLower(indexOrName)]; ok { + return im, nil + } + return nil, nil +} + +func (s *buildStages) validateIndex(i int) error { + if i < 0 || i >= len(s.sequence)-1 { + if i == len(s.sequence)-1 { + return errors.New("refers to current build stage") + } + return errors.New("index out of bounds") + } + return nil +} + +func (s *buildStages) add(name string, image builder.Image) error { + stage := newBuildStage(image.ImageID()) + name = strings.ToLower(name) + if len(name) > 0 { + if _, ok := s.byName[name]; ok { + return errors.Errorf("duplicate name %s", name) + } + s.byName[name] = stage + } + s.sequence = append(s.sequence, stage) + return nil +} + +func (s *buildStages) update(imageID string) { + s.sequence[len(s.sequence)-1].update(imageID) +} + +type getAndMountFunc func(string, bool) (builder.Image, builder.ReleaseableLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. +type imageSources struct { + byImageID map[string]*imageMount + mounts []*imageMount + getImage getAndMountFunc + cache pathCache // TODO: remove +} + +// TODO @jhowardmsft LCOW Support: Eventually, platform can be moved to options.Options.Platform, +// and removed from builderOptions, but that can't be done yet as it would affect the API. 
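+//
+// The image getter below maps builder options onto a pull policy: local-only
+// lookups never pull, PullParent (docker build --pull) forces a pull, and the
+// default prefers a local image before falling back to a registry pull.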
+func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string, localOnly bool) (builder.Image, builder.ReleaseableLayer, error) { + pullOption := backend.PullOptionNoPull + if !localOnly { + if options.Options.PullParent { + pullOption = backend.PullOptionForcePull + } else { + pullOption = backend.PullOptionPreferLocal + } + } + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + PullOption: pullOption, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + Platform: options.Platform, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string, localOnly bool) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { + return im, nil + } + + image, layer, err := m.getImage(idOrRef, localOnly) + if err != nil { + return nil, err + } + im := newImageMount(image, layer) + m.Add(im) + return im, nil +} + +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.mounts { + if err := im.unmount(); err != nil { + logrus.Error(err) + retErr = err + } + } + return +} + +func (m *imageSources) Add(im *imageMount) { + switch im.image { + case nil: + im.image = &dockerimage.Image{} + default: + m.byImageID[im.image.ImageID()] = im + } + m.mounts = append(m.mounts, im) +} + +// imageMount is a reference to an image that can be used as a builder.Source +type imageMount struct { + image builder.Image + source builder.Source + layer builder.ReleaseableLayer +} + +func newImageMount(image builder.Image, layer builder.ReleaseableLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) Source() (builder.Source, error) { + if im.source == nil { + if im.layer == nil { + return nil, errors.Errorf("empty context") + } + mountPath, err := im.layer.Mount() + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", im.image.ImageID()) + } + source, err := remotecontext.NewLazySource(mountPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to create lazycontext for %s", mountPath) + } + im.source = source + } + return im.source, nil +} + +func (im *imageMount) unmount() error { + if im.layer == nil { + return nil + } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + } + im.layer = nil + return nil +} + +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) Layer() builder.ReleaseableLayer { + return im.layer +} + +func (im *imageMount) ImageID() string { + return im.image.ImageID() +} diff --git a/builder/dockerfile/imageprobe.go b/builder/dockerfile/imageprobe.go new file mode 100644 index 0000000..3433612 --- /dev/null +++ b/builder/dockerfile/imageprobe.go @@ -0,0 +1,63 @@ +package dockerfile + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" +) + +// ImageProber exposes an Image cache to the Builder. It supports resetting a +// cache. 
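+//
+// Probe returns the cached image ID for a (parent image, run config) pair on
+// a hit and the empty string on a miss; after a miss the cache is considered
+// busted until Reset is called.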
+type ImageProber interface { + Reset() + Probe(parentID string, runConfig *container.Config) (string, error) +} + +type imageProber struct { + cache builder.ImageCache + reset func() builder.ImageCache + cacheBusted bool +} + +func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, platform string, noCache bool) ImageProber { + if noCache { + return &nopProber{} + } + + reset := func() builder.ImageCache { + return cacheBuilder.MakeImageCache(cacheFrom, platform) + } + return &imageProber{cache: reset(), reset: reset} +} + +func (c *imageProber) Reset() { + c.cache = c.reset() + c.cacheBusted = false +} + +// Probe checks if cache match can be found for current build instruction. +// It returns the cachedID if there is a hit, and the empty string on miss +func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { + if c.cacheBusted { + return "", nil + } + cacheID, err := c.cache.GetCache(parentID, runConfig) + if err != nil { + return "", err + } + if len(cacheID) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + c.cacheBusted = true + return "", nil + } + logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + return cacheID, nil +} + +type nopProber struct{} + +func (c *nopProber) Reset() {} + +func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { + return "", nil +} diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go new file mode 100644 index 0000000..17511bc --- /dev/null +++ b/builder/dockerfile/internals.go @@ -0,0 +1,310 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { + if b.disableCommit { + return nil + } + if !dispatchState.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to commit") + } + + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, b.platform)) + hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + id, err := b.create(runConfigWithCommentCmd) + if err != nil { + return err + } + + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) +} + +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Author: dispatchState.maintainer, + Pause: true, + // TODO: this should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), + }, + ContainerConfig: containerConfig, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + dispatchState.imageID = imageID + b.buildStages.update(imageID) + return nil +} + +func (b *Builder) exportImage(state *dispatchState, imageMount *imageMount, runConfig *container.Config) error { + newLayer, err := imageMount.Layer().Commit(b.platform) + if err != nil { 
+ return err
+ }
+
+ // add an image mount without an image so the layer is properly unmounted
+ // if there is an error before we can add the full mount with image
+ b.imageSources.Add(newImageMount(nil, newLayer))
+
+ parentImage, ok := imageMount.Image().(*image.Image)
+ if !ok {
+ return errors.Errorf("unexpected image type")
+ }
+
+ newImage := image.NewChildImage(parentImage, image.ChildConfig{
+ Author: state.maintainer,
+ ContainerConfig: runConfig,
+ DiffID: newLayer.DiffID(),
+ Config: copyRunConfig(state.runConfig),
+ }, parentImage.OS)
+
+ // TODO: it seems strange to marshal this here instead of just passing in the
+ // image struct
+ config, err := newImage.MarshalJSON()
+ if err != nil {
+ return errors.Wrap(err, "failed to encode image config")
+ }
+
+ exportedImage, err := b.docker.CreateImage(config, state.imageID, parentImage.OS)
+ if err != nil {
+ return errors.Wrapf(err, "failed to export image")
+ }
+
+ state.imageID = exportedImage.ImageID()
+ b.imageSources.Add(newImageMount(exportedImage, newLayer))
+ b.buildStages.update(state.imageID)
+ return nil
+}
+
+func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error {
+ srcHash := getSourceHashFromInfos(inst.infos)
+
+ // TODO: should this have been using origPaths instead of srcHash in the comment?
+ runConfigWithCommentCmd := copyRunConfig(
+ state.runConfig,
+ withCmdCommentString(fmt.Sprintf("%s %s in %s ", inst.cmdName, srcHash, inst.dest), b.platform))
+ hit, err := b.probeCache(state, runConfigWithCommentCmd)
+ if err != nil || hit {
+ return err
+ }
+
+ imageMount, err := b.imageSources.Get(state.imageID, true)
+ if err != nil {
+ return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
+ }
+ destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount)
+ if err != nil {
+ return err
+ }
+
+ opts := copyFileOptions{
+ decompress: inst.allowLocalDecompression,
+ archiver: b.archiver,
+ }
+ for _, info := range inst.infos {
+ if err := performCopyForInfo(destInfo, info, opts); err != nil {
+ return errors.Wrapf(err, "failed to copy files")
+ }
+ }
+ return b.exportImage(state, imageMount, runConfigWithCommentCmd)
+}
+
+func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) {
+ // Twiddle the destination when it's a relative path - meaning, make it
+ // relative to the WORKINGDIR
+ dest, err := normaliseDest(workingDir, inst.dest)
+ if err != nil {
+ return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName)
+ }
+
+ destMount, err := imageMount.Source()
+ if err != nil {
+ return copyInfo{}, errors.Wrapf(err, "failed to mount copy source")
+ }
+
+ return newCopyInfoFromSource(destMount, dest, ""), nil
+}
+
+// For backwards compat, if there's just one info then use it as the
+// cache look-up string, otherwise hash 'em all into one
+func getSourceHashFromInfos(infos []copyInfo) string {
+ if len(infos) == 1 {
+ return infos[0].hash
+ }
+ var hashs []string
+ for _, info := range infos {
+ hashs = append(hashs, info.hash)
+ }
+ return hashStringSlice("multi", hashs)
+}
+
+func hashStringSlice(prefix string, slice []string) string {
+ hasher := sha256.New()
+ hasher.Write([]byte(strings.Join(slice, ",")))
+ return prefix + ":" + hex.EncodeToString(hasher.Sum(nil))
+}
+
+type runConfigModifier func(*container.Config)
+
+func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
+ copy := *runConfig
+ for _, modifier := range modifiers {
+ modifier(&copy)
+ }
+ return &copy
+}
+
+func withCmd(cmd []string) runConfigModifier {
+ return func(runConfig *container.Config) {
+ runConfig.Cmd = cmd
+ }
+}
+
+// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
+// why there are two almost identical versions of this.
+func withCmdComment(comment string, platform string) runConfigModifier {
+ return func(runConfig *container.Config) {
+ runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment)
+ }
+}
+
+// withCmdCommentString exists to maintain compatibility with older versions.
+// A few instructions (workdir, copy, add) used a nop comment that is a single arg,
+// whereas all the other instructions used a two arg comment string. This
+// function implements the single arg version.
+func withCmdCommentString(comment string, platform string) runConfigModifier {
+ return func(runConfig *container.Config) {
+ runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment)
+ }
+}
+
+func withEnv(env []string) runConfigModifier {
+ return func(runConfig *container.Config) {
+ runConfig.Env = env
+ }
+}
+
+// withEntrypointOverride sets an entrypoint on runConfig if the command is
+// not empty. The entrypoint is left unmodified if command is empty.
+//
+// The dockerfile RUN instruction expects to run without an entrypoint
+// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
+// will change a []string{""} entrypoint to nil, so we probe the cache with the
+// nil entrypoint.
+func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
+ return func(runConfig *container.Config) {
+ if len(cmd) > 0 {
+ runConfig.Entrypoint = entrypoint
+ }
+ }
+}
+
+// getShell is a helper function which gets the right shell for prefixing the
+// shell-form of RUN, ENTRYPOINT and CMD instructions
+func getShell(c *container.Config, platform string) []string {
+ if 0 == len(c.Shell) {
+ return append([]string{}, defaultShellForPlatform(platform)[:]...)
+ }
+ return append([]string{}, c.Shell[:]...)
+}
+
+func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
+ cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig)
+ if cachedID == "" || err != nil {
+ return false, err
+ }
+ fmt.Fprint(b.Stdout, " ---> Using cache\n")
+
+ dispatchState.imageID = string(cachedID)
+ b.buildStages.update(dispatchState.imageID)
+ return true, nil
+}
+
+var defaultLogConfig = container.LogConfig{Type: "none"}
+
+func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) {
+ if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit {
+ return "", err
+ }
+ // Set a log config to override any default value set on the daemon
+ hostConfig := &container.HostConfig{LogConfig: defaultLogConfig}
+ container, err := b.containerManager.Create(runConfig, hostConfig, b.platform)
+ return container.ID, err
+}
+
+func (b *Builder) create(runConfig *container.Config) (string, error) {
+ // only allow bind-mounting during build
+ for _, bind := range b.options.Volumes {
+ parsed, _ := loader.ParseVolume(bind)
+ if parsed.Source == "" {
+ return "", fmt.Errorf("Cannot use non-bind mount during build: %s", bind)
+ }
+ }
+
+ hostConfig := hostConfigFromOptions(b.options)
+ container, err := b.containerManager.Create(runConfig, hostConfig, b.platform)
+ if err != nil {
+ return "", err
+ }
+ // TODO: could this be moved into containerManager.Create() ?
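+ // Surface any warnings produced during creation on the build output
+ // before handing back the new container ID.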
+ for _, warning := range container.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) + return container.ID, nil +} + +func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig { + resources := container.Resources{ + CgroupParent: options.CgroupParent, + CPUShares: options.CPUShares, + CPUPeriod: options.CPUPeriod, + CPUQuota: options.CPUQuota, + CpusetCpus: options.CPUSetCPUs, + CpusetMems: options.CPUSetMems, + Memory: options.Memory, + MemorySwap: options.MemorySwap, + Ulimits: options.Ulimits, + } + + return &container.HostConfig{ + SecurityOpt: options.SecurityOpt, + Isolation: options.Isolation, + ShmSize: options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: options.ExtraHosts, + Binds: options.Volumes, + } +} diff --git a/builder/dockerfile/internals_test.go b/builder/dockerfile/internals_test.go new file mode 100644 index 0000000..8073cc6 --- /dev/null +++ b/builder/dockerfile/internals_test.go @@ -0,0 +1,131 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEmptyDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) + + readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be empty") +} + +func TestSymlinkDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd") + + // The reason the error is "Cannot locate specified Dockerfile" is because + // in the builder, the symlink is resolved within the context, therefore + // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is + // a nonexistent file. 
+ expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) + + readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) +} + +func TestDockerfileOutsideTheBuildContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Forbidden path outside the build context: ../../Dockerfile ()" + + readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) +} + +func TestNonExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Cannot locate specified Dockerfile: Dockerfile" + + readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError) +} + +func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + require.NoError(t, err) + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + if dockerfilePath == "" { // handled in BuildWithContext + dockerfilePath = builder.DefaultDockerfileName + } + + config := backend.BuildConfig{ + Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath}, + Source: tarStream, + } + _, _, err = remotecontext.Detect(config) + assert.EqualError(t, err, expectedError) +} + +func TestCopyRunConfig(t *testing.T) { + defaultEnv := []string{"foo=1"} + defaultCmd := []string{"old"} + + var testcases = []struct { + doc string + modifiers []runConfigModifier + expected *container.Config + }{ + { + doc: "Set the command", + modifiers: []runConfigModifier{withCmd([]string{"new"})}, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: defaultEnv, + }, + }, + { + doc: "Set the command to a comment", + modifiers: []runConfigModifier{withCmdComment("comment", runtime.GOOS)}, + expected: &container.Config{ + Cmd: append(defaultShellForPlatform(runtime.GOOS), "#(nop) ", "comment"), + Env: defaultEnv, + }, + }, + { + doc: "Set the command and env", + modifiers: []runConfigModifier{ + withCmd([]string{"new"}), + withEnv([]string{"one", "two"}), + }, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: []string{"one", "two"}, + }, + }, + } + + for _, testcase := range testcases { + runConfig := &container.Config{ + Cmd: defaultCmd, + Env: defaultEnv, + } + runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...) + assert.Equal(t, testcase.expected, runConfigCopy, testcase.doc) + // Assert the original was not modified + assert.NotEqual(t, runConfig, runConfigCopy, testcase.doc) + } + +} diff --git a/builder/dockerfile/internals_unix.go b/builder/dockerfile/internals_unix.go new file mode 100644 index 0000000..f4784e1 --- /dev/null +++ b/builder/dockerfile/internals_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. 
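+//
+// For example (illustrative), with a working directory of /wd on Linux:
+//   normaliseDest("/wd", "foo/") => "/wd/foo/" (trailing slash preserved)
+//   normaliseDest("/wd", "/abs") => "/abs"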
+func normaliseDest(workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) + if !system.IsAbs(requested) { + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func validateCopySourcePath(imageSource *imageMount, origPath string) error { + return nil +} diff --git a/builder/dockerfile/internals_windows.go b/builder/dockerfile/internals_windows.go new file mode 100644 index 0000000..bb32859 --- /dev/null +++ b/builder/dockerfile/internals_windows.go @@ -0,0 +1,95 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. + if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +func validateCopySourcePath(imageSource *imageMount, origPath string) error { + // validate windows paths from other images + if imageSource == nil { + return nil + } + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/builder/dockerfile/internals_windows_test.go b/builder/dockerfile/internals_windows_test.go new file mode 100644 index 0000000..b4c8d4b --- /dev/null +++ b/builder/dockerfile/internals_windows_test.go @@ -0,0 +1,53 @@ +// +build windows + +package dockerfile + +import ( + "fmt" + "testing" + + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" +) + +func TestNormaliseDest(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, `D:\`, ``, `Windows does not support destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support destinations not on the system drive (C:)`}, + {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, + {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, + {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, + {`D:\`, `.`, ``, "Windows does not support relative paths when WORKDIR is not the system drive"}, + {``, `D`, `D`, ``}, + {``, `./a1`, `.\a1`, ``}, + {``, `.\b1`, `.\b1`, ``}, + {``, `/`, `\`, ``}, + {``, `\`, `\`, ``}, + {``, `c:/`, `\`, ``}, + {``, `c:\`, `\`, ``}, + {``, `.`, `.`, ``}, + {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, + {`C:\wde`, `.\b1`, `\wde\b1`, ``}, + {`C:\wdf`, `/`, `\`, ``}, + {`C:\wdg`, `\`, `\`, ``}, + {`C:\wdh`, `c:/`, `\`, ``}, + {`C:\wdi`, `c:\`, `\`, ``}, + {`C:\wdj`, `.`, `\wdj`, ``}, + {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, + {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, + {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, + {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, + } + for _, testcase := range tests { + msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested) + actual, err := normaliseDest(testcase.current, testcase.requested) + if testcase.etext == "" { + if !assert.NoError(t, err, msg) { + continue + } + assert.Equal(t, testcase.expected, actual, msg) + } else { + testutil.ErrorContains(t, err, testcase.etext) + } + } +} diff --git a/builder/dockerfile/metrics.go b/builder/dockerfile/metrics.go new file mode 100644 index 0000000..5aa953a --- /dev/null +++ b/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Build metrics prometheus messages, these values must be initialized before +// using them. See the example below in the "builds_failed" metric definition. 
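+//
+// init below registers each failure reason with WithValues up front, so every
+// "builds_failed" series exists (at zero) before the first failure is counted.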
+const ( + metricsDockerfileSyntaxError = "dockerfile_syntax_error" + metricsDockerfileEmptyError = "dockerfile_empty_error" + metricsCommandNotSupportedError = "command_not_supported_error" + metricsErrorProcessingCommandsError = "error_processing_commands_error" + metricsBuildTargetNotReachableError = "build_target_not_reachable_error" + metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" + metricsUnknownInstructionError = "unknown_instruction_error" + metricsBuildCanceled = "build_canceled" +) + +func init() { + buildMetrics := metrics.NewNamespace("builder", "", nil) + + buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") + buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") + for _, r := range []string{ + metricsDockerfileSyntaxError, + metricsDockerfileEmptyError, + metricsCommandNotSupportedError, + metricsErrorProcessingCommandsError, + metricsBuildTargetNotReachableError, + metricsMissingOnbuildArgumentsError, + metricsUnknownInstructionError, + metricsBuildCanceled, + } { + buildsFailed.WithValues(r) + } + + metrics.Register(buildMetrics) +} diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go new file mode 100644 index 0000000..adc2276 --- /dev/null +++ b/builder/dockerfile/mockbackend_test.go @@ -0,0 +1,130 @@ +package dockerfile + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "golang.org/x/net/context" +) + +// MockBackend implements the builder.Backend interface for unit testing +type MockBackend struct { + containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + commitFunc func(string, *backend.ContainerCommitConfig) (string, error) + getImageFunc func(string) (builder.Image, builder.ReleaseableLayer, error) + makeImageCacheFunc func(cacheFrom []string, platform string) builder.ImageCache +} + +func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error { + return nil +} + +func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + if m.containerCreateFunc != nil { + return m.containerCreateFunc(config) + } + return container.ContainerCreateCreatedBody{}, nil +} + +func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) error { + return nil +} + +func (m *MockBackend) Commit(cID string, cfg *backend.ContainerCommitConfig) (string, error) { + if m.commitFunc != nil { + return m.commitFunc(cID, cfg) + } + return "", nil +} + +func (m *MockBackend) ContainerKill(containerID string, sig uint64) error { + return nil +} + +func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error { + return nil +} + +func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) { + return nil, nil +} + +func (m *MockBackend) ContainerCreateWorkdir(containerID string) error { + return nil +} + +func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, 
decompress bool) error { + return nil +} + +func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if m.getImageFunc != nil { + return m.getImageFunc(refOrID) + } + + return &mockImage{id: "theid"}, &mockLayer{}, nil +} + +func (m *MockBackend) MakeImageCache(cacheFrom []string, platform string) builder.ImageCache { + if m.makeImageCacheFunc != nil { + return m.makeImageCacheFunc(cacheFrom, platform) + } + return nil +} + +func (m *MockBackend) CreateImage(config []byte, parent string, platform string) (builder.Image, error) { + return nil, nil +} + +type mockImage struct { + id string + config *container.Config +} + +func (i *mockImage) ImageID() string { + return i.id +} + +func (i *mockImage) RunConfig() *container.Config { + return i.config +} + +func (i *mockImage) MarshalJSON() ([]byte, error) { + type rawImage mockImage + return json.Marshal(rawImage(*i)) +} + +type mockImageCache struct { + getCacheFunc func(parentID string, cfg *container.Config) (string, error) +} + +func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) { + if mic.getCacheFunc != nil { + return mic.getCacheFunc(parentID, cfg) + } + return "", nil +} + +type mockLayer struct{} + +func (l *mockLayer) Release() error { + return nil +} + +func (l *mockLayer) Mount() (string, error) { + return "mountPath", nil +} + +func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) { + return nil, nil +} + +func (l *mockLayer) DiffID() layer.DiffID { + return layer.DiffID("abcdef") +} diff --git a/builder/dockerfile/parser/dumper/main.go b/builder/dockerfile/parser/dumper/main.go new file mode 100644 index 0000000..ea62050 --- /dev/null +++ b/builder/dockerfile/parser/dumper/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/dockerfile/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + defer f.Close() + + result, err := parser.Parse(f) + if err != nil { + panic(err) + } + fmt.Println(result.AST.Dump()) + } +} diff --git a/builder/dockerfile/parser/json_test.go b/builder/dockerfile/parser/json_test.go new file mode 100644 index 0000000..d448919 --- /dev/null +++ b/builder/dockerfile/parser/json_test.go @@ -0,0 +1,59 @@ +package parser + +import ( + "testing" +) + +var invalidJSONArraysOfStrings = []string{ + `["a",42,"b"]`, + `["a",123.456,"b"]`, + `["a",{},"b"]`, + `["a",{"c": "d"},"b"]`, + `["a",["c"],"b"]`, + `["a",true,"b"]`, + `["a",false,"b"]`, + `["a",null,"b"]`, +} + +var validJSONArraysOfStrings = map[string][]string{ + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + ` [ "a", "b" ] `: {"a", "b"}, + `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, +} + +func TestJSONArraysOfStrings(t *testing.T) { + for json, expected := range validJSONArraysOfStrings { + d := NewDefaultDirective() + + if node, _, err := parseJSON(json, d); err != nil { + t.Fatalf("%q should be a valid JSON array of strings, but wasn't! 
(err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + d := NewDefaultDirective() + + if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/builder/dockerfile/parser/line_parsers.go b/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 0000000..d0e182e --- /dev/null +++ b/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,399 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/docker/docker/builder/dockerfile/command" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +const ( + commandLabel = "LABEL" +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + child, err := newNodeFromLine(rest, d) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). 
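+//
+// For example, with the default directive (see TestParseWords):
+//   parseWords(`foo bar 'abc xyz'`, d) => ["foo", "bar", "'abc xyz'"]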
+func parseWords(rest string, d *Directive) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.escapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.escapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string, d *Directive) (*Node, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil + } + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + parts := tokenWhitespace.Split(rest, 2) + if len(parts) < 2 { + return nil, fmt.Errorf(key + " must have two arguments") + } + return newKeyValueNode(parts[0], parts[1]), nil + } + + var rootNode *Node + var prevNode *Node + for _, word := range words { + if !strings.Contains(word, "=") { + return nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) + } + + parts := strings.SplitN(word, "=", 2) + node := newKeyValueNode(parts[0], parts[1]) + rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) + } + + return rootNode, nil +} + +func newKeyValueNode(key, value string) *Node { + return &Node{ + Value: key, + Next: &Node{Value: value}, + } +} + +func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { + if rootNode == nil { + rootNode = node + } + if prevNode != nil { + prevNode.Next = node + } + + prevNode = node.Next + return rootNode, prevNode +} + +func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, "ENV", d) + return node, nil, err +} + +func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, commandLabel, d) + return node, nil, err +} + +// NodeFromLabels returns a Node for the injected labels +func NodeFromLabels(labels map[string]string) *Node { + keys := []string{} + for key := range labels { + keys = append(keys, key) + } + // Sort the label to have a repeatable order + sort.Strings(keys) + + labelPairs := []string{} + var rootNode *Node + var prevNode *Node + for _, key := range keys { + value := labels[key] + labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value)) + // Value must be single quoted to prevent env variable expansion + // See https://github.com/docker/docker/issues/26027 + node := newKeyValueNode(key, "'"+value+"'") + rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) + } + + return &Node{ + Value: command.Label, + Original: commandLabel + " " + strings.Join(labelPairs, " "), + Next: rootNode, + } +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parseString just wraps the string in quotes and returns a working node. 
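+// (In practice no quotes are added here: the rest of the line is stored
+// verbatim as the node's Value, as for MAINTAINER, USER and STOPSIGNAL.)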
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest, d) +} + +// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. 
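+//
+// For example (illustrative):
+//   HEALTHCHECK NONE                    => Node{Value: "NONE"}
+//   HEALTHCHECK CMD curl -f http://...  => Node{Value: "CMD", Next: <command>}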
+func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/builder/dockerfile/parser/line_parsers_test.go b/builder/dockerfile/parser/line_parsers_test.go new file mode 100644 index 0000000..cf0b21b --- /dev/null +++ b/builder/dockerfile/parser/line_parsers_test.go @@ -0,0 +1,74 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseNameValOldFormat(t *testing.T) { + directive := Directive{} + node, err := parseNameVal("foo bar", "LABEL", &directive) + assert.NoError(t, err) + + expected := &Node{ + Value: "foo", + Next: &Node{Value: "bar"}, + } + assert.Equal(t, expected, node) +} + +func TestParseNameValNewFormat(t *testing.T) { + directive := Directive{} + node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive) + assert.NoError(t, err) + + expected := &Node{ + Value: "foo", + Next: &Node{ + Value: "bar", + Next: &Node{ + Value: "thing", + Next: &Node{ + Value: "star", + }, + }, + }, + } + assert.Equal(t, expected, node) +} + +func TestNodeFromLabels(t *testing.T) { + labels := map[string]string{ + "foo": "bar", + "weird": "first' second", + } + expected := &Node{ + Value: "label", + Original: `LABEL "foo"='bar' "weird"='first' second'`, + Next: &Node{ + Value: "foo", + Next: &Node{ + Value: "'bar'", + Next: &Node{ + Value: "weird", + Next: &Node{ + Value: "'first' second'", + }, + }, + }, + }, + } + + node := NodeFromLabels(labels) + assert.Equal(t, expected, node) + +} + +func TestParseNameValWithoutVal(t *testing.T) { + directive := Directive{} + // In Config.Env, a variable without `=` is removed from the environment. (#31634) + // However, in Dockerfile, we don't allow "unsetting" an environment variable. (#11922) + _, err := parseNameVal("foo", "ENV", &directive) + assert.Error(t, err, "ENV must have two arguments") +} diff --git a/builder/dockerfile/parser/parser.go b/builder/dockerfile/parser/parser.go new file mode 100644 index 0000000..7f07ff2 --- /dev/null +++ b/builder/dockerfile/parser/parser.go @@ -0,0 +1,355 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "runtime" + "strconv" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. 
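+//
+// For example, a two-instruction Dockerfile
+//   FROM busybox
+//   RUN echo hi
+// dumps (via Dump below) as: (from "busybox") (run "echo hi")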
+//
+type Node struct {
+ Value string // actual content
+ Next *Node // the next item in the current sexp
+ Children []*Node // the children of this sexp
+ Attributes map[string]bool // special attributes for this node
+ Original string // original line used before parsing
+ Flags []string // only top Node should have this set
+ StartLine int // the line in the original dockerfile where the node begins
+ endLine int // the line in the original dockerfile where the node ends
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps.
+// Returns a string suitable for printing.
+func (node *Node) Dump() string {
+ str := ""
+ str += node.Value
+
+ if len(node.Flags) > 0 {
+ str += fmt.Sprintf(" %q", node.Flags)
+ }
+
+ for _, n := range node.Children {
+ str += "(" + n.Dump() + ")\n"
+ }
+
+ for n := node.Next; n != nil; n = n.Next {
+ if len(n.Children) > 0 {
+ str += " " + n.Dump()
+ } else {
+ str += " " + strconv.Quote(n.Value)
+ }
+ }
+
+ return strings.TrimSpace(str)
+}
+
+func (node *Node) lines(start, end int) {
+ node.StartLine = start
+ node.endLine = end
+}
+
+// AddChild adds a new child node, and updates line information
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
+ child.lines(startLine, endLine)
+ if node.StartLine < 0 {
+ node.StartLine = startLine
+ }
+ node.endLine = endLine
+ node.Children = append(node.Children, child)
+}
+
+var (
+ dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
+ tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
+ tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+ tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
+ tokenComment = regexp.MustCompile(`^#.*$`)
+)
+
+// DefaultEscapeToken is the default escape token
+const DefaultEscapeToken = '\\'
+
+// defaultPlatformToken is the platform assumed for the build if not explicitly provided
+var defaultPlatformToken = runtime.GOOS
+
+// Directive is the structure used during a build run to hold the state of
+// parsing directives.
+type Directive struct {
+ escapeToken rune // Current escape token
+ platformToken string // Current platform token
+ lineContinuationRegex *regexp.Regexp // Current line continuation regex
+ processingComplete bool // Whether we are done looking for directives
+ escapeSeen bool // Whether the escape directive has been seen
+ platformSeen bool // Whether the platform directive has been seen
+}
+
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
+func (d *Directive) setEscapeToken(s string) error {
+ if s != "`" && s != "\\" {
+ return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
+ }
+ d.escapeToken = rune(s[0])
+ d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
+ return nil
+}
+
+// setPlatformToken sets the default platform for pulling images in a Dockerfile.
+func (d *Directive) setPlatformToken(s string) error {
+ s = strings.ToLower(s)
+ valid := []string{runtime.GOOS}
+ if system.LCOWSupported() {
+ valid = append(valid, "linux")
+ }
+ for _, item := range valid {
+ if s == item {
+ d.platformToken = s
+ return nil
+ }
+ }
+ return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
+}
+
+// possibleParserDirective looks for one or more parser directives '# escapeToken=' and
+// '# platform='. Parser directives must precede any builder instruction
+// or other comments, and cannot be repeated.
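+//
+// For example, a Dockerfile whose first line is
+//   # escape=`
+// switches the escape/line-continuation character from \ to `, which is
+// handy on Windows where \ is the path separator.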
+func (d *Directive) possibleParserDirective(line string) error { + if d.processingComplete { + return nil + } + + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) != 0 { + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if d.escapeSeen == true { + return errors.New("only one escape parser directive can be used") + } + d.escapeSeen = true + return d.setEscapeToken(tecMatch[i]) + } + } + } + + // TODO @jhowardmsft LCOW Support: Eventually this check can be removed, + // but only recognise a platform token if running in LCOW mode. + if system.LCOWSupported() { + tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tpcMatch) != 0 { + for i, n := range tokenPlatformCommand.SubexpNames() { + if n == "platform" { + if d.platformSeen == true { + return errors.New("only one platform parser directive can be used") + } + d.platformSeen = true + return d.setPlatformToken(tpcMatch[i]) + } + } + } + } + + d.processingComplete = true + return nil +} + +// NewDefaultDirective returns a new Directive with the default escapeToken token +func NewDefaultDirective() *Directive { + directive := Directive{} + directive.setEscapeToken(string(DefaultEscapeToken)) + directive.setPlatformToken(defaultPlatformToken) + return &directive +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseStringsWhitespaceDelimited, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// newNodeFromLine splits the line into parts, and dispatches to a function +// based on the command and command arguments. A Node is created from the +// result of the dispatch. 
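+//
+// For example, `ENV foo=bar` dispatches to parseEnv and yields a node with
+// Value "env" whose Next chain holds "foo" and "bar"; lines with an unknown
+// instruction fall through to parseIgnore.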
+func newNodeFromLine(line string, directive *Directive) (*Node, error) {
+	cmd, flags, args, err := splitCommand(line)
+	if err != nil {
+		return nil, err
+	}
+
+	fn := dispatch[cmd]
+	// Ignore invalid Dockerfile instructions
+	if fn == nil {
+		fn = parseIgnore
+	}
+	next, attrs, err := fn(args, directive)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Node{
+		Value:      cmd,
+		Original:   line,
+		Flags:      flags,
+		Next:       next,
+		Attributes: attrs,
+	}, nil
+}
+
+// Result is the result of parsing a Dockerfile
+type Result struct {
+	AST         *Node
+	EscapeToken rune
+	Platform    string
+	Warnings    []string
+}
+
+// PrintWarnings prints the warnings to the writer
+func (r *Result) PrintWarnings(out io.Writer) {
+	if len(r.Warnings) == 0 {
+		return
+	}
+	// Use Fprintln rather than Fprintf so that a '%' in a warning (for
+	// example, one quoting an offending Dockerfile line) is not treated
+	// as a format verb.
+	fmt.Fprintln(out, strings.Join(r.Warnings, "\n"))
+}
+
+// Parse reads lines from a Reader, parses the lines into an AST and returns
+// the AST and escape token
+func Parse(rwc io.Reader) (*Result, error) {
+	d := NewDefaultDirective()
+	currentLine := 0
+	root := &Node{StartLine: -1}
+	scanner := bufio.NewScanner(rwc)
+	warnings := []string{}
+
+	var err error
+	for scanner.Scan() {
+		bytesRead := scanner.Bytes()
+		if currentLine == 0 {
+			// First line, strip the byte-order-marker if present
+			bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
+		}
+		bytesRead, err = processLine(d, bytesRead, true)
+		if err != nil {
+			return nil, err
+		}
+		currentLine++
+
+		startLine := currentLine
+		line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
+		if isEndOfLine && line == "" {
+			continue
+		}
+
+		var hasEmptyContinuationLine bool
+		for !isEndOfLine && scanner.Scan() {
+			bytesRead, err := processLine(d, scanner.Bytes(), false)
+			if err != nil {
+				return nil, err
+			}
+			currentLine++
+
+			if isEmptyContinuationLine(bytesRead) {
+				hasEmptyContinuationLine = true
+				continue
+			}
+
+			continuationLine := string(bytesRead)
+			continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
+			line += continuationLine
+		}
+
+		if hasEmptyContinuationLine {
+			warning := "[WARNING]: Empty continuation line found in:\n    " + line
+			warnings = append(warnings, warning)
+		}
+
+		child, err := newNodeFromLine(line, d)
+		if err != nil {
+			return nil, err
+		}
+		root.AddChild(child, startLine, currentLine)
+	}
+
+	if len(warnings) > 0 {
+		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
+	}
+	return &Result{
+		AST:         root,
+		Warnings:    warnings,
+		EscapeToken: d.escapeToken,
+		Platform:    d.platformToken,
+	}, nil
+}
+
+func trimComments(src []byte) []byte {
+	return tokenComment.ReplaceAll(src, []byte{})
+}
+
+func trimWhitespace(src []byte) []byte {
+	return bytes.TrimLeftFunc(src, unicode.IsSpace)
+}
+
+func isEmptyContinuationLine(line []byte) bool {
+	return len(trimComments(trimWhitespace(line))) == 0
+}
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+func trimContinuationCharacter(line string, d *Directive) (string, bool) {
+	if d.lineContinuationRegex.MatchString(line) {
+		line = d.lineContinuationRegex.ReplaceAllString(line, "")
+		return line, false
+	}
+	return line, true
+}
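+
+// A minimal usage sketch: feed Parse an in-memory Dockerfile and dump the
+// resulting AST in the same sexp notation used by the test fixtures below:
+//
+//	res, err := Parse(strings.NewReader("FROM busybox\nRUN echo hi\n"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(res.AST.Dump())
+//	// (from "busybox")
+//	// (run "echo hi")
+
+// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
+// to preserve whitespace on continuation lines. Why is that done?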
+func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) { + if stripLeftWhitespace { + token = trimWhitespace(token) + } + return trimComments(token), d.possibleParserDirective(string(token)) +} diff --git a/builder/dockerfile/parser/parser_test.go b/builder/dockerfile/parser/parser_test.go new file mode 100644 index 0000000..bb057ec --- /dev/null +++ b/builder/dockerfile/parser/parser_test.go @@ -0,0 +1,154 @@ +package parser + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" +const testFileLineInfo = "testfile-line/Dockerfile" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + require.NoError(t, err) + defer f.Close() + + dirs, err := f.Readdirnames(0) + require.NoError(t, err) + return dirs +} + +func TestParseErrorCases(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + require.NoError(t, err, dockerfile) + defer df.Close() + + _, err = Parse(df) + assert.Error(t, err, dockerfile) + } +} + +func TestParseCases(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + require.NoError(t, err, dockerfile) + defer df.Close() + + result, err := Parse(df) + require.NoError(t, err, dockerfile) + + content, err := ioutil.ReadFile(resultfile) + require.NoError(t, err, resultfile) + + if runtime.GOOS == "windows" { + // CRLF --> CR to match Unix behavior + content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) + } + assert.Equal(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile) + } +} + +func TestParseWords(t *testing.T) { + tests := []map[string][]string{ + { + "input": {"foo"}, + "expect": {"foo"}, + }, + { + "input": {"foo bar"}, + "expect": {"foo", "bar"}, + }, + { + "input": {"foo\\ bar"}, + "expect": {"foo\\ bar"}, + }, + { + "input": {"foo=bar"}, + "expect": {"foo=bar"}, + }, + { + "input": {"foo bar 'abc xyz'"}, + "expect": {"foo", "bar", "'abc xyz'"}, + }, + { + "input": {`foo bar "abc xyz"`}, + "expect": {"foo", "bar", `"abc xyz"`}, + }, + { + "input": {"àöû"}, + "expect": {"àöû"}, + }, + { + "input": {`föo bàr "âbc xÿz"`}, + "expect": {"föo", "bàr", `"âbc xÿz"`}, + }, + } + + for _, test := range tests { + words := parseWords(test["input"][0], NewDefaultDirective()) + assert.Equal(t, test["expect"], words) + } +} + +func TestParseIncludesLineNumbers(t *testing.T) { + df, err := os.Open(testFileLineInfo) + require.NoError(t, err) + defer df.Close() + + result, err := Parse(df) + require.NoError(t, err) + + ast := result.AST + assert.Equal(t, 5, ast.StartLine) + assert.Equal(t, 31, ast.endLine) + assert.Len(t, ast.Children, 3) + expected := [][]int{ + {5, 5}, + {11, 12}, + {17, 31}, + } + for i, child := range ast.Children { + msg := fmt.Sprintf("Child %d", i) + assert.Equal(t, expected[i], []int{child.StartLine, child.endLine}, msg) + } +} + +func TestParseWarnsOnEmptyContinutationLine(t *testing.T) { + dockerfile := bytes.NewBufferString(` +FROM alpine:3.6 + +RUN something \ + + following \ + + more + +RUN another \ + + thing + `) + + result, err := Parse(dockerfile) + require.NoError(t, err) + warnings := result.Warnings + 
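// The two RUN instructions above each contain empty continuation lines, so
+ // Parse emits one warning per affected instruction, plus the trailing
+ // deprecation notice: three warnings in total.
+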
assert.Len(t, warnings, 3) + assert.Contains(t, warnings[0], "Empty continuation line found in") + assert.Contains(t, warnings[0], "RUN something following more") + assert.Contains(t, warnings[1], "RUN another thing") + assert.Contains(t, warnings[2], "will become errors in a future release") +} diff --git a/builder/dockerfile/parser/split_command.go b/builder/dockerfile/parser/split_command.go new file mode 100644 index 0000000..171f454 --- /dev/null +++ b/builder/dockerfile/parser/split_command.go @@ -0,0 +1,118 @@ +package parser + +import ( + "strings" + "unicode" +) + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found something with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/builder/dockerfile/parser/testfile-line/Dockerfile b/builder/dockerfile/parser/testfile-line/Dockerfile new file mode 100644 index 0000000..0a663b7 --- /dev/null +++ b/builder/dockerfile/parser/testfile-line/Dockerfile @@ -0,0 +1,31 @@ +# ESCAPE=\ + + + +FROM brimstone/ubuntu:14.04 + + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + + +ENV GOPATH \ +/go + + + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + + + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && 
apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 0000000..1d65578 --- /dev/null +++ b/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 0000000..d1be459 --- /dev/null +++ b/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 0000000..035b4e8 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:14.04 +LABEL maintainer Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . / +ADD null / +COPY nullfile /tmp +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 0000000..d1f71ec --- /dev/null +++ b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(label "maintainer" "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." 
"/") +(add "null" "/") +(copy "nullfile" "/tmp") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 0000000..9c0952a --- /dev/null +++ b/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,26 @@ +#escape=\ +FROM brimstone/ubuntu:14.04 + +LABEL maintainer brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/builder/dockerfile/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 0000000..3b45db6 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(label "maintainer" "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 0000000..25ae352 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && 
dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 0000000..16492e5 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile b/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile new file mode 100644 index 0000000..a8ec369 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/continue-at-eof/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine:3.5 + +RUN something \ \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/continue-at-eof/result b/builder/dockerfile/parser/testfiles/continue-at-eof/result new file mode 100644 index 0000000..14e4f09 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/continue-at-eof/result @@ -0,0 +1,2 @@ +(from "alpine:3.5") +(run "something") diff --git a/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 0000000..42b324e --- /dev/null +++ b/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment 
with a blank line surrounding it + +this is some more useful stuff diff --git a/builder/dockerfile/parser/testfiles/continueIndent/result b/builder/dockerfile/parser/testfiles/continueIndent/result new file mode 100644 index 0000000..268ae07 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 0000000..8ccb71a --- /dev/null +++ b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN 
download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 0000000..25dd3dd --- /dev/null +++ b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p 
${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env "APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/builder/dockerfile/parser/testfiles/docker/Dockerfile b/builder/dockerfile/parser/testfiles/docker/Dockerfile new file mode 100644 index 0000000..5153453 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -0,0 +1,102 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM ubuntu:14.04 +LABEL maintainer Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the "busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/builder/dockerfile/parser/testfiles/docker/result b/builder/dockerfile/parser/testfiles/docker/result new file mode 100644 index 0000000..0c2f229 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(label "maintainer" "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff --git a/builder/dockerfile/parser/testfiles/env/Dockerfile b/builder/dockerfile/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000..08fa18a --- /dev/null +++ b/builder/dockerfile/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/builder/dockerfile/parser/testfiles/env/result b/builder/dockerfile/parser/testfiles/env/result new file mode 100644 index 0000000..ba0a6dd --- /dev/null +++ b/builder/dockerfile/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile new file mode 100644 index 0000000..18e9a47 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -0,0 +1,9 @@ +# Comment here. Should not be looking for the following parser directive. +# Hence the following line will be ignored, and the subsequent backslash +# continuation will be the default. +# escape = ` + +FROM image +LABEL maintainer foo@bar.com +ENV GOPATH \ +\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape-after-comment/result b/builder/dockerfile/parser/testfiles/escape-after-comment/result new file mode 100644 index 0000000..9ab119c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile new file mode 100644 index 0000000..366ee3c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -0,0 +1,7 @@ +# escape = `` +# There is no white space line after the directives. This still succeeds, but goes +# against best practices. 
+FROM image +LABEL maintainer foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape-nonewline/result b/builder/dockerfile/parser/testfiles/escape-nonewline/result new file mode 100644 index 0000000..9ab119c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escape/Dockerfile b/builder/dockerfile/parser/testfiles/escape/Dockerfile new file mode 100644 index 0000000..a515af1 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -0,0 +1,6 @@ +#escape = ` + +FROM image +LABEL maintainer foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape/result b/builder/dockerfile/parser/testfiles/escape/result new file mode 100644 index 0000000..9ab119c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escape/result @@ -0,0 +1,3 @@ +(from "image") +(label "maintainer" "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/builder/dockerfile/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000..0306239 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/dockerfile/parser/testfiles/escapes/result b/builder/dockerfile/parser/testfiles/escapes/result new file mode 100644 index 0000000..98e3e3b --- /dev/null +++ b/builder/dockerfile/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/builder/dockerfile/parser/testfiles/flags/Dockerfile b/builder/dockerfile/parser/testfiles/flags/Dockerfile new file mode 100644 index 0000000..2418e0f --- /dev/null +++ b/builder/dockerfile/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/builder/dockerfile/parser/testfiles/flags/result b/builder/dockerfile/parser/testfiles/flags/result new file mode 100644 index 0000000..4578f4c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" "--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git a/builder/dockerfile/parser/testfiles/health/Dockerfile b/builder/dockerfile/parser/testfiles/health/Dockerfile new file mode 100644 index 0000000..081e442 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/health/Dockerfile @@ -0,0 +1,10 @@ +FROM debian +ADD check.sh main.sh /app/ +CMD /app/main.sh +HEALTHCHECK +HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ + CMD /app/check.sh --quiet +HEALTHCHECK CMD +HEALTHCHECK CMD a b 
+HEALTHCHECK --timeout=3s CMD ["foo"] +HEALTHCHECK CONNECT TCP 7000 diff --git a/builder/dockerfile/parser/testfiles/health/result b/builder/dockerfile/parser/testfiles/health/result new file mode 100644 index 0000000..092924f --- /dev/null +++ b/builder/dockerfile/parser/testfiles/health/result @@ -0,0 +1,9 @@ +(from "debian") +(add "check.sh" "main.sh" "/app/") +(cmd "/app/main.sh") +(healthcheck) +(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") +(healthcheck "CMD") +(healthcheck "CMD" "a b") +(healthcheck ["--timeout=3s"] "CMD" "foo") +(healthcheck "CONNECT" "TCP 7000") diff --git a/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/builder/dockerfile/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000..587fb9b --- /dev/null +++ b/builder/dockerfile/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/builder/dockerfile/parser/testfiles/influxdb/result b/builder/dockerfile/parser/testfiles/influxdb/result new file mode 100644 index 0000000..0998e87 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000..39fe27d --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000..afc220c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000..eaae081 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000..484804e --- /dev/null +++ 
b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000..c3ac63c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000..6147891 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000..5fd4afa --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000..1ffbb8f --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000..30cc4bb --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] 
diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000..3204814 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/builder/dockerfile/parser/testfiles/json/Dockerfile b/builder/dockerfile/parser/testfiles/json/Dockerfile new file mode 100644 index 0000000..a586917 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/builder/dockerfile/parser/testfiles/json/result b/builder/dockerfile/parser/testfiles/json/result new file mode 100644 index 0000000..c6553e6 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000..728ec9a --- /dev/null +++ b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +LABEL maintainer James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 +RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000..e774bc4 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(label "maintainer" "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000..27f28cb --- /dev/null +++ b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +LABEL maintainer docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . 
copy diff --git a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000..8a499ff --- /dev/null +++ b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(label "maintainer" "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." "copy") diff --git a/builder/dockerfile/parser/testfiles/mail/Dockerfile b/builder/dockerfile/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000..f64c116 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/builder/dockerfile/parser/testfiles/mail/result b/builder/dockerfile/parser/testfiles/mail/result new file mode 100644 index 0000000..a0efcf0 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 0000000..57bb597 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/builder/dockerfile/parser/testfiles/multiple-volumes/result b/builder/dockerfile/parser/testfiles/multiple-volumes/result new file mode 100644 index 0000000..18dbdee --- /dev/null +++ b/builder/dockerfile/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/builder/dockerfile/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000..5b9ec06 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install 
libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/builder/dockerfile/parser/testfiles/mumble/result b/builder/dockerfile/parser/testfiles/mumble/result new file mode 100644 index 0000000..a0036a9 --- /dev/null +++ b/builder/dockerfile/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/builder/dockerfile/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000..0a35e2c --- /dev/null +++ b/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +LABEL maintainer Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/builder/dockerfile/parser/testfiles/nginx/result b/builder/dockerfile/parser/testfiles/nginx/result new file mode 100644 index 0000000..a895fad --- /dev/null +++ b/builder/dockerfile/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(label "maintainer" "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/builder/dockerfile/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000..72b79bd --- /dev/null +++ b/builder/dockerfile/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg +RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/dockerfile/parser/testfiles/tf2/result b/builder/dockerfile/parser/testfiles/tf2/result new file mode 100644 index 0000000..d4f94cd --- /dev/null +++ b/builder/dockerfile/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C 
/steam")
+(add "./script" "/steam/script")
+(run "/steam/steamcmd.sh +runscript /steam/script")
+(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf")
+(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf")
+(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg")
+(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg")
+(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg")
+(run "rm -r /steam/tf2/tf/addons/sourcemod/configs")
+(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs")
+(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en")
+(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en")
+(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill")
diff --git a/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/builder/dockerfile/parser/testfiles/weechat/Dockerfile
new file mode 100644
index 0000000..4842088
--- /dev/null
+++ b/builder/dockerfile/parser/testfiles/weechat/Dockerfile
@@ -0,0 +1,9 @@
+FROM ubuntu:14.04
+
+RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y
+
+ADD .weechat /.weechat
+ADD .tmux.conf /
+RUN echo "export TERM=screen-256color" >/.zshenv
+
+CMD zsh -c weechat
diff --git a/builder/dockerfile/parser/testfiles/weechat/result b/builder/dockerfile/parser/testfiles/weechat/result
new file mode 100644
index 0000000..c3abb4c
--- /dev/null
+++ b/builder/dockerfile/parser/testfiles/weechat/result
@@ -0,0 +1,6 @@
+(from "ubuntu:14.04")
+(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y")
+(add ".weechat" "/.weechat")
+(add ".tmux.conf" "/")
+(run "echo \"export TERM=screen-256color\" >/.zshenv")
+(cmd "zsh -c weechat")
diff --git a/builder/dockerfile/parser/testfiles/znc/Dockerfile b/builder/dockerfile/parser/testfiles/znc/Dockerfile
new file mode 100644
index 0000000..626b126
--- /dev/null
+++ b/builder/dockerfile/parser/testfiles/znc/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:14.04
+LABEL maintainer Erik Hollensbe 
+
+RUN apt-get update && apt-get install znc -y
+ADD conf /.znc
+
+CMD [ "/usr/bin/znc", "-f", "-r" ]
diff --git a/builder/dockerfile/parser/testfiles/znc/result b/builder/dockerfile/parser/testfiles/znc/result
new file mode 100644
index 0000000..bfc7f65
--- /dev/null
+++ b/builder/dockerfile/parser/testfiles/znc/result
@@ -0,0 +1,5 @@
+(from "ubuntu:14.04")
+(label "maintainer" "Erik Hollensbe ")
+(run "apt-get update && apt-get install znc -y")
+(add "conf" "/.znc")
+(cmd "/usr/bin/znc" "-f" "-r")
diff --git a/builder/dockerfile/shell_parser.go b/builder/dockerfile/shell_parser.go
new file mode 100644
index 0000000..b72ac29
--- /dev/null
+++ b/builder/dockerfile/shell_parser.go
@@ -0,0 +1,344 @@
+package dockerfile
+
+import (
+	"bytes"
+	"strings"
+	"text/scanner"
+	"unicode"
+
+	"github.com/pkg/errors"
+)
+
+// ShellLex performs shell word splitting and variable expansion.
+//
+// ShellLex takes a string and an array of env variables and
+// processes all quotes (" and ') as well as $xxx and ${xxx} env variable
+// tokens. It tries to mimic the bash shell's processing.
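+//
+// For example (an illustrative sketch, using only the API below), given
+// env := []string{"FOO=bar"}:
+//
+//	word, _ := NewShellLex('\\').ProcessWord(`hi $FOO "a b"`, env)   // "hi bar a b"
+//	words, _ := NewShellLex('\\').ProcessWords(`hi $FOO "a b"`, env) // ["hi" "bar" "a b"]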
+// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section +type ShellLex struct { + escapeToken rune +} + +// NewShellLex creates a new ShellLex which uses escapeToken to escape quotes. +func NewShellLex(escapeToken rune) *ShellLex { + return &ShellLex{escapeToken: escapeToken} +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func (s *ShellLex) ProcessWord(word string, env []string) (string, error) { + word, _, err := s.process(word, env) + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func (s *ShellLex) ProcessWords(word string, env []string) ([]string, error) { + _, words, err := s.process(word, env) + return words, err +} + +func (s *ShellLex) process(word string, env []string) (string, []string, error) { + sw := &shellWord{ + envs: env, + escapeToken: s.escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + return sw.process(word) +} + +type shellWord struct { + scanner scanner.Scanner + envs []string + escapeToken rune +} + +func (sw *shellWord) process(source string) (string, []string, error) { + word, words, err := sw.processStopOn(scanner.EOF) + if err != nil { + err = errors.Wrapf(err, "failed to process %q", source) + } + return word, words, err +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result bytes.Buffer + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result.WriteString(tmp) + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just 
add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result.WriteRune(ch) + } + } + + return result.String(), words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + // + // From the "sh" man page: + // Single Quotes + // Enclosing characters in single quotes preserves the literal meaning of + // all the characters (except single quotes, making it impossible to put + // single-quotes in a single-quoted string). + + var result bytes.Buffer + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + switch ch { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching single-quote") + case '\'': + return result.String(), nil + } + result.WriteRune(ch) + } +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + // + // From the "sh" man page: + // Double Quotes + // Enclosing characters within double quotes preserves the literal meaning + // of all characters except dollarsign ($), backquote (`), and backslash + // (\). The backslash inside double quotes is historically weird, and + // serves to quote only the following characters: + // $ ` " \ . + // Otherwise it remains literal. + + var result bytes.Buffer + + sw.scanner.Next() + + for { + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching double-quote") + case '"': + sw.scanner.Next() + return result.String(), nil + case '$': + value, err := sw.processDollar() + if err != nil { + return "", err + } + result.WriteString(value) + default: + ch := sw.scanner.Next() + if ch == sw.escapeToken { + switch sw.scanner.Peek() { + case scanner.EOF: + // Ignore \ at end of word + continue + case '"', '$', sw.escapeToken: + // These chars can be escaped, all other \'s are left as-is + // Note: for now don't do anything special with ` chars. + // Not sure what to do with them anyway since we're not going + // to execute the text in there (not now anyway). + ch = sw.scanner.Next() + } + } + result.WriteRune(ch) + } + } +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + + // $xxx case + if sw.scanner.Peek() != '{' { + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil + } + + sw.scanner.Next() + name := sw.processName() + ch := sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + } + } + return "", errors.Errorf("missing ':' in substitution") +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name bytes.Buffer + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if name.Len() == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name.WriteRune(ch) + } + + return name.String() +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if equalEnvKeys(name, env) { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + compareName := env[:i] + if !equalEnvKeys(name, compareName) { + continue + } + return env[i+1:] + } + return "" +} diff --git a/builder/dockerfile/shell_parser_test.go b/builder/dockerfile/shell_parser_test.go new file mode 100644 index 0000000..c4f7e0e --- /dev/null +++ b/builder/dockerfile/shell_parser_test.go @@ -0,0 +1,151 @@ +package dockerfile + +import ( + "bufio" + "os" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShellParser4EnvVars(t *testing.T) { + fn := "envVarTest" + lineCount := 0 + + file, err := os.Open(fn) + assert.NoError(t, err) + defer file.Close() + + shlex := NewShellLex('\\') + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} + for scanner.Scan() { + line := scanner.Text() + lineCount++ + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + assert.Len(t, words, 3) + + platform := strings.TrimSpace(words[0]) + source := strings.TrimSpace(words[1]) + expected := strings.TrimSpace(words[2]) + + // Key W=Windows; A=All; U=Unix + if platform != "W" && platform != "A" && platform != "U" { + t.Fatalf("Invalid tag %s at line %d of %s. 
Must be W, A or U", platform, lineCount, fn)
+		}
+
+		if ((platform == "W" || platform == "A") && runtime.GOOS == "windows") ||
+			((platform == "U" || platform == "A") && runtime.GOOS != "windows") {
+			newWord, err := shlex.ProcessWord(source, envs)
+			if expected == "error" {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, newWord, expected)
+			}
+		}
+	}
+}
+
+func TestShellParser4Words(t *testing.T) {
+	fn := "wordsTest"
+
+	file, err := os.Open(fn)
+	if err != nil {
+		t.Fatalf("Can't open '%s': %s", fn, err)
+	}
+	defer file.Close()
+
+	shlex := NewShellLex('\\')
+	envs := []string{}
+	scanner := bufio.NewScanner(file)
+	lineNum := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		lineNum = lineNum + 1
+
+		if strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		if strings.HasPrefix(line, "ENV ") {
+			line = strings.TrimLeft(line[3:], " ")
+			envs = append(envs, line)
+			continue
+		}
+
+		words := strings.Split(line, "|")
+		if len(words) != 2 {
+			t.Fatalf("Error in '%s' (line %d) - should be exactly one | in: %q", fn, lineNum, line)
+		}
+		test := strings.TrimSpace(words[0])
+		expected := strings.Split(strings.TrimLeft(words[1], " "), ",")
+
+		result, err := shlex.ProcessWords(test, envs)
+
+		if err != nil {
+			result = []string{"error"}
+		}
+
+		if len(result) != len(expected) {
+			t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result)
+		}
+		for i, w := range expected {
+			if w != result[i] {
+				t.Fatalf("Error on line %d. %q was supposed to result in %q, but got %q instead", lineNum, test, expected, result)
+			}
+		}
+	}
+}
+
+func TestGetEnv(t *testing.T) {
+	sw := &shellWord{envs: nil}
+
+	sw.envs = []string{}
+	if sw.getEnv("foo") != "" {
+		t.Fatal("2 - 'foo' should map to ''")
+	}
+
+	sw.envs = []string{"foo"}
+	if sw.getEnv("foo") != "" {
+		t.Fatal("3 - 'foo' should map to ''")
+	}
+
+	sw.envs = []string{"foo="}
+	if sw.getEnv("foo") != "" {
+		t.Fatal("4 - 'foo' should map to ''")
+	}
+
+	sw.envs = []string{"foo=bar"}
+	if sw.getEnv("foo") != "bar" {
+		t.Fatal("5 - 'foo' should map to 'bar'")
+	}
+
+	sw.envs = []string{"foo=bar", "car=hat"}
+	if sw.getEnv("foo") != "bar" {
+		t.Fatal("6 - 'foo' should map to 'bar'")
+	}
+	if sw.getEnv("car") != "hat" {
+		t.Fatal("7 - 'car' should map to 'hat'")
+	}
+
+	// Make sure we grab the first 'car' in the list
+	sw.envs = []string{"foo=bar", "car=hat", "car=bike"}
+	if sw.getEnv("car") != "hat" {
+		t.Fatal("8 - 'car' should map to 'hat'")
+	}
+}
diff --git a/builder/dockerfile/support.go b/builder/dockerfile/support.go
new file mode 100644
index 0000000..e875889
--- /dev/null
+++ b/builder/dockerfile/support.go
@@ -0,0 +1,19 @@
+package dockerfile
+
+import "strings"
+
+// handleJSONArgs parses the command passed to a CMD, ENTRYPOINT, RUN or SHELL instruction in a Dockerfile:
+// for the exec form it returns the args slice untouched,
+// for the shell form it returns the concatenated args as the single element of a slice.
+func handleJSONArgs(args []string, attributes map[string]bool) []string {
+	if len(args) == 0 {
+		return []string{}
+	}
+
+	if attributes != nil && attributes["json"] {
+		return args
+	}
+
+	// literal string command, not an exec array
+	return []string{strings.Join(args, " ")}
+}
diff --git a/builder/dockerfile/support_test.go b/builder/dockerfile/support_test.go
new file mode 100644
index 0000000..7cc6fe9
--- /dev/null
+++ b/builder/dockerfile/support_test.go
@@ -0,0 +1,65 @@
+package dockerfile
+
+import "testing"
+
+type testCase struct {
+	name       string
+	args       []string
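+	// attributes mirrors the parser's instruction attributes; the "json" key
+	// marks exec (JSON array) form, which handleJSONArgs returns untouched.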
attributes map[string]bool + expected []string +} + +func initTestCases() []testCase { + testCases := []testCase{} + + testCases = append(testCases, testCase{ + name: "empty args", + args: []string{}, + attributes: make(map[string]bool), + expected: []string{}, + }) + + jsonAttributes := make(map[string]bool) + jsonAttributes["json"] = true + + testCases = append(testCases, testCase{ + name: "json attribute with one element", + args: []string{"foo"}, + attributes: jsonAttributes, + expected: []string{"foo"}, + }) + + testCases = append(testCases, testCase{ + name: "json attribute with two elements", + args: []string{"foo", "bar"}, + attributes: jsonAttributes, + expected: []string{"foo", "bar"}, + }) + + testCases = append(testCases, testCase{ + name: "no attributes", + args: []string{"foo", "bar"}, + attributes: nil, + expected: []string{"foo bar"}, + }) + + return testCases +} + +func TestHandleJSONArgs(t *testing.T) { + testCases := initTestCases() + + for _, test := range testCases { + arguments := handleJSONArgs(test.args, test.attributes) + + if len(arguments) != len(test.expected) { + t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) + } + + for i := range test.expected { + if arguments[i] != test.expected[i] { + t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) + } + } + } +} diff --git a/builder/dockerfile/utils_test.go b/builder/dockerfile/utils_test.go new file mode 100644 index 0000000..80a3f1b --- /dev/null +++ b/builder/dockerfile/utils_test.go @@ -0,0 +1,50 @@ +package dockerfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. 
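+// A typical pairing with createTestTempDir above would be (illustrative
+// sketch only; the names are arbitrary):
+//
+//	dir, cleanup := createTestTempDir(t, "", "builder-test")
+//	defer cleanup()
+//	dockerfile := createTestTempFile(t, dir, "Dockerfile", "FROM busybox", 0777)
+//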
+// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// createTestSymlink creates a symlink file within dir which points to oldname +func createTestSymlink(t *testing.T, dir, filename, oldname string) string { + filePath := filepath.Join(dir, filename) + if err := os.Symlink(oldname, filePath); err != nil { + t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) + } + + return filePath +} diff --git a/builder/dockerfile/wordsTest b/builder/dockerfile/wordsTest new file mode 100644 index 0000000..1fd9f19 --- /dev/null +++ b/builder/dockerfile/wordsTest @@ -0,0 +1,30 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | error +hello\" there | hello",there +hello"\\there" | hello\there +hello"\there" | hello\there +hello'\\there' | hello\\there +hello'\there' | hello\there +hello'$there' | hello$there diff --git a/builder/dockerignore/dockerignore.go b/builder/dockerignore/dockerignore.go new file mode 100644 index 0000000..cc22381 --- /dev/null +++ b/builder/dockerignore/dockerignore.go @@ -0,0 +1,64 @@ +package dockerignore + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + // normalize absolute paths to paths relative to the context + // (taking care of '!' prefix) + invert := pattern[0] == '!' + if invert { + pattern = strings.TrimSpace(pattern[1:]) + } + if len(pattern) > 0 { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + if len(pattern) > 1 && pattern[0] == '/' { + pattern = pattern[1:] + } + } + if invert { + pattern = "!" 
+ pattern
+		}
+
+		excludes = append(excludes, pattern)
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("Error reading .dockerignore: %v", err)
+	}
+	return excludes, nil
+}
diff --git a/builder/dockerignore/dockerignore_test.go b/builder/dockerignore/dockerignore_test.go
new file mode 100644
index 0000000..bda3874
--- /dev/null
+++ b/builder/dockerignore/dockerignore_test.go
@@ -0,0 +1,69 @@
+package dockerignore
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestReadAll(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "dockerignore-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	di, err := ReadAll(nil)
+	if err != nil {
+		t.Fatalf("Expected not to have error, got %v", err)
+	}
+
+	if diLen := len(di); diLen != 0 {
+		t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen)
+	}
+
+	diName := filepath.Join(tmpDir, ".dockerignore")
+	content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile\n# this is a comment\n! /inverted/abs/path\n!\n! \n")
+	err = ioutil.WriteFile(diName, []byte(content), 0777)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	diFd, err := os.Open(diName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer diFd.Close()
+
+	di, err = ReadAll(diFd)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(di) != 7 {
+		t.Fatalf("Expected 7 entries, got %v", len(di))
+	}
+	if di[0] != "test1" {
+		t.Fatal("First element is not test1")
+	}
+	if di[1] != "test2" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar
+		t.Fatal("Second element is not test2")
+	}
+	if di[2] != "a/file/here" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar
+		t.Fatal("Third element is not a/file/here")
+	}
+	if di[3] != "lastfile" {
+		t.Fatal("Fourth element is not lastfile")
+	}
+	if di[4] != "!inverted/abs/path" {
+		t.Fatal("Fifth element is not !inverted/abs/path")
+	}
+	if di[5] != "!" {
+		t.Fatalf("Sixth element is not !, but %s", di[5])
+	}
+	if di[6] != "!" {
+		t.Fatalf("Seventh element is not !, but %s", di[6])
+	}
+}
diff --git a/builder/fscache/fscache.go b/builder/fscache/fscache.go
new file mode 100644
index 0000000..802db96
--- /dev/null
+++ b/builder/fscache/fscache.go
@@ -0,0 +1,602 @@
+package fscache
+
+import (
+	"encoding/json"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/boltdb/bolt"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/client/session/filesync"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/pkg/errors"
+	"github.com/tonistiigi/fsutil"
+	"golang.org/x/net/context"
+	"golang.org/x/sync/singleflight"
+)
+
+const dbFile = "fscache.db"
+const cacheKey = "cache"
+const metaKey = "meta"
+
+// Backend is a backing implementation for FSCache
+type Backend interface {
+	Get(id string) (string, error)
+	Remove(id string) error
+}
+
+// FSCache allows syncing remote resources to cached snapshots
+type FSCache struct {
+	opt        Opt
+	transports map[string]Transport
+	mu         sync.Mutex
+	g          singleflight.Group
+	store      *fsCacheStore
+}
+
+// Opt defines options for initializing FSCache
+type Opt struct {
+	Backend  Backend
+	Root     string // for storing local metadata
+	GCPolicy GCPolicy
+}
+
+// GCPolicy defines policy for garbage collection
+type GCPolicy struct {
+	MaxSize         uint64
+	MaxKeepDuration time.Duration
+}
+
+// NewFSCache returns new FSCache object
+func NewFSCache(opt Opt) (*FSCache, error) {
+	store, err := newFSCacheStore(opt)
+	if err != nil {
+		return nil, err
+	}
+	return &FSCache{
+		store:      store,
+		opt:        opt,
+		transports: make(map[string]Transport),
+	}, nil
+}
+
+// Transport defines a method for syncing remote data to FSCache
+type Transport interface {
+	Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error
+}
+
+// RemoteIdentifier identifies a transfer request
+type RemoteIdentifier interface {
+	Key() string
+	SharedKey() string
+	Transport() string
+}
+
+// RegisterTransport registers a new transport method
+func (fsc *FSCache) RegisterTransport(id string, transport Transport) error {
+	fsc.mu.Lock()
+	defer fsc.mu.Unlock()
+	if _, ok := fsc.transports[id]; ok {
+		return errors.Errorf("transport %v already exists", id)
+	}
+	fsc.transports[id] = transport
+	return nil
+}
+
+// SyncFrom returns a source based on a remote identifier
+func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt
+	transportID := id.Transport()
+	fsc.mu.Lock()
+	transport, ok := fsc.transports[id.Transport()]
+	if !ok {
+		fsc.mu.Unlock()
+		return nil, errors.Errorf("invalid transport %s", transportID)
+	}
+
+	logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey())
+	fsc.mu.Unlock()
+	sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) {
+		var sourceRef *cachedSourceRef
+		sourceRef, err := fsc.store.Get(id.Key())
+		if err == nil {
+			return sourceRef, nil
+		}
+
+		// check for unused shared cache
+		sharedKey := id.SharedKey()
+		if sharedKey != "" {
+			r, err := fsc.store.Rebase(sharedKey, id.Key())
+			if err == nil {
+				sourceRef = r
+			}
+		}
+
+		if sourceRef == nil {
+			var err error
+			sourceRef, err = fsc.store.New(id.Key(), sharedKey)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to create remote context")
+			}
+		}
+
+		if err := syncFrom(ctx, sourceRef, transport, id); err != nil {
+			sourceRef.Release()
+			return nil, err
+		}
+		if err := sourceRef.resetSize(-1); err != nil {
+			return nil, err
+		}
+		return sourceRef, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	ref := sourceRef.(*cachedSourceRef)
+	if ref.src == nil { // failsafe
+		return nil, errors.Errorf("invalid empty pull")
+	}
+	wc := &wrappedContext{Source: ref.src, closer: func() error {
+		ref.Release()
+		return nil
+	}}
+	return wc, nil
+}
+
+// DiskUsage reports how much data is allocated by the cache
+func (fsc *FSCache) DiskUsage() (int64, error) {
+	return fsc.store.DiskUsage()
+}
+
+// Prune allows manually cleaning up the cache
+func (fsc *FSCache) Prune() (uint64, error) {
+	return fsc.store.Prune()
+}
+
+// Close stops the gc and closes the persistent db
+func (fsc *FSCache) Close() error {
+	return fsc.store.Close()
+}
+
+func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) {
+	src := cs.src
+	if src == nil {
+		src = remotecontext.NewCachableSource(cs.Dir())
+	}
+
+	if !cs.cached {
+		if err := cs.storage.db.View(func(tx *bolt.Tx) error {
+			b := tx.Bucket([]byte(id.Key()))
+			dt := b.Get([]byte(cacheKey))
+			if dt != nil {
+				if err := src.UnmarshalBinary(dt); err != nil {
+					return err
+				}
+			} else {
+				return errors.Wrap(src.Scan(), "failed to scan cache records")
+			}
+			return nil
+		}); err != nil {
+			return err
+		}
+	}
+
+	dc := &detectChanges{f: src.HandleChange}
+
+	// todo: probably send a bucket to `Copy` and let it return source
+	// but need to make sure that tx is safe
+	if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil {
+		return errors.Wrapf(err, "failed to copy to %s", cs.Dir())
+	}
+
+	if !dc.supported {
+		if err := src.Scan(); err != nil {
+			return errors.Wrap(err, "failed to scan cache records after transfer")
+		}
+	}
+	cs.cached = true
+	cs.src = src
+	return cs.storage.db.Update(func(tx *bolt.Tx) error {
+		dt, err := src.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		b := tx.Bucket([]byte(id.Key()))
+		return b.Put([]byte(cacheKey), dt)
+	})
+}
+
+type fsCacheStore struct {
+	root     string
+	mu       sync.Mutex
+	sources  map[string]*cachedSource
+	db       *bolt.DB
+	fs       Backend
+	gcTimer  *time.Timer
+	gcPolicy GCPolicy
+}
+
+// CachePolicy defines policy for keeping a resource in cache
+type CachePolicy struct {
+	Priority int
+	LastUsed time.Time
+}
+
+func defaultCachePolicy() CachePolicy {
+	return CachePolicy{Priority: 10, LastUsed: time.Now()}
+}
+
+func newFSCacheStore(opt Opt) (*fsCacheStore, error) {
+	if err := os.MkdirAll(opt.Root, 0700); err != nil {
+		return nil, err
+	}
+	p := filepath.Join(opt.Root, dbFile)
+	db, err := bolt.Open(p, 0600, nil)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to open database file %s", p)
+	}
+	s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy}
+	db.View(func(tx *bolt.Tx) error {
+		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
+			dt := b.Get([]byte(metaKey))
+			if dt == nil {
+				return nil
+			}
+			var sm sourceMeta
+			if err := json.Unmarshal(dt, &sm); err != nil {
+				return err
+			}
+			dir, err := s.fs.Get(sm.BackendID)
+			if err != nil {
+				return err // TODO: handle gracefully
+			}
+			source := &cachedSource{
+				refs:       make(map[*cachedSourceRef]struct{}),
+				id:         string(name),
+				dir:        dir,
+				sourceMeta: sm,
+				storage:    s,
+			}
+			s.sources[string(name)] = source
+			return nil
+		})
+	})
+
+	s.gcTimer = s.startPeriodicGC(5 * time.Minute)
+	return s, nil
+}
+
+func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer {
+	var t *time.Timer
+	t = time.AfterFunc(interval, func() {
+		if err :=
s.GC(); err != nil { + logrus.Errorf("build gc error: %v", err) + } + t.Reset(interval) + }) + return t +} + +func (s *fsCacheStore) Close() error { + s.gcTimer.Stop() + return s.db.Close() +} + +func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + if err := s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(id)) + if err != nil { + return err + } + backendID := stringid.GenerateRandomID() + dir, err := s.fs.Get(backendID) + if err != nil { + return err + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: id, + dir: dir, + sourceMeta: sourceMeta{ + BackendID: backendID, + SharedKey: sharedKey, + CachePolicy: defaultCachePolicy(), + }, + storage: s, + } + dt, err := json.Marshal(source.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + s.sources[id] = source + ret = source + return nil + }); err != nil { + return nil, err + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + for id, snap := range s.sources { + if snap.SharedKey == sharedKey && len(snap.refs) == 0 { + if err := s.db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte(id)); err != nil { + return err + } + b, err := tx.CreateBucket([]byte(newid)) + if err != nil { + return err + } + snap.id = newid + snap.CachePolicy = defaultCachePolicy() + dt, err := json.Marshal(snap.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + delete(s.sources, id) + s.sources[newid] = snap + return nil + }); err != nil { + return nil, err + } + ret = snap + break + } + } + if ret == nil { + return nil, errors.Errorf("no candidate for rebase") + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + src, ok := s.sources[id] + if !ok { + return nil, errors.Errorf("not found") + } + return src.getRef(), nil +} + +// DiskUsage reports how much data is allocated by the cache +func (s *fsCacheStore) DiskUsage() (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size int64 + + for _, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize() + if err != nil { + return 0, err + } + size += ss + } + } + return size, nil +} + +// Prune allows manually cleaning up the cache +func (s *fsCacheStore) Prune() (uint64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize() + if err != nil { + return size, err + } + if err := s.delete(id); err != nil { + return size, errors.Wrapf(err, "failed to delete %s", id) + } + size += uint64(ss) + } + } + return size, nil +} + +// GC runs a garbage collector on FSCache +func (s *fsCacheStore) GC() error { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) + var blacklist []*cachedSource + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + if cutoff.After(snap.CachePolicy.LastUsed) { + if err := s.delete(id); err != nil { + return errors.Wrapf(err, "failed to delete %s", id) + } + } else { + ss, err := snap.getSize() + if err != nil { + return err + } + size += uint64(ss) + blacklist = append(blacklist, snap) + } + } + } + + 
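+	// blacklist now holds the unreferenced snapshots that survived the age
+	// cutoff; sortableCacheSources orders them by LastUsed (oldest first), so
+	// the loop below evicts the least recently used entries until the total
+	// size fits under gcPolicy.MaxSize.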
sort.Sort(sortableCacheSources(blacklist)) + for _, snap := range blacklist { + if size <= s.gcPolicy.MaxSize { + break + } + ss, err := snap.getSize() + if err != nil { + return err + } + if err := s.delete(snap.id); err != nil { + return errors.Wrapf(err, "failed to delete %s", snap.id) + } + size -= uint64(ss) + } + return nil +} + +// keep mu while calling this +func (s *fsCacheStore) delete(id string) error { + src, ok := s.sources[id] + if !ok { + return nil + } + if len(src.refs) > 0 { + return errors.Errorf("can't delete %s because it has active references", id) + } + delete(s.sources, id) + if err := s.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(id)) + }); err != nil { + return err + } + if err := s.fs.Remove(src.BackendID); err != nil { + return err + } + return nil +} + +type sourceMeta struct { + SharedKey string + BackendID string + CachePolicy CachePolicy + Size int64 +} + +type cachedSource struct { + sourceMeta + refs map[*cachedSourceRef]struct{} + id string + dir string + src *remotecontext.CachableSource + storage *fsCacheStore + cached bool // keep track if cache is up to date +} + +type cachedSourceRef struct { + *cachedSource +} + +func (cs *cachedSource) Dir() string { + return cs.dir +} + +// hold storage lock before calling +func (cs *cachedSource) getRef() *cachedSourceRef { + ref := &cachedSourceRef{cachedSource: cs} + cs.refs[ref] = struct{}{} + return ref +} + +// hold storage lock before calling +func (cs *cachedSource) getSize() (int64, error) { + if cs.sourceMeta.Size < 0 { + ss, err := directory.Size(cs.dir) + if err != nil { + return 0, err + } + if err := cs.resetSize(ss); err != nil { + return 0, err + } + return ss, nil + } + return cs.sourceMeta.Size, nil +} + +func (cs *cachedSource) resetSize(val int64) error { + cs.sourceMeta.Size = val + return cs.saveMeta() +} +func (cs *cachedSource) saveMeta() error { + return cs.storage.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(cs.id)) + dt, err := json.Marshal(cs.sourceMeta) + if err != nil { + return err + } + return b.Put([]byte(metaKey), dt) + }) +} + +func (csr *cachedSourceRef) Release() error { + csr.cachedSource.storage.mu.Lock() + defer csr.cachedSource.storage.mu.Unlock() + delete(csr.cachedSource.refs, csr) + if len(csr.cachedSource.refs) == 0 { + go csr.cachedSource.storage.GC() + } + return nil +} + +type detectChanges struct { + f fsutil.ChangeFunc + supported bool +} + +func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { + if dc == nil { + return nil + } + return dc.f(kind, path, fi, err) +} + +func (dc *detectChanges) MarkSupported(v bool) { + if dc == nil { + return + } + dc.supported = v +} + +type wrappedContext struct { + builder.Source + closer func() error +} + +func (wc *wrappedContext) Close() error { + if err := wc.Source.Close(); err != nil { + return err + } + return wc.closer() +} + +type sortableCacheSources []*cachedSource + +// Len is the number of elements in the collection. +func (s sortableCacheSources) Len() int { + return len(s) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (s sortableCacheSources) Less(i, j int) bool { + return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) +} + +// Swap swaps the elements with indexes i and j. 
+func (s sortableCacheSources) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/builder/fscache/fscache_test.go b/builder/fscache/fscache_test.go new file mode 100644 index 0000000..c7c0531 --- /dev/null +++ b/builder/fscache/fscache_test.go @@ -0,0 +1,131 @@ +package fscache + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/client/session/filesync" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestFSCache(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "fscache") + assert.Nil(t, err) + defer os.RemoveAll(tmpDir) + + backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend")) + + opt := Opt{ + Root: tmpDir, + Backend: backend, + GCPolicy: GCPolicy{MaxSize: 15, MaxKeepDuration: time.Hour}, + } + + fscache, err := NewFSCache(opt) + assert.Nil(t, err) + + defer fscache.Close() + + err = fscache.RegisterTransport("test", &testTransport{}) + assert.Nil(t, err) + + src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"}) + assert.Nil(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data") + + // same id doesn't recalculate anything + src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"}) + assert.Nil(t, err) + assert.Equal(t, src1.Root(), src2.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data") + assert.Nil(t, src2.Close()) + + src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"}) + assert.Nil(t, err) + assert.NotEqual(t, src1.Root(), src3.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data2") + + s, err := fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(0)) + + assert.Nil(t, src3.Close()) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(5)) + + // new upload with the same shared key shoutl overwrite + src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"}) + assert.Nil(t, err) + assert.NotEqual(t, src1.Root(), src3.Root()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3")) + assert.Nil(t, err) + assert.Equal(t, string(dt), "data3") + assert.Equal(t, src4.Root(), src3.Root()) + assert.Nil(t, src4.Close()) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(10)) + + // this one goes over the GC limit + src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"}) + assert.Nil(t, err) + assert.Nil(t, src5.Close()) + + // GC happens async + time.Sleep(100 * time.Millisecond) + + // only last insertion after GC + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(8)) + + // prune deletes everything + released, err := fscache.Prune() + assert.Nil(t, err) + assert.Equal(t, released, uint64(8)) + + s, err = fscache.DiskUsage() + assert.Nil(t, err) + assert.Equal(t, s, int64(0)) +} + +type testTransport struct { +} + +func (t *testTransport) Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error { + testid := id.(*testIdentifier) + return ioutil.WriteFile(filepath.Join(dest, testid.filename), []byte(testid.data), 0600) +} + +type testIdentifier struct { + filename string + data string + sharedKey string +} + +func (t *testIdentifier) Key() string { + return 
t.filename +} +func (t *testIdentifier) SharedKey() string { + return t.sharedKey +} +func (t *testIdentifier) Transport() string { + return "test" +} diff --git a/builder/fscache/naivedriver.go b/builder/fscache/naivedriver.go new file mode 100644 index 0000000..f40ee57 --- /dev/null +++ b/builder/fscache/naivedriver.go @@ -0,0 +1,28 @@ +package fscache + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// NewNaiveCacheBackend is a basic backend implementation for fscache +func NewNaiveCacheBackend(root string) Backend { + return &naiveCacheBackend{root: root} +} + +type naiveCacheBackend struct { + root string +} + +func (tcb *naiveCacheBackend) Get(id string) (string, error) { + d := filepath.Join(tcb.root, id) + if err := os.MkdirAll(d, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) + } + return d, nil +} +func (tcb *naiveCacheBackend) Remove(id string) error { + return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) +} diff --git a/builder/remotecontext/archive.go b/builder/remotecontext/archive.go new file mode 100644 index 0000000..f48cafe --- /dev/null +++ b/builder/remotecontext/archive.go @@ -0,0 +1,128 @@ +package remotecontext + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/tarsum" + "github.com/pkg/errors" +) + +type archiveContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *archiveContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +type modifiableContext interface { + builder.Source + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FromArchive returns a build source from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func FromArchive(tarStream io.Reader) (builder.Source, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &archiveContext{root: root} + + // Make sure we clean-up upon error. 
In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *archiveContext) Root() string { + return c.root +} + +func (c *archiveContext) Remove(path string) error { + _, fullpath, err := normalize(path, c.root) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} + +func (c *archiveContext) Hash(path string) (string, error) { + cleanpath, fullpath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", convertPathError(err, cleanpath) + } + + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + return tsInfo.Sum(), nil + } + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. + return path, nil // backwards compat TODO: see if really needed +} + +func normalize(path, root string) (cleanPath, fullPath string, err error) { + cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) + } + if _, err := os.Lstat(fullPath); err != nil { + return "", "", errors.WithStack(convertPathError(err, path)) + } + return +} diff --git a/builder/remotecontext/detect.go b/builder/remotecontext/detect.go new file mode 100644 index 0000000..4345736 --- /dev/null +++ b/builder/remotecontext/detect.go @@ -0,0 +1,184 @@ +package remotecontext + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +// ClientSessionRemote is identifier for client-session context transport +const ClientSessionRemote = "client-session" + +// Detect returns a context and dockerfile from remote location or local +// archive. progressReader is only used if remoteURL is actually a URL +// (not empty, and not a Git endpoint). 
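+//
+// The dispatch below is, roughly:
+//
+//	""               -> local tar archive streamed via config.Source
+//	"client-session" -> no context here; only the Dockerfile is parsed
+//	                    (the context comes later via the client session)
+//	git URL          -> cloned by newGitRemote
+//	other URL        -> downloaded by newURLRemote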
+func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case remoteURL == ClientSessionRemote: + res, err := parser.Parse(config.Source) + if err != nil { + return nil, nil, err + } + return nil, res, nil + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + defer rc.Close() + c, err := FromArchive(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + var dockerfile io.ReadCloser + dockerfileFoundErr := errors.New("found-dockerfile") + c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile = rc + return nil, dockerfileFoundErr + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return progressReader(rc), nil + }, + }) + switch { + case err == dockerfileFoundErr: + res, err := parser.Parse(dockerfile) + return nil, res, err + case err != nil: + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + f.Close() + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
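+	// .dockerignore itself is prepended above so that, like the Dockerfile,
+	// it is dropped from the build context when it matches one of its own
+	// exclusion patterns.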
+ for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + return parser.Parse(br) +} + +func openAt(remote builder.Source, path string) (*os.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return os.Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root()) + if err != nil { + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/builder/remotecontext/detect_test.go b/builder/remotecontext/detect_test.go new file mode 100644 index 0000000..6b47ac2 --- /dev/null +++ b/builder/remotecontext/detect_test.go @@ -0,0 +1,123 @@ +package remotecontext + +import ( + "errors" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/docker/builder" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" +) + +const shouldStayFilename = "should_stay" + +func extractFilenames(files []os.FileInfo) []string { + filenames := make([]string, len(files), len(files)) + + for i, file := range files { + filenames[i] = file.Name() + } + + return filenames +} + +func checkDirectory(t *testing.T, dir string, expectedFiles []string) { + files, err := ioutil.ReadDir(dir) + + if err != nil { + t.Fatalf("Could not read directory: %s", err) + } + + if len(files) != len(expectedFiles) { + log.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) + } + + filenames := extractFilenames(files) + sort.Strings(filenames) + sort.Strings(expectedFiles) + + for i, filename := range filenames { + if filename != expectedFiles[i] { + t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) + } + } +} + +func executeProcess(t *testing.T, contextDir string) { + modifiableCtx := &stubRemote{root: contextDir} + + err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when executing Process: %s", err) + } +} + +func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, 
[]string{shouldStayFilename}) + +} + +func TestProcessNoDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName}) + +} + +func TestProcessShouldLeaveAllFiles(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName, dockerignoreFilename}) + +} + +// TODO: remove after moving to a separate pkg +type stubRemote struct { + root string +} + +func (r *stubRemote) Hash(path string) (string, error) { + return "", errors.New("not implemented") +} + +func (r *stubRemote) Root() string { + return r.root +} +func (r *stubRemote) Close() error { + return errors.New("not implemented") +} +func (r *stubRemote) Remove(p string) error { + return os.Remove(filepath.Join(r.root, p)) +} diff --git a/builder/remotecontext/filehash.go b/builder/remotecontext/filehash.go new file mode 100644 index 0000000..4172302 --- /dev/null +++ b/builder/remotecontext/filehash.go @@ -0,0 +1,45 @@ +package remotecontext + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := archive.FileInfoHeader(name, fi, link) + if err != nil { + return nil, err + } + if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/builder/remotecontext/generate.go b/builder/remotecontext/generate.go new file mode 100644 index 0000000..0b52d49 --- /dev/null +++ b/builder/remotecontext/generate.go @@ -0,0 +1,3 @@ +package remotecontext + +//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/builder/remotecontext/git.go b/builder/remotecontext/git.go new file mode 100644 index 0000000..158bb5a --- /dev/null +++ b/builder/remotecontext/git.go @@ -0,0 +1,29 @@ +package remotecontext + +import ( + "os" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. 
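+//
+// For example, a reference of the form
+//
+//	https://github.com/user/repo.git#mybranch:mydir
+//
+// clones repo.git, checks out mybranch, and uses mydir inside the checkout as
+// the context root (the ref/subdir fragment is handled by git.Clone in
+// builder/remotecontext/git).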
+func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := git.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? + c.Close() + os.RemoveAll(root) + }() + return FromArchive(c) +} diff --git a/builder/remotecontext/git/gitutils.go b/builder/remotecontext/git/gitutils.go new file mode 100644 index 0000000..b94d462 --- /dev/null +++ b/builder/remotecontext/git/gitutils.go @@ -0,0 +1,159 @@ +package git + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type gitRepo struct { + remote string + ref string + subdir string +} + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + repo, err := parseRemoteURL(remoteURL) + + if err != nil { + return "", err + } + + fetch := fetchArgs(repo.remote, repo.ref) + + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + if out, err := gitWithinDir(root, "init"); err != nil { + return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out) + } + + // Add origin remote for compatibility with previous implementation that + // used "git clone" and also to make sure local refs are created for branches + if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { + return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out) + } + + if output, err := gitWithinDir(root, fetch...); err != nil { + return "", errors.Wrapf(err, "error fetching: %s", output) + } + + return checkoutGit(root, repo.ref, repo.subdir) +} + +func parseRemoteURL(remoteURL string) (gitRepo, error) { + repo := gitRepo{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. is not an URL, so cannot be parsed as URL + parts := strings.SplitN(remoteURL, "#", 2) + + repo.remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.ref, repo.subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return repo, err + } + + repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.remote = u.String() + } + return repo, nil +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} + +func fetchArgs(remoteURL string, ref string) []string { + args := []string{"fetch", "--recurse-submodules=yes"} + shallow := true + + if urlutil.IsURL(remoteURL) { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + return append(args, "origin", ref) +} + +func checkoutGit(root, ref, subdir string) (string, error) { + // Try checking out by ref name first. 
This will work on branches and sets + // .git/HEAD to the current branch name + if output, err := gitWithinDir(root, "checkout", ref); err != nil { + // If checking out by branch name fails check out the last fetched ref + if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { + return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) + } + } + + if subdir != "" { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) + if err != nil { + return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func isGitTransport(str string) bool { + return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/builder/remotecontext/git/gitutils_test.go b/builder/remotecontext/git/gitutils_test.go new file mode 100644 index 0000000..c638a49 --- /dev/null +++ b/builder/remotecontext/git/gitutils_test.go @@ -0,0 +1,238 @@ +package git + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseRemoteURL(t *testing.T) { + dir, err := parseRemoteURL("git://github.com/user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("git://github.com/user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + + dir, err = parseRemoteURL("https://github.com/user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("https://github.com/user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + + dir, err = parseRemoteURL("git@github.com:user/repo.git") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "master", ""}, dir) + + dir, err = parseRemoteURL("git@github.com:user/repo.git#mybranch:mydir/mysubdir/") + require.NoError(t, err) + assert.NotEmpty(t, dir) + assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) +} + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", 
fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--recurse-submodules=yes", "--depth", "1", "origin", "master"} + assert.Equal(t, exp, args) +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--recurse-submodules=yes", "origin", "master"} + assert.Equal(t, exp, args) +} + +func TestCloneArgsGit(t *testing.T) { + args := fetchArgs("git://github.com/docker/docker", "master") + exp := []string{"fetch", "--recurse-submodules=yes", "--depth", "1", "origin", "master"} + assert.Equal(t, exp, args) +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are interested in empty or non empty string, + // we can safely ignore the err here. + return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + require.NoError(t, err) + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644) + require.NoError(t, err) + + subDir := filepath.Join(gitDir, "subdir") + require.NoError(t, os.Mkdir(subDir, 0755)) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644) + require.NoError(t, err) + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + _, err = gitWithinDir(gitDir, "add", "-A") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "First commit") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "-b", "test") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644) + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644) + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "add", "-A") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit") + require.NoError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "master") + require.NoError(t, err) + + type singleCase struct { + frag string + exp string + fail bool + } + + cases := []singleCase{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {":nosubdir", "", true}, // 
missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + ref, subdir := getRefAndSubdir(c.frag) + r, err := checkoutGit(gitDir, ref, subdir) + + if c.fail { + assert.Error(t, err) + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + require.NoError(t, err) + assert.Equal(t, c.exp, string(b)) + } +} + +func TestValidGitTransport(t *testing.T) { + gitUrls := []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls := []string{ + "github.com/docker/docker", + } + + for _, url := range gitUrls { + if !isGitTransport(url) { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if isGitTransport(url) { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} diff --git a/builder/remotecontext/lazycontext.go b/builder/remotecontext/lazycontext.go new file mode 100644 index 0000000..b29c413 --- /dev/null +++ b/builder/remotecontext/lazycontext.go @@ -0,0 +1,101 @@ +package remotecontext + +import ( + "encoding/hex" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/pools" + "github.com/pkg/errors" +) + +// NewLazySource creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed first time +// they are asked. It is not safe to call methods of LazyContext concurrently. 
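+//
+// A short sketch of the intended call pattern (path is illustrative):
+//
+//	src, _ := NewLazySource("/path/to/build/context")
+//	sum, err := src.Hash("Dockerfile") // hashed on first request, then served from the sums cache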
+func NewLazySource(root string) (builder.Source, error) { + return &lazySource{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazySource struct { + root string + sums map[string]string +} + +func (c *lazySource) Root() string { + return c.root +} + +func (c *lazySource) Close() error { + return nil +} + +func (c *lazySource) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + fi, err := os.Lstat(fullPath) + if err != nil { + return "", errors.WithStack(err) + } + + relPath, err := Rel(c.root, fullPath) + if err != nil { + return "", errors.WithStack(convertPathError(err, cleanPath)) + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return "", err + } + } + + return sum, nil +} + +func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := filepath.Join(c.root, relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := os.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func Rel(basepath, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if runtime.GOOS == "windows" { + pfx := basepath + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return filepath.Rel(basepath, targpath) +} diff --git a/builder/remotecontext/mimetype.go b/builder/remotecontext/mimetype.go new file mode 100644 index 0000000..083d609 --- /dev/null +++ b/builder/remotecontext/mimetype.go @@ -0,0 +1,27 @@ +package remotecontext + +import ( + "mime" + "net/http" +) + +// mimeTypes stores the MIME content type. +var mimeTypes = struct { + TextPlain string + OctetStream string +}{"text/plain", "application/octet-stream"} + +// detectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. 
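+//
+// For example, plain ASCII bytes such as []byte("FROM busybox") should yield
+// ("text/plain", map[string]string{"charset": "utf-8"}, nil), because
+// http.DetectContentType reports "text/plain; charset=utf-8" for them.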
+func detectContentType(c []byte) (string, map[string]string, error) { + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + return contentType, args, nil +} diff --git a/builder/remotecontext/mimetype_test.go b/builder/remotecontext/mimetype_test.go new file mode 100644 index 0000000..8c00ec2 --- /dev/null +++ b/builder/remotecontext/mimetype_test.go @@ -0,0 +1,16 @@ +package remotecontext + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + contentType, _, err := detectContentType(input) + require.NoError(t, err) + assert.Equal(t, "text/plain", contentType) +} diff --git a/builder/remotecontext/remote.go b/builder/remotecontext/remote.go new file mode 100644 index 0000000..4afd516 --- /dev/null +++ b/builder/remotecontext/remote.go @@ -0,0 +1,134 @@ +package remotecontext + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + + "github.com/docker/docker/builder" + "github.com/pkg/errors" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. +// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detecting the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) { + f, err := GetWithStatusError(remoteURL) + if err != nil { + return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts). + for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. 
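+	// If handlers were supplied but none matched (not even the "" fallback),
+	// contextReader still wraps the downloaded body and is consumed by
+	// FromArchive as a plain, possibly compressed, tar stream.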
+ return FromArchive(contextReader) +} + +// GetWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. +func GetWithStatusError(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errors.Wrapf(err, msg+": error reading body") + } + return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body)) +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. +func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble[:rlen]) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == mimeTypes.OctetStream { + contentType, _, err = detectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/builder/remotecontext/remote_test.go b/builder/remotecontext/remote_test.go new file mode 100644 index 0000000..c698726 --- /dev/null +++ b/builder/remotecontext/remote_test.go @@ -0,0 +1,263 @@ +package remotecontext + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + t.Fatalf("Should not have accepted %q", m) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + t.Fatalf("Should have accepted %q", m) 
+ } + } +} + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatal("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(dockerfileContents) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) + + if err == nil { + t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(dockerfileContents) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestUnknownContentLength(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, -1) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) 
!= dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestMakeRemoteContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/" + builder.DefaultDockerfileName + remoteURL := serverURL.String() + + mux.Handle("/", http.FileServer(http.Dir(contextDir))) + + remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + r, err := archive.Generate(builder.DefaultDockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil + }, + }) + + if err != nil { + t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) + } + + if remoteContext == nil { + t.Fatal("Remote context should not be nil") + } + + h, err := remoteContext.Hash(builder.DefaultDockerfileName) + if err != nil { + t.Fatalf("failed to compute hash %s", err) + } + + if expected, actual := "7b6b6b66bee9e2102fbdc2228be6c980a2a23adf371962a37286a49f7de0f7cc", h; expected != actual { + t.Fatalf("There should be file named %s %s in fileInfoSums", expected, actual) + } +} + +func TestGetWithStatusError(t *testing.T) { + var testcases = []struct { + err error + statusCode int + expectedErr string + expectedBody string + }{ + { + statusCode: 200, + expectedBody: "THE BODY", + }, + { + statusCode: 400, + expectedErr: "with status 400 Bad Request: broke", + expectedBody: "broke", + }, + } + for _, testcase := range testcases { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buffer := bytes.NewBufferString(testcase.expectedBody) + w.WriteHeader(testcase.statusCode) + w.Write(buffer.Bytes()) + }), + ) + defer ts.Close() + response, err := GetWithStatusError(ts.URL) + + if testcase.expectedErr == "" { + require.NoError(t, err) + + body, err := testutil.ReadBody(response.Body) + require.NoError(t, err) + assert.Contains(t, string(body), testcase.expectedBody) + } else { + testutil.ErrorContains(t, err, testcase.expectedErr) + } + } +} diff --git a/builder/remotecontext/tarsum.go b/builder/remotecontext/tarsum.go new file mode 100644 index 0000000..3ae9d82 --- /dev/null +++ b/builder/remotecontext/tarsum.go @@ -0,0 +1,174 @@ +package remotecontext + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/symlink" + iradix "github.com/hashicorp/go-immutable-radix" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +type hashed interface { + Hash() string +} + +// CachableSource is a source that contains cache records for its contents +type CachableSource struct { + mu sync.Mutex + root string + tree *iradix.Tree + txn *iradix.Txn +} + +// NewCachableSource creates new CachableSource +func NewCachableSource(root string) *CachableSource { + ts := &CachableSource{ + tree: iradix.New(), + root: root, + } + return ts +} + +// MarshalBinary marshals current cache information to a byte array +func (cs *CachableSource) MarshalBinary() ([]byte, error) { + b := TarsumBackup{Hashes: make(map[string]string)} + root := cs.getRoot() + root.Walk(func(k []byte, v interface{}) bool { + b.Hashes[string(k)] = 
v.(*fileInfo).sum + return false + }) + return b.Marshal() +} + +// UnmarshalBinary decodes cache information for presented byte array +func (cs *CachableSource) UnmarshalBinary(data []byte) error { + var b TarsumBackup + if err := b.Unmarshal(data); err != nil { + return err + } + txn := iradix.New().Txn() + for p, v := range b.Hashes { + txn.Insert([]byte(p), &fileInfo{sum: v}) + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// Scan rescans the cache information from the file system +func (cs *CachableSource) Scan() error { + lc, err := NewLazySource(cs.root) + if err != nil { + return err + } + txn := iradix.New().Txn() + err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := Rel(cs.root, path) + if err != nil { + return err + } + h, err := lc.Hash(rel) + if err != nil { + return err + } + txn.Insert([]byte(rel), &fileInfo{sum: h}) + return nil + }) + if err != nil { + return err + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// HandleChange notifies the source about a modification operation +func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + cs.mu.Lock() + if cs.txn == nil { + cs.txn = cs.tree.Txn() + } + if kind == fsutil.ChangeKindDelete { + cs.txn.Delete([]byte(p)) + cs.mu.Unlock() + return + } + + h, ok := fi.(hashed) + if !ok { + cs.mu.Unlock() + return errors.Errorf("invalid fileinfo: %s", p) + } + + hfi := &fileInfo{ + sum: h.Hash(), + } + cs.txn.Insert([]byte(p), hfi) + cs.mu.Unlock() + return nil +} + +func (cs *CachableSource) getRoot() *iradix.Node { + cs.mu.Lock() + if cs.txn != nil { + cs.tree = cs.txn.Commit() + cs.txn = nil + } + t := cs.tree + cs.mu.Unlock() + return t.Root() +} + +// Close closes the source +func (cs *CachableSource) Close() error { + return nil +} + +func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +// Hash returns a hash for a single file in the source +func (cs *CachableSource) Hash(path string) (string, error) { + n := cs.getRoot() + sum := "" + // TODO: check this for symlinks + v, ok := n.Get([]byte(path)) + if !ok { + sum = path + } else { + sum = v.(*fileInfo).sum + } + return sum, nil +} + +// Root returns a root directory for the source +func (cs *CachableSource) Root() string { + return cs.root +} + +type fileInfo struct { + sum string +} + +func (fi *fileInfo) Hash() string { + return fi.sum +} diff --git a/builder/remotecontext/tarsum.pb.go b/builder/remotecontext/tarsum.pb.go new file mode 100644 index 0000000..561a7f6 --- /dev/null +++ b/builder/remotecontext/tarsum.pb.go @@ -0,0 +1,525 @@ +// Code generated by protoc-gen-gogo. +// source: tarsum.proto +// DO NOT EDIT! + +/* +Package remotecontext is a generated protocol buffer package. 
+ +It is generated from these files: + tarsum.proto + +It has these top-level messages: + TarsumBackup +*/ +package remotecontext + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TarsumBackup struct { + Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } +func (*TarsumBackup) ProtoMessage() {} +func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } + +func (m *TarsumBackup) GetHashes() map[string]string { + if m != nil { + return m.Hashes + } + return nil +} + +func init() { + proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") +} +func (this *TarsumBackup) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TarsumBackup) + if !ok { + that2, ok := that.(TarsumBackup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Hashes) != len(that1.Hashes) { + return false + } + for i := range this.Hashes { + if this.Hashes[i] != that1.Hashes[i] { + return false + } + } + return true +} +func (this *TarsumBackup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&remotecontext.TarsumBackup{") + keysForHashes := make([]string, 0, len(this.Hashes)) + for k, _ := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + if this.Hashes != nil { + s = append(s, "Hashes: "+mapStringForHashes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTarsum(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hashes) > 0 { + for k, _ := range m.Hashes { + dAtA[i] = 0xa + i++ + v := m.Hashes[k] + mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(k))) + i 
+= copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TarsumBackup) Size() (n int) { + var l int + _ = l + if len(m.Hashes) > 0 { + for k, v := range m.Hashes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) + } + } + return n +} + +func sovTarsum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTarsum(x uint64) (n int) { + return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TarsumBackup) String() string { + if this == nil { + return "nil" + } + keysForHashes := make([]string, 0, len(this.Hashes)) + for k, _ := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + s := strings.Join([]string{`&TarsumBackup{`, + `Hashes:` + mapStringForHashes + `,`, + `}`, + }, "") + return s +} +func valueToStringTarsum(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TarsumBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTarsum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hashes == nil { + m.Hashes = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Hashes[mapkey] = mapvalue + } else { + var mapvalue string + m.Hashes[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTarsum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTarsum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTarsum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTarsum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + 
break
+				}
+				next, err := skipTarsum(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTarsum   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) }
+
+var fileDescriptorTarsum = []byte{
+	// 196 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a,
+	0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49,
+	0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b,
+	0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b,
+	0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51,
+	0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24,
+	0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89,
+	0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b,
+	0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1,
+	0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7,
+	0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c,
+	0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f,
+	0xe0, 0x00, 0x00, 0x00,
+}
diff --git a/builder/remotecontext/tarsum.proto b/builder/remotecontext/tarsum.proto
new file mode 100644
index 0000000..cb94240
--- /dev/null
+++ b/builder/remotecontext/tarsum.proto
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package remotecontext; // no namespace because only used internally
+
+message TarsumBackup {
+  map<string, string> Hashes = 1;
+}
\ No newline at end of file
diff --git a/builder/remotecontext/tarsum_test.go b/builder/remotecontext/tarsum_test.go
new file mode 100644
index 0000000..8a9d69b
--- /dev/null
+++ b/builder/remotecontext/tarsum_test.go
@@ -0,0 +1,157 @@
+package remotecontext
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/pkg/errors"
+)
+
+const (
+	filename = "test"
+	contents = "contents test"
+)
+
+func init() {
+	reexec.Init()
+}
+
+func TestCloseRootDirectory(t *testing.T) {
+	contextDir, err := ioutil.TempDir("", "builder-tarsum-test")
+	defer os.RemoveAll(contextDir)
+	if err != nil {
+		t.Fatalf("Error with creating temporary directory: %s", err)
+	}
+
+	src := makeTestArchiveContext(t, contextDir)
+	err = src.Close()
+
+	if err != nil {
+		t.Fatalf("Error while executing Close: %s", err)
+	}
+
+	_, err = os.Stat(src.Root())
+
+	if !os.IsNotExist(err) {
+		t.Fatal("Directory should not exist at this point")
+	}
+}
+
+func TestHashFile(t *testing.T) {
+	contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test")
+	defer cleanup()
+
+	createTestTempFile(t, contextDir, filename, contents, 0755)
+
+	tarSum := makeTestArchiveContext(t, contextDir)
+
+	sum, err := tarSum.Hash(filename)
+
+	if err != nil {
t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "1149ab94af7be6cc1da1335e398f24ee1cf4926b720044d229969dfc248ae7ec" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. expected %s, got %s", expected, actual) + } +} + +func TestHashSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := filepath.Join(contextDir, "builder-tarsum-test-subdir") + err := os.Mkdir(contextSubdir, 0755) + if err != nil { + t.Fatalf("Failed to make directory: %s", contextSubdir) + } + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + sum, err := tarSum.Hash(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "d7f8d6353dee4816f9134f4156bf6a9d470fdadfb5d89213721f7e86744a4e69" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. expected %s, got %s", expected, actual) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + src := makeTestArchiveContext(t, contextDir) + _, err := src.Hash("not-existing") + if !os.IsNotExist(errors.Cause(err)) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + + tarSum := src.(modifiableContext) + + err = tarSum.Remove(relativePath) + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = src.Hash(contextSubdir) + + if !os.IsNotExist(errors.Cause(err)) { + t.Fatal("Directory should not exist at this point") + } +} + +func makeTestArchiveContext(t *testing.T, dir string) builder.Source { + tarStream, err := archive.Tar(dir, archive.Uncompressed) + if err != nil { + t.Fatalf("error: %s", err) + } + defer tarStream.Close() + tarSum, err := FromArchive(tarStream) + if err != nil { + t.Fatalf("Error when executing FromArchive: %s", err) + } + return tarSum +} diff --git a/builder/remotecontext/utils_test.go b/builder/remotecontext/utils_test.go new file mode 100644 index 0000000..1e23ab4 --- /dev/null +++ b/builder/remotecontext/utils_test.go @@ -0,0 +1,55 @@ +package remotecontext + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. 
+func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempSubdir creates a temporary directory for testing. +// It returns the created path but doesn't provide a cleanup function, +// so createTestTempSubdir should be used only for creating temporary subdirectories +// whose parent directories are properly cleaned up. +// When an error occurs, it terminates the test. +func createTestTempSubdir(t *testing.T, dir, prefix string) string { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} diff --git a/cli/cobra.go b/cli/cobra.go new file mode 100644 index 0000000..c7bb39c --- /dev/null +++ b/cli/cobra.go @@ -0,0 +1,150 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func 
hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. +{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/cli/config/configdir.go b/cli/config/configdir.go new file mode 100644 index 0000000..de22577 --- /dev/null +++ b/cli/config/configdir.go @@ -0,0 +1,25 @@ +package config + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/homedir" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") + configFileDir = ".docker" +) + +// Dir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable. +// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves +func Dir() string { + return configDir +} + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} diff --git a/cli/debug/debug.go b/cli/debug/debug.go new file mode 100644 index 0000000..51dfab2 --- /dev/null +++ b/cli/debug/debug.go @@ -0,0 +1,26 @@ +package debug + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// Enable sets the DEBUG env var to true +// and makes the logger to log at debug level. +func Enable() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// Disable sets the DEBUG env var to false +// and makes the logger to log at info level. +func Disable() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsEnabled checks whether the debug flag is set or not. 
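+// It reports true after Enable and false after Disable, since both toggle the
+// same DEBUG environment variable that this check reads.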
+func IsEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/cli/debug/debug_test.go b/cli/debug/debug_test.go new file mode 100644 index 0000000..ad8412a --- /dev/null +++ b/cli/debug/debug_test.go @@ -0,0 +1,43 @@ +package debug + +import ( + "os" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestEnable(t *testing.T) { + defer func() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) + }() + Enable() + if os.Getenv("DEBUG") != "1" { + t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.DebugLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) + } +} + +func TestDisable(t *testing.T) { + Disable() + if os.Getenv("DEBUG") != "" { + t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.InfoLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) + } +} + +func TestEnabled(t *testing.T) { + Enable() + if !IsEnabled() { + t.Fatal("expected debug enabled, got false") + } + Disable() + if IsEnabled() { + t.Fatal("expected debug disabled, got true") + } +} diff --git a/cli/error.go b/cli/error.go new file mode 100644 index 0000000..62f6243 --- /dev/null +++ b/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/cli/required.go b/cli/required.go new file mode 100644 index 0000000..d56bc21 --- /dev/null +++ b/cli/required.go @@ -0,0 +1,27 @@ +package cli + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return errors.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} diff --git a/client/README.md b/client/README.md new file mode 100644 index 0000000..059dfb3 --- /dev/null +++ b/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
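+
+When created with `client.NewEnvClient`, the client picks up its configuration from the standard Docker environment variables (such as `DOCKER_HOST` and `DOCKER_API_VERSION`).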
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/client/build_prune.go b/client/build_prune.go new file mode 100644 index 0000000..ccab115 --- /dev/null +++ b/client/build_prune.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + if err != nil { + return nil, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/client/checkpoint_create.go b/client/checkpoint_create.go new file mode 100644 index 0000000..0effe49 --- /dev/null +++ b/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/checkpoint_create_test.go b/client/checkpoint_create_test.go new file mode 100644 index 0000000..96e5187 --- /dev/null +++ b/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := 
&types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/client/checkpoint_delete.go b/client/checkpoint_delete.go new file mode 100644 index 0000000..e6e7558 --- /dev/null +++ b/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/checkpoint_delete_test.go b/client/checkpoint_delete_test.go new file mode 100644 index 0000000..a78b050 --- /dev/null +++ b/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/client/checkpoint_list.go b/client/checkpoint_list.go new file mode 100644 index 0000000..ffe44bc --- /dev/null +++ b/client/checkpoint_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options 
types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return checkpoints, containerNotFoundError{container} + } + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/client/checkpoint_list_test.go b/client/checkpoint_list_test.go new file mode 100644 index 0000000..3884657 --- /dev/null +++ b/client/checkpoint_list_test.go @@ -0,0 +1,68 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} + +func TestCheckpointListContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "unknown", types.CheckpointListOptions{}) + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} diff --git a/client/client.go b/client/client.go new file mode 100644 index 0000000..222aa50 --- /dev/null +++ b/client/client.go @@ -0,0 +1,309 @@ +/* +Package client is a Go client for the Docker Engine API. + +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +- running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. 
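+
+As a minimal sketch of manual configuration (the host and version strings
+below are placeholders, not defaults):
+
+	cli, err := client.NewClient("unix:///var/run/docker.sock", "1.30", nil, nil)
+	if err != nil {
+		panic(err)
+	}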
+
+For example, to list running containers (the equivalent of "docker ps"):
+
+	package main
+
+	import (
+		"context"
+		"fmt"
+
+		"github.com/docker/docker/api/types"
+		"github.com/docker/docker/client"
+	)
+
+	func main() {
+		cli, err := client.NewEnvClient()
+		if err != nil {
+			panic(err)
+		}
+
+		containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+		if err != nil {
+			panic(err)
+		}
+
+		for _, container := range containers {
+			fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+		}
+	}
+
+*/
+package client
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+	"golang.org/x/net/context"
+)
+
+// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+	// scheme sets the scheme for the client
+	scheme string
+	// host holds the server address to connect to
+	host string
+	// proto holds the client protocol i.e. unix.
+	proto string
+	// addr holds the client address.
+	addr string
+	// basePath holds the path to prepend to the requests.
+	basePath string
+	// client used to send and receive http requests.
+	client *http.Client
+	// version of the server to talk to.
+	version string
+	// custom http headers configured by users.
+	customHTTPHeaders map[string]string
+	// manualOverride is set to true when the version was set by users.
+	manualOverride bool
+}
+
+// CheckRedirect specifies the policy for dealing with redirect responses:
+// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
+//
+// Go 1.8 changed the behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
+// The Docker client (and by extension the Docker API client) can be made to send a request
+// like POST /containers//start where what would normally be in the name section of the URL is empty.
+// This triggers an HTTP 301 from the daemon.
+// In Go 1.8 that 301 is converted to a GET request, which then gets a 404 from the daemon.
+// Previously the 301 was not followed and the client did not generate an error;
+// with the new behavior it surfaces as a message like "Error response from daemon: page not found".
+func CheckRedirect(req *http.Request, via []*http.Request) error {
+	if via[0].Method == http.MethodGet {
+		return http.ErrUseLastResponse
+	}
+	return ErrRedirect
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the URL to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the TLS certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
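+//
+// A minimal illustrative use (the values are placeholders and would
+// normally be set outside the process):
+//
+//	os.Setenv("DOCKER_HOST", "tcp://10.0.0.5:2376")
+//	os.Setenv("DOCKER_API_VERSION", "1.30")
+//	cli, err := client.NewEnvClient()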
+func NewEnvClient() (*Client, error) {
+	var client *http.Client
+	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+		options := tlsconfig.Options{
+			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
+			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
+			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
+			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+		}
+		tlsc, err := tlsconfig.Client(options)
+		if err != nil {
+			return nil, err
+		}
+
+		client = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsc,
+			},
+			CheckRedirect: CheckRedirect,
+		}
+	}
+
+	host := os.Getenv("DOCKER_HOST")
+	if host == "" {
+		host = DefaultDockerHost
+	}
+	version := os.Getenv("DOCKER_API_VERSION")
+	if version == "" {
+		version = api.DefaultVersion
+	}
+
+	cli, err := NewClient(host, version, client, nil)
+	if err != nil {
+		return cli, err
+	}
+	if os.Getenv("DOCKER_API_VERSION") != "" {
+		cli.manualOverride = true
+	}
+	return cli, nil
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+	proto, addr, basePath, err := ParseHost(host)
+	if err != nil {
+		return nil, err
+	}
+
+	if client != nil {
+		if _, ok := client.Transport.(*http.Transport); !ok {
+			return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+		}
+	} else {
+		transport := new(http.Transport)
+		sockets.ConfigureTransport(transport, proto, addr)
+		client = &http.Client{
+			Transport:     transport,
+			CheckRedirect: CheckRedirect,
+		}
+	}
+
+	scheme := "http"
+	tlsConfig := resolveTLSConfig(client.Transport)
+	if tlsConfig != nil {
+		// TODO(stevvooe): This isn't really the right way to write clients in Go.
+		// `NewClient` should probably only take an `*http.Client` and work from there.
+		// Unfortunately, the model of having a host-ish/url-thingy as the connection
+		// string has us confusing protocol and transport layers. We continue doing
+		// this to avoid breaking existing clients but this should be addressed.
+		scheme = "https"
+	}
+
+	return &Client{
+		scheme:            scheme,
+		host:              host,
+		proto:             proto,
+		addr:              addr,
+		basePath:          basePath,
+		client:            client,
+		version:           version,
+		customHTTPHeaders: httpHeaders,
+	}, nil
+}
+
+// Close closes idle connections held by the client's transport.
+// It is especially needed when NewClient was called with a nil *http.Client,
+// for example:
+// client.NewClient("unix:///var/run/docker.sock", "v1.18", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"})
+func (cli *Client) Close() error {
+
+	if t, ok := cli.client.Transport.(*http.Transport); ok {
+		t.CloseIdleConnections()
+	}
+
+	return nil
+}
+
+// getAPIPath returns the versioned request path to call the API.
+// It appends the query parameters to the path if they are not empty.
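+//
+// For example (derived from the logic below): with version "1.30",
+// getAPIPath("/containers/json", nil) returns "/v1.30/containers/json",
+// while an empty version yields the unversioned "/containers/json".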
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+	var apiPath string
+	if cli.version != "" {
+		v := strings.TrimPrefix(cli.version, "v")
+		apiPath = cli.basePath + "/v" + v + p
+	} else {
+		apiPath = cli.basePath + p
+	}
+
+	u := &url.URL{
+		Path: apiPath,
+	}
+	if len(query) > 0 {
+		u.RawQuery = query.Encode()
+	}
+	return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+// This operation doesn't acquire a mutex.
+func (cli *Client) ClientVersion() string {
+	return cli.version
+}
+
+// NegotiateAPIVersion updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
+	ping, _ := cli.Ping(ctx)
+	cli.NegotiateAPIVersionPing(ping)
+}
+
+// NegotiateAPIVersionPing updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
+	if cli.manualOverride {
+		return
+	}
+
+	// try the latest version before versioning headers existed
+	if p.APIVersion == "" {
+		p.APIVersion = "1.24"
+	}
+
+	// if server version is lower than the current cli, downgrade
+	if versions.LessThan(p.APIVersion, cli.ClientVersion()) {
+		cli.version = p.APIVersion
+	}
+}
+
+// DaemonHost returns the host associated with this instance of the Client.
+// This operation doesn't acquire a mutex.
+func (cli *Client) DaemonHost() string {
+	return cli.host
+}
+
+// ParseHost verifies that the given host string is valid.
+func ParseHost(host string) (string, string, string, error) {
+	protoAddrParts := strings.SplitN(host, "://", 2)
+	if len(protoAddrParts) == 1 {
+		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+	}
+
+	var basePath string
+	proto, addr := protoAddrParts[0], protoAddrParts[1]
+	if proto == "tcp" {
+		parsed, err := url.Parse("tcp://" + addr)
+		if err != nil {
+			return "", "", "", err
+		}
+		addr = parsed.Host
+		basePath = parsed.Path
+	}
+	return proto, addr, basePath, nil
+}
+
+// CustomHTTPHeaders returns the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) CustomHTTPHeaders() map[string]string {
+	m := make(map[string]string)
+	for k, v := range cli.customHTTPHeaders {
+		m[k] = v
+	}
+	return m
+}
+
+// SetCustomHTTPHeaders updates the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
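+//
+// An illustrative call (the header value is a placeholder):
+//
+//	cli.SetCustomHTTPHeaders(map[string]string{"User-Agent": "my-app/1.0"})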
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} diff --git a/client/client_mock_test.go b/client/client_mock_test.go new file mode 100644 index 0000000..0ab935d --- /dev/null +++ b/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/client/client_test.go b/client/client_test.go new file mode 100644 index 0000000..bc911c0 --- /dev/null +++ b/client/client_test.go @@ -0,0 +1,344 @@ +package client + +import ( + "bytes" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: api.DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + + env := envToMap() + defer mapToEnv(env) + for _, c := range cases { + mapToEnv(env) + mapToEnv(c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + assert.Error(t, err) + assert.Equal(t, c.expectedError, err.Error()) + } else { + assert.NoError(t, err) + version := apiclient.ClientVersion() + assert.Equal(t, c.expectedVersion, version) + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := 
apiclient.client.Transport.(*http.Transport) + assert.NotNil(t, tr.TLSClientConfig) + assert.Equal(t, tr.TLSClientConfig.InsecureSkipVerify, false) + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + assert.Equal(t, g, cs.e) + + err = c.Close() + assert.NoError(t, err) + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, + {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + // if we expected an error to be returned... + if cs.err { + assert.Error(t, e) + } + assert.Equal(t, cs.proto, p) + assert.Equal(t, cs.addr, a) + assert.Equal(t, cs.base, b) + } +} + +func TestNewEnvClientSetsDefaultVersion(t *testing.T) { + env := envToMap() + defer mapToEnv(env) + + envMap := map[string]string{ + "DOCKER_HOST": "", + "DOCKER_API_VERSION": "", + "DOCKER_TLS_VERIFY": "", + "DOCKER_CERT_PATH": "", + } + mapToEnv(envMap) + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, client.version, api.DefaultVersion) + + expected := "1.22" + os.Setenv("DOCKER_API_VERSION", expected) + client, err = NewEnvClient() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, expected, client.version) +} + +// TestNegotiateAPIVersionEmpty asserts that client.Client can +// negotiate a compatible APIVersion when omitted +func TestNegotiateAPIVersionEmpty(t *testing.T) { + env := envToMap() + defer mapToEnv(env) + + envMap := map[string]string{ + "DOCKER_API_VERSION": "", + } + mapToEnv(envMap) + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + + ping := types.Ping{ + APIVersion: "", + OSType: "linux", + Experimental: false, + } + + // set our version to something new + client.version = "1.25" + + // if no version from server, expect the earliest + // version before APIVersion was implemented + expected := "1.24" + + // test downgrade + client.NegotiateAPIVersionPing(ping) + assert.Equal(t, expected, client.version) +} + +// TestNegotiateAPIVersion asserts that client.Client can +// negotiate a compatible APIVersion with the server +func TestNegotiateAPIVersion(t *testing.T) { + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + + expected := "1.21" + + ping := types.Ping{ + APIVersion: expected, + OSType: "linux", + Experimental: false, + } + + // set 
our version to something new
+	client.version = "1.22"
+
+	// test downgrade
+	client.NegotiateAPIVersionPing(ping)
+	assert.Equal(t, expected, client.version)
+}
+
+// TestNegotiateAPIVersionOverride asserts that we honor
+// the environment variable DOCKER_API_VERSION when negotiating versions
+func TestNegotiateAPIVersionOverride(t *testing.T) {
+	env := envToMap()
+	defer mapToEnv(env)
+
+	envMap := map[string]string{
+		"DOCKER_API_VERSION": "9.99",
+	}
+	mapToEnv(envMap)
+
+	client, err := NewEnvClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ping := types.Ping{
+		APIVersion:   "1.24",
+		OSType:       "linux",
+		Experimental: false,
+	}
+
+	expected := envMap["DOCKER_API_VERSION"]
+
+	// test that we honored the env var
+	client.NegotiateAPIVersionPing(ping)
+	assert.Equal(t, expected, client.version)
+}
+
+// mapToEnv takes a map of environment variables and sets them
+func mapToEnv(env map[string]string) {
+	for k, v := range env {
+		os.Setenv(k, v)
+	}
+}
+
+// envToMap returns a map of environment variables
+func envToMap() map[string]string {
+	env := make(map[string]string)
+	for _, e := range os.Environ() {
+		kv := strings.SplitAfterN(e, "=", 2)
+		env[kv[0]] = kv[1]
+	}
+
+	return env
+}
+
+type roundTripFunc func(*http.Request) (*http.Response, error)
+
+func (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+	return rtf(req)
+}
+
+type bytesBufferClose struct {
+	*bytes.Buffer
+}
+
+func (bbc bytesBufferClose) Close() error {
+	return nil
+}
+
+func TestClientRedirect(t *testing.T) {
+	client := &http.Client{
+		CheckRedirect: CheckRedirect,
+		Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
+			if req.URL.String() == "/bla" {
+				return &http.Response{StatusCode: 404}, nil
+			}
+			return &http.Response{
+				StatusCode: 301,
+				Header:     map[string][]string{"Location": {"/bla"}},
+				Body:       bytesBufferClose{bytes.NewBuffer(nil)},
+			}, nil
+		}),
+	}
+
+	cases := []struct {
+		httpMethod  string
+		expectedErr error
+		statusCode  int
+	}{
+		{http.MethodGet, nil, 301},
+		{http.MethodPost, &url.Error{Op: "Post", URL: "/bla", Err: ErrRedirect}, 301},
+		{http.MethodPut, &url.Error{Op: "Put", URL: "/bla", Err: ErrRedirect}, 301},
+		{http.MethodDelete, &url.Error{Op: "Delete", URL: "/bla", Err: ErrRedirect}, 301},
+	}
+
+	for _, tc := range cases {
+		req, err := http.NewRequest(tc.httpMethod, "/redirectme", nil)
+		assert.NoError(t, err)
+		resp, err := client.Do(req)
+		assert.Equal(t, tc.expectedErr, err)
+		assert.Equal(t, tc.statusCode, resp.StatusCode)
+	}
+}
diff --git a/client/client_unix.go b/client/client_unix.go
new file mode 100644
index 0000000..89de892
--- /dev/null
+++ b/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd darwin
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/client/client_windows.go b/client/client_windows.go
new file mode 100644
index 0000000..07c0c7a
--- /dev/null
+++ b/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/client/config_create.go b/client/config_create.go
new file mode 100644
index 0000000..bc4a952
--- /dev/null
+++ b/client/config_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// ConfigCreate creates a new Config. +func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/client/config_create_test.go b/client/config_create_test.go new file mode 100644 index 0000000..000eaf1 --- /dev/null +++ b/client/config_create_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigCreateUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + assert.EqualError(t, err, `"config create" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigCreateError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigCreate(t *testing.T) { + expectedURL := "/v1.30/configs/create" + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ConfigCreateResponse{ + ID: "test_config", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_config" { + t.Fatalf("expected `test_config`, got %s", r.ID) + } +} diff --git a/client/config_inspect.go b/client/config_inspect.go new file mode 100644 index 0000000..ebb6d63 --- /dev/null +++ b/client/config_inspect.go @@ -0,0 +1,37 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Config{}, nil, configNotFoundError{id} + } + return swarm.Config{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := 
bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/client/config_inspect_test.go b/client/config_inspect_test.go new file mode 100644 index 0000000..010b188 --- /dev/null +++ b/client/config_inspect_test.go @@ -0,0 +1,78 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + assert.EqualError(t, err, `"config inspect" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigInspectError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigInspectConfigNotFound(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrConfigNotFound(err) { + t.Fatalf("expected a configNotFoundError error, got %v", err) + } +} + +func TestConfigInspect(t *testing.T) { + expectedURL := "/v1.30/configs/config_id" + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Config{ + ID: "config_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configInspect, _, err := client.ConfigInspectWithRaw(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } + if configInspect.ID != "config_id" { + t.Fatalf("expected `config_id`, got %s", configInspect.ID) + } +} diff --git a/client/config_list.go b/client/config_list.go new file mode 100644 index 0000000..8483ca1 --- /dev/null +++ b/client/config_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigList returns the list of configs. 
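+//
+// An illustrative sketch (the label value is a placeholder):
+//
+//	args := filters.NewArgs()
+//	args.Add("label", "env=prod")
+//	configs, err := cli.ConfigList(ctx, types.ConfigListOptions{Filters: args})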
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/client/config_list_test.go b/client/config_list_test.go new file mode 100644 index 0000000..4fe0546 --- /dev/null +++ b/client/config_list_test.go @@ -0,0 +1,106 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigListUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) + assert.EqualError(t, err, `"config list" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigListError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigList(t *testing.T) { + expectedURL := "/v1.30/configs" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ConfigListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ConfigListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ConfigListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Config{ + { + ID: "config_id1", + }, + { + ID: "config_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configs, err := client.ConfigList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(configs) != 2 { + t.Fatalf("expected 2 configs, got %v", configs) + } + } +} diff --git a/client/config_remove.go b/client/config_remove.go new file mode 100644 index 0000000..726b5c8 --- /dev/null +++ b/client/config_remove.go @@ -0,0 +1,13 @@ +package client + +import "golang.org/x/net/context" + +// ConfigRemove removes a Config. +func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/config_remove_test.go b/client/config_remove_test.go new file mode 100644 index 0000000..f2a9fee --- /dev/null +++ b/client/config_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestConfigRemoveUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + err := client.ConfigRemove(context.Background(), "config_id") + assert.EqualError(t, err, `"config remove" requires API version 1.30, but the Docker daemon API version is 1.29`) +} + +func TestConfigRemoveError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigRemove(t *testing.T) { + expectedURL := "/v1.30/configs/config_id" + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/config_update.go b/client/config_update.go new file mode 100644 index 0000000..823751b --- /dev/null +++ b/client/config_update.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git 
a/client/config_update_test.go b/client/config_update_test.go
new file mode 100644
index 0000000..799e544
--- /dev/null
+++ b/client/config_update_test.go
@@ -0,0 +1,60 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/stretchr/testify/assert"
+	"golang.org/x/net/context"
+)
+
+func TestConfigUpdateUnsupported(t *testing.T) {
+	client := &Client{
+		version: "1.29",
+		client:  &http.Client{},
+	}
+	err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+	assert.EqualError(t, err, `"config update" requires API version 1.30, but the Docker daemon API version is 1.29`)
+}
+
+func TestConfigUpdateError(t *testing.T) {
+	client := &Client{
+		version: "1.30",
+		client:  newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestConfigUpdate(t *testing.T) {
+	expectedURL := "/v1.30/configs/config_id/update"
+
+	client := &Client{
+		version: "1.30",
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+			}, nil
+		}),
+	}
+
+	err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/client/container_attach.go b/client/container_attach.go
new file mode 100644
index 0000000..eea4682
--- /dev/null
+++ b/client/container_attach.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+	query := url.Values{}
+	if options.Stream {
+		query.Set("stream", "1")
+	}
+	if options.Stdin {
+		query.Set("stdin", "1")
+	}
+	if options.Stdout {
+		query.Set("stdout", "1")
+	}
+	if options.Stderr {
+		query.Set("stderr", "1")
+	}
+	if options.DetachKeys != "" {
+		query.Set("detachKeys", options.DetachKeys)
+	}
+	if options.Logs {
+		query.Set("logs", "1")
+	}
+
+	headers := map[string][]string{"Content-Type": {"text/plain"}}
+	return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
diff --git a/client/container_commit.go b/client/container_commit.go
new file mode 100644
index 0000000..531d796
--- /dev/null
+++ b/client/container_commit.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"net/url"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes into a container and creates a new tagged image.
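+//
+// For example (an illustrative reference): "myrepo/myimage:mytag" is split
+// into the repository "myrepo/myimage" and the tag "mytag"; a reference
+// without a tag gets the default tag via reference.TagNameOnly.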
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/client/container_commit_test.go b/client/container_commit_test.go new file mode 100644 index 0000000..6947ed3 --- /dev/null +++ b/client/container_commit_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerCommitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerCommit(t *testing.T) { + expectedURL := "/commit" + expectedContainerID := "container_id" + specifiedReference := "repository_name:tag" + expectedRepositoryName := "repository_name" + expectedTag := "tag" + expectedComment := "comment" + expectedAuthor := "author" + expectedChanges := []string{"change1", "change2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + containerID := query.Get("container") + if containerID != expectedContainerID { + return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID) + } + repo := query.Get("repo") + if repo != expectedRepositoryName { + return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo) + } + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got %s'", expectedTag, tag) + } + comment := query.Get("comment") + if comment != expectedComment { + return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got %s'", expectedComment, comment) + } + author := query.Get("author") + if author != expectedAuthor { + return nil, fmt.Errorf("container author not set in URL query properly. 
Expected '%s', got %s'", expectedAuthor, author) + } + pause := query.Get("pause") + if pause != "0" { + return nil, fmt.Errorf("container pause not set in URL query properly. Expected 'true', got %v'", pause) + } + changes := query["changes"] + if len(changes) != len(expectedChanges) { + return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes)) + } + b, err := json.Marshal(types.IDResponse{ + ID: "new_container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{ + Reference: specifiedReference, + Comment: expectedComment, + Author: expectedAuthor, + Changes: expectedChanges, + Pause: false, + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "new_container_id" { + t.Fatalf("expected `new_container_id`, got %s", r.ID) + } +} diff --git a/client/container_copy.go b/client/container_copy.go new file mode 100644 index 0000000..30ba680 --- /dev/null +++ b/client/container_copy.go @@ -0,0 +1,102 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + container + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
+ + apiPath := "/containers/" + container + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/client/container_copy_test.go b/client/container_copy_test.go new file mode 100644 index 0000000..c84f82e --- /dev/null +++ b/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: 
ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyFromContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", 
"path/to/file") + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyFromContainerNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestCopyFromContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + + headercontent, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(headercontent) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } + content, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + if string(content) != "content" { + t.Fatalf("expected content to be 'content', got %s", string(content)) + } +} diff --git a/client/container_create.go b/client/container_create.go new file mode 100644 index 0000000..6841b0b --- /dev/null +++ b/client/container_create.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. 
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
+	var response container.ContainerCreateCreatedBody
+
+	if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
+		return response, err
+	}
+
+	// When using API 1.24 and under, the client is responsible for removing the container
+	if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
+		hostConfig.AutoRemove = false
+	}
+
+	query := url.Values{}
+	if containerName != "" {
+		query.Set("name", containerName)
+	}
+
+	body := configWrapper{
+		Config:           config,
+		HostConfig:       hostConfig,
+		NetworkingConfig: networkingConfig,
+	}
+
+	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+	if err != nil {
+		if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+			return response, imageNotFoundError{config.Image}
+		}
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/client/container_create_test.go b/client/container_create_test.go
new file mode 100644
index 0000000..3ab608c
--- /dev/null
+++ b/client/container_create_test.go
@@ -0,0 +1,118 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"golang.org/x/net/context"
+)
+
+func TestContainerCreateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err)
+	}
+
+	// 404 doesn't automatically mean an unknown image
+	client = &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
+	}
+	_, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err)
+	}
+}
+
+func TestContainerCreateImageNotFound(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "No such image")),
+	}
+	_, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown")
+	if err == nil || !IsErrImageNotFound(err) {
+		t.Fatalf("expected an imageNotFound error, got %v", err)
+	}
+}
+
+func TestContainerCreateWithName(t *testing.T) {
+	expectedURL := "/containers/create"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			name := req.URL.Query().Get("name")
+			if name != "container_name" {
+				return nil, fmt.Errorf("container name not set in URL query properly. 
Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} + +// TestContainerCreateAutoRemove validates that a client using API 1.24 always disables AutoRemove. When using API 1.25 +// or up, AutoRemove should not be disabled. +func TestContainerCreateAutoRemove(t *testing.T) { + autoRemoveValidator := func(expectedValue bool) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + var config configWrapper + + if err := json.NewDecoder(req.Body).Decode(&config); err != nil { + return nil, err + } + if config.HostConfig.AutoRemove != expectedValue { + return nil, fmt.Errorf("expected AutoRemove to be %v, got %v", expectedValue, config.HostConfig.AutoRemove) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + } + + client := &Client{ + client: newMockClient(autoRemoveValidator(false)), + version: "1.24", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } + client = &Client{ + client: newMockClient(autoRemoveValidator(true)), + version: "1.25", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } +} diff --git a/client/container_diff.go b/client/container_diff.go new file mode 100644 index 0000000..884dc9f --- /dev/null +++ b/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
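+//
+// A minimal usage sketch (illustrative; assumes cli is a connected *Client):
+//
+//	changes, err := cli.ContainerDiff(context.Background(), "container_id")
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, change := range changes {
+//		fmt.Println(change.Kind, change.Path)
+//	}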
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
+	var changes []container.ContainerChangeResponseItem
+
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+	if err != nil {
+		return changes, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&changes)
+	ensureReaderClosed(serverResp)
+	return changes, err
+}
diff --git a/client/container_diff_test.go b/client/container_diff_test.go
new file mode 100644
index 0000000..57dd73e
--- /dev/null
+++ b/client/container_diff_test.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"golang.org/x/net/context"
+)
+
+func TestContainerDiffError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerDiff(context.Background(), "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerDiff(t *testing.T) {
+	expectedURL := "/containers/container_id/changes"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			b, err := json.Marshal([]container.ContainerChangeResponseItem{
+				{
+					Kind: 0,
+					Path: "/path/1",
+				},
+				{
+					Kind: 1,
+					Path: "/path/2",
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	changes, err := client.ContainerDiff(context.Background(), "container_id")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) != 2 {
+		t.Fatalf("expected an array of 2 changes, got %v", changes)
+	}
+}
diff --git a/client/container_exec.go b/client/container_exec.go
new file mode 100644
index 0000000..0665c54
--- /dev/null
+++ b/client/container_exec.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+	var response types.IDResponse
+
+	if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+		return response, err
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
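+//
+// A minimal sketch of the attach flow (illustrative; the exec ID is assumed
+// to come from a prior ContainerExecCreate call):
+//
+//	resp, err := cli.ContainerExecAttach(ctx, execID, types.ExecConfig{Tty: true})
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer resp.Close()
+//	io.Copy(os.Stdout, resp.Reader)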
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/client/container_exec_test.go b/client/container_exec_test.go new file mode 100644 index 0000000..0e296a5 --- /dev/null +++ b/client/container_exec_test.go @@ -0,0 +1,157 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerExecCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecCreate(t *testing.T) { + expectedURL := "/containers/container_id/exec" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + // FIXME validate the content is the given ExecConfig ? 
+ if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/client/container_export.go b/client/container_export.go new file mode 100644 index 
0000000..52194f3 --- /dev/null +++ b/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/client/container_export_test.go b/client/container_export_test.go new file mode 100644 index 0000000..5849fe9 --- /dev/null +++ b/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git a/client/container_inspect.go b/client/container_inspect.go new file mode 100644 index 0000000..17f1809 --- /dev/null +++ b/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
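+//
+// A minimal usage sketch (illustrative; getSize=true also asks the daemon to
+// compute the container's size):
+//
+//	info, raw, err := cli.ContainerInspectWithRaw(context.Background(), "container_id", true)
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(info.Name, len(raw))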
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/client/container_inspect_test.go b/client/container_inspect_test.go new file mode 100644 index 0000000..98f83bd --- /dev/null +++ b/client/container_inspect_test.go @@ -0,0 +1,125 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "unknown") + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestContainerInspect(t *testing.T) { + expectedURL := "/containers/container_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.Image) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.Name) + } +} + +func TestContainerInspectNode(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + Node: &types.ContainerNode{ + ID: "container_node_id", + Addr: "container_node", + Labels: map[string]string{"foo": "bar"}, + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), 
"container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.Image) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.Name) + } + if r.Node.ID != "container_node_id" { + t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/client/container_kill.go b/client/container_kill.go new file mode 100644 index 0000000..29f80c7 --- /dev/null +++ b/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_kill_test.go b/client/container_kill_test.go new file mode 100644 index 0000000..9477b0a --- /dev/null +++ b/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_list.go b/client/container_list.go new file mode 100644 index 0000000..4398912 --- /dev/null +++ b/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. 
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+	query := url.Values{}
+
+	if options.All {
+		query.Set("all", "1")
+	}
+
+	if options.Limit != -1 {
+		query.Set("limit", strconv.Itoa(options.Limit))
+	}
+
+	if options.Since != "" {
+		query.Set("since", options.Since)
+	}
+
+	if options.Before != "" {
+		query.Set("before", options.Before)
+	}
+
+	if options.Size {
+		query.Set("size", "1")
+	}
+
+	if options.Filters.Len() > 0 {
+		filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+
+		if err != nil {
+			return nil, err
+		}
+
+		query.Set("filters", filterJSON)
+	}
+
+	resp, err := cli.get(ctx, "/containers/json", query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var containers []types.Container
+	err = json.NewDecoder(resp.body).Decode(&containers)
+	ensureReaderClosed(resp)
+	return containers, err
+}
diff --git a/client/container_list_test.go b/client/container_list_test.go
new file mode 100644
index 0000000..e41c687
--- /dev/null
+++ b/client/container_list_test.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"golang.org/x/net/context"
+)
+
+func TestContainerListError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerList(context.Background(), types.ContainerListOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerList(t *testing.T) {
+	expectedURL := "/containers/json"
+	expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}`
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			query := req.URL.Query()
+			all := query.Get("all")
+			if all != "1" {
+				return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all)
+			}
+			limit := query.Get("limit")
+			if limit != "0" {
+				return nil, fmt.Errorf("limit should not be present in query. Expected '0', got %s", limit)
+			}
+			since := query.Get("since")
+			if since != "container" {
+				return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since)
+			}
+			before := query.Get("before")
+			if before != "" {
+				return nil, fmt.Errorf("before should not be present in query, got %s", before)
+			}
+			size := query.Get("size")
+			if size != "1" {
+				return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size)
+			}
+			filters := query.Get("filters")
+			if filters != expectedFilters {
+				return nil, fmt.Errorf("expected filters '%v', got %v", expectedFilters, filters)
+			}
+
+			b, err := json.Marshal([]types.Container{
+				{
+					ID: "container_id1",
+				},
+				{
+					ID: "container_id2",
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	filters := filters.NewArgs()
+	filters.Add("label", "label1")
+	filters.Add("label", "label2")
+	filters.Add("before", "container")
+	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
+		Size:    true,
+		All:     true,
+		Since:   "container",
+		Filters: filters,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(containers) != 2 {
+		t.Fatalf("expected 2 containers, got %v", containers)
+	}
+}
diff --git a/client/container_logs.go b/client/container_logs.go
new file mode 100644
index 0000000..69056b6
--- /dev/null
+++ b/client/container_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+	"io"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	if options.ShowStdout {
+		query.Set("stdout", "1")
+	}
+
+	if options.ShowStderr {
+		query.Set("stderr", "1")
+	}
+
+	if options.Since != "" {
+		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+		if err != nil {
+			return nil, err
+		}
+		query.Set("since", ts)
+	}
+
+	if options.Timestamps {
+		query.Set("timestamps", "1")
+	}
+
+	if options.Details {
+		query.Set("details", "1")
+	}
+
+	if options.Follow {
+		query.Set("follow", "1")
+	}
+	query.Set("tail", options.Tail)
+
+	resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/client/container_logs_test.go b/client/container_logs_test.go
new file mode 100644
index 0000000..99e3184
--- /dev/null
+++ b/client/container_logs_test.go
@@ -0,0 +1,133 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+
+	"golang.org/x/net/context"
+)
+
+func TestContainerLogsError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+	_, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{
+		Since: "2006-01-02TZ",
+	})
+	if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) {
+		t.Fatalf("expected a 'parsing time' error, got %v", err)
+	}
+}
+
+func TestContainerLogs(t *testing.T) {
+	expectedURL := "/containers/container_id/logs"
+	cases := []struct {
+		options             types.ContainerLogsOptions
+		expectedQueryParams map[string]string
+	}{
+		{
+			expectedQueryParams: map[string]string{
+				"tail": "",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				Tail: "any",
+			},
+			expectedQueryParams: map[string]string{
+				"tail": "any",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				ShowStdout: true,
+				ShowStderr: true,
+				Timestamps: true,
+				Details:    true,
+				Follow:     true,
+			},
+			expectedQueryParams: map[string]string{
+				"tail":       "",
+				"stdout":     "1",
+				"stderr":     "1",
+				"timestamps": "1",
+				"details":    "1",
+				"follow":     "1",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				// A completely invalid date, timestamp or Go duration will be
+				// passed as is
+				Since: "invalid but valid",
+			},
+			expectedQueryParams: map[string]string{
+				"tail":  "",
+				"since": "invalid but valid",
+			},
+		},
+	}
+	for _, logCase := range cases {
+		client := &Client{
+			client: newMockClient(func(r *http.Request) (*http.Response, error) {
+				if !strings.HasPrefix(r.URL.Path, expectedURL) {
+					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+				}
+				// Check query parameters
+				query := r.URL.Query()
+				for key, expected := range logCase.expectedQueryParams {
+					actual := query.Get(key)
+					if actual != expected {
+						return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual)
+					}
+				}
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte("response"))),
+				}, nil
+			}),
+		}
+		body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer body.Close()
+		content, err := ioutil.ReadAll(body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(content) != "response" {
+			t.Fatalf("expected response to contain 'response', got %s", string(content))
+		}
+	}
+}
+
+func ExampleClient_ContainerLogs_withTimeout() {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	client, _ := NewEnvClient()
+	reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	_, err = io.Copy(os.Stdout, reader)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
}
diff --git a/client/container_pause.go b/client/container_pause.go
new file mode 100644
index 0000000..412067a
--- /dev/null
+++ b/client/container_pause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
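+//
+// A minimal usage sketch (illustrative; pair it with ContainerUnpause to
+// resume the container):
+//
+//	if err := cli.ContainerPause(context.Background(), "container_id"); err != nil {
+//		panic(err)
+//	}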
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_pause_test.go b/client/container_pause_test.go new file mode 100644 index 0000000..0ee2f05 --- /dev/null +++ b/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_prune.go b/client/container_prune.go new file mode 100644 index 0000000..b582170 --- /dev/null +++ b/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/client/container_prune_test.go b/client/container_prune_test.go new file mode 100644 index 0000000..8a1c638 --- /dev/null +++ b/client/container_prune_test.go @@ -0,0 +1,124 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestContainersPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ContainersPrune(context.Background(), filters) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestContainersPrune(t *testing.T) { + expectedURL := "/v1.25/containers/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters 
:= filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingUntilFilters := filters.NewArgs() + danglingUntilFilters.Add("dangling", "true") + danglingUntilFilters.Add("until", "2016-12-15T14:00") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: danglingUntilFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"until":{"2016-12-15T14:00":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.ContainersPruneReport{ + ContainersDeleted: []string{"container_id1", "container_id2"}, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ContainersPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.ContainersDeleted, 2) + assert.Equal(t, uint64(9999), report.SpaceReclaimed) + } +} diff --git a/client/container_remove.go b/client/container_remove.go new file mode 100644 index 0000000..3a79590 --- /dev/null +++ b/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. 
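+//
+// A minimal usage sketch (illustrative; Force also removes a running
+// container):
+//
+//	err := cli.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{
+//		RemoveVolumes: true,
+//		Force:         true,
+//	})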
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+	query := url.Values{}
+	if options.RemoveVolumes {
+		query.Set("v", "1")
+	}
+	if options.RemoveLinks {
+		query.Set("link", "1")
+	}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/client/container_remove_test.go b/client/container_remove_test.go
new file mode 100644
index 0000000..798c08b
--- /dev/null
+++ b/client/container_remove_test.go
@@ -0,0 +1,59 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestContainerRemoveError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerRemove(t *testing.T) {
+	expectedURL := "/containers/container_id"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			query := req.URL.Query()
+			volume := query.Get("v")
+			if volume != "1" {
+				return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume)
+			}
+			force := query.Get("force")
+			if force != "1" {
+				return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force)
+			}
+			link := query.Get("link")
+			if link != "" {
+				return nil, fmt.Errorf("link should not be present in query, got %s", link)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+
+	err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{
+		RemoveVolumes: true,
+		Force:         true,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/client/container_rename.go b/client/container_rename.go
new file mode 100644
index 0000000..0e718da
--- /dev/null
+++ b/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
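+//
+// A minimal usage sketch (illustrative):
+//
+//	err := cli.ContainerRename(context.Background(), "container_id", "new_name")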
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_rename_test.go b/client/container_rename_test.go new file mode 100644 index 0000000..732ebff --- /dev/null +++ b/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_resize.go b/client/container_resize.go new file mode 100644 index 0000000..66c3cc1 --- /dev/null +++ b/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. 
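+//
+// A minimal usage sketch (illustrative; height and width are the rows and
+// columns of the tty):
+//
+//	err := cli.ContainerExecResize(context.Background(), execID, types.ResizeOptions{
+//		Height: 24,
+//		Width:  80,
+//	})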
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
+	query := url.Values{}
+	query.Set("h", strconv.Itoa(int(height)))
+	query.Set("w", strconv.Itoa(int(width)))
+
+	resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/client/container_resize_test.go b/client/container_resize_test.go
new file mode 100644
index 0000000..5b2efec
--- /dev/null
+++ b/client/container_resize_test.go
@@ -0,0 +1,82 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestContainerResizeError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerExecResizeError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerResize(t *testing.T) {
+	client := &Client{
+		client: newMockClient(resizeTransport("/containers/container_id/resize")),
+	}
+
+	err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{
+		Height: 500,
+		Width:  600,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestContainerExecResize(t *testing.T) {
+	client := &Client{
+		client: newMockClient(resizeTransport("/exec/exec_id/resize")),
+	}
+
+	err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{
+		Height: 500,
+		Width:  600,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) {
+	return func(req *http.Request) (*http.Response, error) {
+		if !strings.HasPrefix(req.URL.Path, expectedURL) {
+			return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+		}
+		query := req.URL.Query()
+		h := query.Get("h")
+		if h != "500" {
+			return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h)
+		}
+		w := query.Get("w")
+		if w != "600" {
+			return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w)
+		}
+		return &http.Response{
+			StatusCode: http.StatusOK,
+			Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+		}, nil
+	}
+}
diff --git a/client/container_restart.go b/client/container_restart.go
new file mode 100644
index 0000000..74d7455
--- /dev/null
+++ b/client/container_restart.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"net/url"
+	"time"
+
+	timetypes "github.com/docker/docker/api/types/time"
+	"golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
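+//
+// A minimal usage sketch (illustrative; a nil timeout falls back to the
+// daemon's default):
+//
+//	timeout := 30 * time.Second
+//	err := cli.ContainerRestart(context.Background(), "container_id", &timeout)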
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_restart_test.go b/client/container_restart_test.go new file mode 100644 index 0000000..8c3cfd6 --- /dev/null +++ b/client/container_restart_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerRestartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerRestart(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRestart(t *testing.T) { + expectedURL := "/containers/container_id/restart" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerRestart(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_start.go b/client/container_start.go new file mode 100644 index 0000000..b1f08de --- /dev/null +++ b/client/container_start.go @@ -0,0 +1,24 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. 
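+//
+// A minimal usage sketch (illustrative; the checkpoint fields are typically
+// only needed when restoring from a checkpoint):
+//
+//	err := cli.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{})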
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_start_test.go b/client/container_start_test.go new file mode 100644 index 0000000..5826fa8 --- /dev/null +++ b/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_stats.go b/client/container_stats.go new file mode 100644 index 0000000..4758c66 --- /dev/null +++ b/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. 
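+//
+// A minimal usage sketch (illustrative; with stream=false a single stats
+// sample is returned):
+//
+//	stats, err := cli.ContainerStats(context.Background(), "container_id", false)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer stats.Body.Close()
+//	io.Copy(os.Stdout, stats.Body)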
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/client/container_stats_test.go b/client/container_stats_test.go new file mode 100644 index 0000000..7414f13 --- /dev/null +++ b/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/client/container_stop.go b/client/container_stop.go new file mode 100644 index 0000000..b5418ae --- /dev/null +++ b/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container without terminating the process. +// The process is blocked until the container stops or the timeout expires. 
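+//
+// A minimal usage sketch (illustrative; a nil timeout uses the daemon's
+// default grace period):
+//
+//	timeout := 10 * time.Second
+//	err := cli.ContainerStop(context.Background(), "container_id", &timeout)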
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_stop_test.go b/client/container_stop_test.go new file mode 100644 index 0000000..c32cd69 --- /dev/null +++ b/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_top.go b/client/container_top.go new file mode 100644 index 0000000..9689123 --- /dev/null +++ b/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. 
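+//
+// A minimal usage sketch, assuming an initialized Client "cli"; the arguments
+// are joined and passed through to ps on the daemon side:
+//
+//	top, err := cli.ContainerTop(ctx, "container_id", []string{"aux"})
+//	if err != nil {
+//		return err
+//	}
+//	// top.Titles holds the column names; top.Processes holds one row per process.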
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/client/container_top_test.go b/client/container_top_test.go new file mode 100644 index 0000000..68ccef5 --- /dev/null +++ b/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), "nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(container.ContainerTopOKBody{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/client/container_unpause.go b/client/container_unpause.go new file mode 100644 index 0000000..5c76211 --- /dev/null +++ b/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/container_unpause_test.go b/client/container_unpause_test.go new file mode 100644 index 0000000..2c42727 --- /dev/null +++ b/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/container_update.go b/client/container_update.go new file mode 100644 index 0000000..5082f22 --- /dev/null +++ b/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/client/container_update_test.go b/client/container_update_test.go new file mode 100644 index 0000000..715bb7c --- /dev/null +++ 
b/client/container_update_test.go
@@ -0,0 +1,58 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"golang.org/x/net/context"
+)
+
+func TestContainerUpdateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerUpdate(t *testing.T) {
+	expectedURL := "/containers/container_id/update"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+
+			b, err := json.Marshal(container.ContainerUpdateOKBody{})
+			if err != nil {
+				return nil, err
+			}
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	_, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{
+		Resources: container.Resources{
+			CPUPeriod: 1,
+		},
+		RestartPolicy: container.RestartPolicy{
+			Name: "always",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/client/container_wait.go b/client/container_wait.go
new file mode 100644
index 0000000..13b5eea
--- /dev/null
+++ b/client/container_wait.go
@@ -0,0 +1,84 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/versions"
+)
+
+// ContainerWait waits until the specified container is in a certain state
+// indicated by the given condition, either "not-running" (default),
+// "next-exit", or "removed".
+//
+// If this client's API version is before 1.30, condition is ignored and
+// ContainerWait will return immediately with the two channels, as the server
+// will wait as if the condition were "not-running".
+//
+// If this client's API version is at least 1.30, ContainerWait blocks until
+// the request has been acknowledged by the server (with a response header),
+// then returns two channels on which the caller can wait for the exit status
+// of the container or an error if there was a problem either beginning the
+// wait request or in getting the response. This allows the caller to
+// synchronize ContainerWait with other calls, such as specifying a
+// "next-exit" condition before issuing a ContainerStart request.
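+//
+// A sketch of that synchronization, assuming an initialized Client "cli", a
+// hypothetical container ID, and the WaitConditionNextExit constant and
+// ContainerStartOptions type from the api/types packages:
+//
+//	resultC, errC := cli.ContainerWait(ctx, "container_id", container.WaitConditionNextExit)
+//	if err := cli.ContainerStart(ctx, "container_id", types.ContainerStartOptions{}); err != nil {
+//		return err
+//	}
+//	select {
+//	case res := <-resultC:
+//		fmt.Println(res.StatusCode)
+//	case err := <-errC:
+//		return err
+//	}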
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/client/container_wait_test.go b/client/container_wait_test.go new file mode 100644 index 0000000..7b8c9f0 --- /dev/null +++ b/client/container_wait_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + resultC, errC := client.ContainerWait(context.Background(), "nothing", "") + select { + case result := <-resultC: + t.Fatalf("expected to not get a wait result, got %d", result.StatusCode) + case err := <-errC: + if err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resultC, errC := client.ContainerWait(context.Background(), "container_id", "") + select { + case err := <-errC: + t.Fatal(err) + case result := <-resultC: + if result.StatusCode != 15 { + t.Fatalf("expected a status code equal to '15', got %d", result.StatusCode) + } + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, errC := client.ContainerWait(ctx, "container_id", "") + if err := <-errC; err != nil { + 
log.Fatal(err) + } +} diff --git a/client/disk_usage.go b/client/disk_usage.go new file mode 100644 index 0000000..03c80b3 --- /dev/null +++ b/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/client/disk_usage_test.go b/client/disk_usage_test.go new file mode 100644 index 0000000..51e6222 --- /dev/null +++ b/client/disk_usage_test.go @@ -0,0 +1,55 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestDiskUsageError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.DiskUsage(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestDiskUsage(t *testing.T) { + expectedURL := "/system/df" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + du := types.DiskUsage{ + LayersSize: int64(100), + Images: nil, + Containers: nil, + Volumes: nil, + } + + b, err := json.Marshal(du) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + if _, err := client.DiskUsage(context.Background()); err != nil { + t.Fatal(err) + } +} diff --git a/client/distribution_inspect.go b/client/distribution_inspect.go new file mode 100644 index 0000000..aa5bc6a --- /dev/null +++ b/client/distribution_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + ensureReaderClosed(resp) + return distributionInspect, err +} diff --git a/client/distribution_inspect_test.go b/client/distribution_inspect_test.go new file mode 100644 index 0000000..eff28d7 --- /dev/null +++ 
b/client/distribution_inspect_test.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"golang.org/x/net/context"
+)
+
+func TestDistributionInspectUnsupported(t *testing.T) {
+	client := &Client{
+		version: "1.29",
+		client:  &http.Client{},
+	}
+	_, err := client.DistributionInspect(context.Background(), "foobar:1.0", "")
+	assert.EqualError(t, err, `"distribution inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)
+}
diff --git a/client/errors.go b/client/errors.go
new file mode 100644
index 0000000..ab37f94
--- /dev/null
+++ b/client/errors.go
@@ -0,0 +1,300 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/versions"
+	"github.com/pkg/errors"
+)
+
+// errConnectionFailed implements an error returned when connection failed.
+type errConnectionFailed struct {
+	host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+	if err.host == "" {
+		return "Cannot connect to the docker daemon. Is the docker daemon running on this host?"
+	}
+	return fmt.Sprintf("Cannot connect to the docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a connection failure.
+func IsErrConnectionFailed(err error) bool {
+	_, ok := errors.Cause(err).(errConnectionFailed)
+	return ok
+}
+
+// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+	return errConnectionFailed{host: host}
+}
+
+type notFound interface {
+	error
+	NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused by an
+// object (image, container, network, volume, …) not being found in the docker host.
+func IsErrNotFound(err error) bool {
+	te, ok := err.(notFound)
+	return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+	imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+	containerID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e containerNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct { + networkID string +} + +// NotFound indicates that this error type is of NotFound +func (e networkNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a networkNotFoundError +func (e networkNotFoundError) Error() string { + return fmt.Sprintf("Error: No such network: %s", e.networkID) +} + +// IsErrNetworkNotFound returns true if the error is caused +// when a network is not found in the docker host. +func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. +type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. +type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. +func IsErrServiceNotFound(err error) bool { + _, ok := err.(serviceNotFoundError) + return ok +} + +// taskNotFoundError implements an error returned when a task is not found. +type taskNotFoundError struct { + taskID string +} + +// Error returns a string representation of a taskNotFoundError +func (e taskNotFoundError) Error() string { + return fmt.Sprintf("Error: No such task: %s", e.taskID) +} + +// NotFound indicates that this error type is of NotFound +func (e taskNotFoundError) NotFound() bool { + return true +} + +// IsErrTaskNotFound returns true if the error is caused +// when a task is not found. 
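+//
+// A caller-side sketch, assuming an initialized Client "cli" and the client's
+// TaskInspectWithRaw method:
+//
+//	if _, _, err := cli.TaskInspectWithRaw(ctx, taskID); IsErrTaskNotFound(err) {
+//		// the task is gone; treat it as already removed
+//	}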
+func IsErrTaskNotFound(err error) bool {
+	_, ok := err.(taskNotFoundError)
+	return ok
+}
+
+type pluginPermissionDenied struct {
+	name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+	return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+	_, ok := err.(pluginPermissionDenied)
+	return ok
+}
+
+// NewVersionError returns an error if the APIVersion required
+// is less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+	if versions.LessThan(cli.version, APIrequired) {
+		return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+	}
+	return nil
+}
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+	_, ok := err.(secretNotFoundError)
+	return ok
+}
+
+// configNotFoundError implements an error returned when a config is not found.
+type configNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a configNotFoundError
+func (e configNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such config: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e configNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrConfigNotFound returns true if the error is caused
+// when a config is not found.
+func IsErrConfigNotFound(err error) bool {
+	_, ok := err.(configNotFoundError)
+	return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+	name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
diff --git a/client/events.go b/client/events.go
new file mode 100644
index 0000000..af47aef
--- /dev/null
+++ b/client/events.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events in the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read an io.EOF error will
+// be sent over the error channel. If an error is sent all processing will be stopped.
It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/client/events_test.go b/client/events_test.go new file mode 100644 index 0000000..ba82d2f --- /dev/null +++ b/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) + + eventsCases := []struct { + options types.EventsOptions 
+ events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/client/hijack.go b/client/hijack.go new file mode 100644 index 0000000..346c74a --- /dev/null +++ b/client/hijack.go @@ -0,0 +1,206 @@ +package client + +import ( + "bufio" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. 
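+//
+// The hijacked connection is surfaced to callers through methods such as
+// ContainerAttach; a usage sketch with a hypothetical container ID:
+//
+//	hijacked, err := cli.ContainerAttach(ctx, "container_id",
+//		types.ContainerAttachOptions{Stream: true, Stdout: true, Stderr: true})
+//	if err != nil {
+//		return err
+//	}
+//	defer hijacked.Close()
+//	_, err = io.Copy(os.Stdout, hijacked.Reader)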
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+	bodyEncoded, err := encodeData(body)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+
+	apiPath := cli.getAPIPath(path, query)
+	req, err := http.NewRequest("POST", apiPath, bodyEncoded)
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+	req = cli.addHeaders(req, headers)
+
+	conn, err := cli.setupHijackConn(req, "tcp")
+	if err != nil {
+		return types.HijackedResponse{}, err
+	}
+
+	return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	rawConn, err := proxyDialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		config = tlsconfig.Clone(config)
+		config.ServerName = hostname
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where Docker differs from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+	if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+		// Note that this isn't Go's standard tls.Dial function
+		return tlsDial(proto, addr, tlsConfig)
+	}
+	if proto == "npipe" {
+		return sockets.DialPipe(addr, 32*time.Second)
+	}
+	return net.Dial(proto, addr)
+}
+
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) {
+	req.Host = cli.addr
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", proto)
+
+	conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
+	if err != nil {
+		return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+	}
+
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	clientconn := httputil.NewClientConn(conn, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	resp, err := clientconn.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusSwitchingProtocols {
+		resp.Body.Close()
+		return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+	}
+
+	c, br := clientconn.Hijack()
+	if br.Buffered() > 0 {
+		// If there is buffered content, wrap the connection
+		c = &hijackedConn{c, br}
+	} else {
+		br.Reset(nil)
+	}
+
+	return c, nil
+}
+
+type hijackedConn struct {
+	net.Conn
+	r *bufio.Reader
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
diff --git a/client/image_build.go b/client/image_build.go
new file mode 100644
index 0000000..b3c1f9a
--- /dev/null
+++ b/client/image_build.go
@@ -0,0 +1,134 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
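+//
+// A minimal usage sketch, assuming an initialized Client "cli" and a tar
+// archive of the build context available as an io.Reader "buildCtx":
+//
+//	resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
+//		Tags:       []string{"example:latest"},
+//		Dockerfile: "Dockerfile",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()
+//	_, err = io.Copy(os.Stdout, resp.Body) // JSON progress stream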
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + + volumesJSON, err := json.Marshal(options.Volumes) + if err != nil { + return query, err + } + query.Set("volumes", string(volumesJSON)) + + return query, nil +} diff --git a/client/image_build_test.go b/client/image_build_test.go new file mode 100644 index 0000000..1e18b7b --- /dev/null +++ b/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if 
!strings.HasPrefix(r.URL.Path, expectedURL) {
+					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+				}
+				// Check request headers
+				registryConfig := r.Header.Get("X-Registry-Config")
+				if registryConfig != buildCase.expectedRegistryConfig {
+					return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig)
+				}
+				contentType := r.Header.Get("Content-Type")
+				if contentType != "application/x-tar" {
+					return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/x-tar', got %s", contentType)
+				}
+
+				// Check query parameters
+				query := r.URL.Query()
+				for key, expected := range buildCase.expectedQueryParams {
+					actual := query.Get(key)
+					if actual != expected {
+						return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual)
+					}
+				}
+
+				// Check tags
+				if len(buildCase.expectedTags) > 0 {
+					tags := query["t"]
+					if !reflect.DeepEqual(tags, buildCase.expectedTags) {
+						return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags)
+					}
+				}
+
+				headers := http.Header{}
+				headers.Add("Server", "Docker/v1.23 (MyOS)")
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+					Header:     headers,
+				}, nil
+			}),
+		}
+		buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if buildResponse.OSType != "MyOS" {
+			t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType)
+		}
+		response, err := ioutil.ReadAll(buildResponse.Body)
+		if err != nil {
+			t.Fatal(err)
+		}
+		buildResponse.Body.Close()
+		if string(response) != "body" {
+			t.Fatalf("expected Body to contain 'body' string, got %s", response)
+		}
+	}
+}
+
+func TestGetDockerOS(t *testing.T) {
+	cases := map[string]string{
+		"Docker/v1.22 (linux)":   "linux",
+		"Docker/v1.22 (windows)": "windows",
+		"Foo/v1.22 (bar)":        "",
+	}
+	for header, os := range cases {
+		g := getDockerOS(header)
+		if g != os {
+			t.Fatalf("Expected %s, got %s", os, g)
+		}
+	}
+}
diff --git a/client/image_create.go b/client/image_create.go
new file mode 100644
index 0000000..4436abb
--- /dev/null
+++ b/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
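+//
+// A minimal usage sketch, assuming an initialized Client "cli"; pull progress
+// arrives as a JSON stream on the returned io.ReadCloser:
+//
+//	rc, err := cli.ImageCreate(ctx, "docker.io/library/alpine:latest", types.ImageCreateOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = io.Copy(ioutil.Discard, rc) // drain until the pull completes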
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/client/image_create_test.go b/client/image_create_test.go new file mode 100644 index 0000000..5c2edd2 --- /dev/null +++ b/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", expectedTag, tag)
+			}
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte("body"))),
+			}, nil
+		}),
+	}
+
+	createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{
+		RegistryAuth: expectedRegistryAuth,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	response, err := ioutil.ReadAll(createResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err = createResponse.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if string(response) != "body" {
+		t.Fatalf("expected Body to contain 'body' string, got %s", response)
+	}
+}
diff --git a/client/image_delta.go b/client/image_delta.go
new file mode 100644
index 0000000..1018fd0
--- /dev/null
+++ b/client/image_delta.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ImageDelta creates a delta between the src and dest images.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageDelta(ctx context.Context, src, dest string) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("src", src)
+	query.Set("dest", dest)
+
+	resp, err := cli.postRaw(ctx, "/images/delta", query, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/client/image_history.go b/client/image_history.go
new file mode 100644
index 0000000..7b4babc
--- /dev/null
+++ b/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/docker/api/types/image"
+	"golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) {
+	var history []image.HistoryResponseItem
+	serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+	if err != nil {
+		return history, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&history)
+	ensureReaderClosed(serverResp)
+	return history, err
+}
diff --git a/client/image_history_test.go b/client/image_history_test.go
new file mode 100644
index 0000000..101bffd
--- /dev/null
+++ b/client/image_history_test.go
@@ -0,0 +1,60 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/image"
+	"golang.org/x/net/context"
+)
+
+func TestImageHistoryError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ImageHistory(context.Background(), "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server error, got %v", err)
+	}
+}
+
+func TestImageHistory(t *testing.T) {
+	expectedURL := "/images/image_id/history"
+	client := &Client{
+		client: newMockClient(func(r *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(r.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+			}
+			b, err := json.Marshal([]image.HistoryResponseItem{
+				{
+					ID:   "image_id1",
+					Tags: []string{"tag1", "tag2"},
+				},
+				{
+					ID:   "image_id2",
+					Tags: []string{"tag1", "tag2"},
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+	imageHistories, err := client.ImageHistory(context.Background(), "image_id")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(imageHistories) != 2 {
+		t.Fatalf("expected 2 history entries, got %v", imageHistories)
+	}
+}
diff --git a/client/image_import.go b/client/image_import.go
new file mode 100644
index 0000000..d7dedd8
--- /dev/null
+++ b/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
+	if ref != "" {
+		// Check if the given image name can be resolved
+		if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+			return nil, err
+		}
+	}
+
+	query := url.Values{}
+	query.Set("fromSrc", source.SourceName)
+	query.Set("repo", ref)
+	query.Set("tag", options.Tag)
+	query.Set("message", options.Message)
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+
+	resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/client/image_import_test.go b/client/image_import_test.go
new file mode 100644
index 0000000..370ad5f
--- /dev/null
+++ b/client/image_import_test.go
@@ -0,0 +1,81 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestImageImportError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server error, got %v", err)
+	}
+}
+
+func TestImageImport(t *testing.T) {
+	expectedURL := "/images/create"
+	client := &Client{
+		client: newMockClient(func(r *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(r.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+			}
+			query := r.URL.Query()
+			fromSrc := query.Get("fromSrc")
+			if fromSrc != "image_source" {
+				return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc)
+			}
+			repo := query.Get("repo")
+			if repo != "repository_name:imported" {
+				return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name:imported', got %s", repo)
+			}
+			tag := query.Get("tag")
+			if tag != "imported" {
+				return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag)
+			}
+			message := query.Get("message")
+			if message != "A message" {
+				return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message)
+			}
+			changes := query["changes"]
+			expectedChanges := []string{"change1", "change2"}
+			if !reflect.DeepEqual(expectedChanges, changes) {
+				return nil, fmt.Errorf("changes not set in URL query properly.
Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/client/image_inspect.go b/client/image_inspect.go new file mode 100644 index 0000000..b3a64ce --- /dev/null +++ b/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/client/image_inspect_test.go b/client/image_inspect_test.go new file mode 100644 index 0000000..74a4e49 --- /dev/null +++ b/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + ID: "image_id", + RepoTags: expectedTags, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + 
}), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/client/image_list.go b/client/image_list.go new file mode 100644 index 0000000..f26464f --- /dev/null +++ b/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/client/image_list_test.go b/client/image_list_test.go new file mode 100644 index 0000000..7c4a464 --- /dev/null +++ b/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + 
client := &Client{
+			client: newMockClient(func(req *http.Request) (*http.Response, error) {
+				if !strings.HasPrefix(req.URL.Path, expectedURL) {
+					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+				}
+				query := req.URL.Query()
+				for key, expected := range listCase.expectedQueryParams {
+					actual := query.Get(key)
+					if actual != expected {
+						return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual)
+					}
+				}
+				content, err := json.Marshal([]types.ImageSummary{
+					{
+						ID: "image_id1",
+					},
+					{
+						ID: "image_id2",
+					},
+				})
+				if err != nil {
+					return nil, err
+				}
+				return &http.Response{
+					StatusCode: http.StatusOK,
+					Body:       ioutil.NopCloser(bytes.NewReader(content)),
+				}, nil
+			}),
+		}
+
+		images, err := client.ImageList(context.Background(), listCase.options)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(images) != 2 {
+			t.Fatalf("expected 2 images, got %v", images)
+		}
+	}
+}
+
+func TestImageListApiBefore125(t *testing.T) {
+	expectedFilter := "image:tag"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			query := req.URL.Query()
+			actualFilter := query.Get("filter")
+			if actualFilter != expectedFilter {
+				return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter)
+			}
+			actualFilters := query.Get("filters")
+			if actualFilters != "" {
+				return nil, fmt.Errorf("filters should not have been present, but got: %s", actualFilters)
+			}
+			content, err := json.Marshal([]types.ImageSummary{
+				{
+					ID: "image_id1",
+				},
+				{
+					ID: "image_id2",
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(content)),
+			}, nil
+		}),
+		version: "1.24",
+	}
+
+	filters := filters.NewArgs()
+	filters.Add("reference", "image:tag")
+
+	options := types.ImageListOptions{
+		Filters: filters,
+	}
+
+	images, err := client.ImageList(context.Background(), options)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(images) != 2 {
+		t.Fatalf("expected 2 images, got %v", images)
+	}
+}
diff --git a/client/image_load.go b/client/image_load.go
new file mode 100644
index 0000000..77aaf1a
--- /dev/null
+++ b/client/image_load.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
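For context, a minimal caller-side sketch of the load API declared just below. It assumes a daemon reachable through client.NewEnvClient and an image tarball produced by docker save; the path is hypothetical:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical tarball, e.g. the output of `docker save -o /tmp/images.tar ...`.
	f, err := os.Open("/tmp/images.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// quiet=true asks the daemon to suppress verbose progress output.
	resp, err := cli.ImageLoad(context.Background(), f, true)
	if err != nil {
		log.Fatal(err)
	}
	// The caller owns resp.Body, as the comment below notes.
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}

+// ImageLoad loads an image in the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser in the
+// ImageLoadResponse returned by this function.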
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/client/image_load_test.go b/client/image_load_test.go new file mode 100644 index 0000000..68dc14f --- /dev/null +++ b/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + }, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/client/image_prune.go b/client/image_prune.go new file mode 100644 index 0000000..5ef98b7 --- /dev/null +++ b/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/client/image_prune_test.go b/client/image_prune_test.go new file mode 100644 index 0000000..453f84a --- /dev/null +++ b/client/image_prune_test.go @@ -0,0 +1,119 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestImagesPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ImagesPrune(context.Background(), filters) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestImagesPrune(t *testing.T) { + expectedURL := "/v1.25/images/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + 
"filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{ + { + Deleted: "image_id1", + }, + { + Deleted: "image_id2", + }, + }, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ImagesPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.ImagesDeleted, 2) + assert.Equal(t, uint64(9999), report.SpaceReclaimed) + } +} diff --git a/client/image_pull.go b/client/image_pull.go new file mode 100644 index 0000000..a72b9bf --- /dev/null +++ b/client/image_pull.go @@ -0,0 +1,61 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. 
+func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/client/image_pull_test.go b/client/image_pull_test.go new file mode 100644 index 0000000..ab49d2d --- /dev/null +++ b/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || !strings.Contains(err.Error(), "invalid reference format") { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid 
auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/client/image_push.go b/client/image_push.go new file mode 100644 index 0000000..410d2fb --- /dev/null +++ b/client/image_push.go @@ -0,0 +1,56 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
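Given the retry behavior described above, a caller typically supplies both a registry auth header and a PrivilegeFunc. A minimal sketch of using the push API defined next, assuming a daemon reachable through client.NewEnvClient; the repository name and auth strings are placeholders:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	opts := types.ImagePushOptions{
		// Base64-encoded auth config; placeholder value.
		RegistryAuth: "<base64-auth>",
		// Invoked only after the daemon answers 401; must return a
		// fresh auth header (placeholder value).
		PrivilegeFunc: func() (string, error) {
			return "<refreshed-base64-auth>", nil
		},
	}
	rc, err := cli.ImagePush(context.Background(), "registry.example.com/app:1.0", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}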
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
+	ref, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, isCanonical := ref.(reference.Canonical); isCanonical {
+		return nil, errors.New("cannot push a digest reference")
+	}
+
+	tag := ""
+	name := reference.FamiliarName(ref)
+
+	if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
+		tag = nameTaggedRef.Tag()
+	}
+
+	query := url.Values{}
+	query.Set("tag", tag)
+
+	resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/client/image_push_test.go b/client/image_push_test.go
new file mode 100644
index 0000000..f93debf
--- /dev/null
+++ b/client/image_push_test.go
@@ -0,0 +1,180 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestImagePushReferenceError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			return nil, nil
+		}),
+	}
+	// An empty reference is an invalid reference
+	_, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{})
+	if err == nil || !strings.Contains(err.Error(), "invalid reference format") {
+		t.Fatalf("expected an error, got %v", err)
+	}
+	// A canonical reference cannot be pushed
+	_, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{})
+	if err == nil || err.Error() != "cannot push a digest reference" {
+		t.Fatalf("expected an error, got %v", err)
+	}
+}
+
+func TestImagePushAnyError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestImagePushStatusUnauthorizedError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
+	}
+	_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Unauthorized error" {
+		t.Fatalf("expected an Unauthorized Error, got %v", err)
+	}
+}
+
+func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
+	}
+	privilegeFunc := func() (string, error) {
+		return "", fmt.Errorf("Error requesting privilege")
+	}
+	_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{
+		PrivilegeFunc: privilegeFunc,
+	})
+	if 
err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/client/image_remove.go b/client/image_remove.go new file mode 100644 index 0000000..6921209 --- /dev/null +++ b/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDeleteResponseItem + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/client/image_remove_test.go b/client/image_remove_test.go new file mode 100644 index 0000000..9856311 --- /dev/null +++ b/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDeleteResponseItem{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/client/image_save.go b/client/image_save.go new file mode 100644 index 0000000..ecac880 --- /dev/null +++ b/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/client/image_save_test.go b/client/image_save_test.go new file mode 100644 index 0000000..8f0cf88 --- /dev/null +++ b/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. 
Expected %v, got %v", names, expectedNames) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/client/image_search.go b/client/image_search.go new file mode 100644 index 0000000..b0fcd5c --- /dev/null +++ b/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/client/image_search_test.go b/client/image_search_test.go new file mode 100644 index 0000000..b17bbd8 --- /dev/null +++ b/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } 
+} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/client/image_tag.go b/client/image_tag.go new file mode 100644 index 0000000..8924f71 --- /dev/null +++ b/client/image_tag.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/image_tag_test.go b/client/image_tag_test.go new file mode 100644 index 0000000..f7a0ee3 --- /dev/null +++ b/client/image_tag_test.go @@ -0,0 +1,143 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the responsibility +// of distribution/reference package. 
+func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag: invalid reference format` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTagInvalidSourceImageName(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "invalid_source_image_name_", "repo:tag") + if err == nil || err.Error() != "Error parsing reference: \"invalid_source_image_name_\" is not a valid repository/tag: invalid reference format" { + t.Fatalf("expected Parsing Reference Error, got %v", err) + } +} + +func TestImageTagHexSource(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusOK, "OK")), + } + + err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag") + if err != nil { + t.Fatalf("got error: %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/client/info.go b/client/info.go new file mode 100644 index 0000000..ac07961 --- /dev/null +++ b/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/client/info_test.go b/client/info_test.go new file mode 100644 index 0000000..79f23c8 --- /dev/null +++ b/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/client/interface.go b/client/interface.go new file mode 100644 index 0000000..1eb3385 --- /dev/null +++ b/client/interface.go @@ -0,0 +1,195 @@ +package client + +import ( + "io" + "net" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + 
"github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. +type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, 
updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageDelta(ctx context.Context, src, dest string) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) 
(swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type 
VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/client/interface_experimental.go b/client/interface_experimental.go new file mode 100644 index 0000000..51da98e --- /dev/null +++ b/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/client/interface_stable.go b/client/interface_stable.go new file mode 100644 index 0000000..cc90a3c --- /dev/null +++ b/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/client/login.go b/client/login.go new file mode 100644 index 0000000..79219ff --- /dev/null +++ b/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. 
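A minimal usage sketch of the login call, assuming a Client built with client.NewEnvClient and placeholder credentials (the Docker Hub v1 endpoint shown is the conventional default, not something this patch mandates):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // honours DOCKER_HOST and friends
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder credentials, for illustration only.
	body, err := cli.RegistryLogin(context.Background(), types.AuthConfig{
		Username:      "someuser",
		Password:      "supersecret",
		ServerAddress: "https://index.docker.io/v1/",
	})
	if err != nil {
		log.Fatal(err) // surfaces unauthorizedError on HTTP 401
	}
	fmt.Println(body.Status)
}
```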
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/client/network_connect.go b/client/network_connect.go new file mode 100644 index 0000000..c022c17 --- /dev/null +++ b/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/network_connect_test.go b/client/network_connect_test.go new file mode 100644 index 0000000..91b1a76 --- /dev/null +++ b/client/network_connect_test.go @@ -0,0 +1,111 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig != nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, 
fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig == nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be not nil, got %v", connect.EndpointConfig) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/network_create.go b/client/network_create.go new file mode 100644 index 0000000..4067a54 --- /dev/null +++ b/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/client/network_create_test.go b/client/network_create_test.go new file mode 100644 index 0000000..0e2457f --- /dev/null +++ b/client/network_create_test.go @@ -0,0 +1,72 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkCreate(t *testing.T) { + expectedURL := "/networks/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkCreateResponse{ + ID: "network_id", + Warning: "warning", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ + CheckDuplicate: true, + Driver: "mydriver", + EnableIPv6: true, + Internal: true, + Options: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if 
networkResponse.ID != "network_id" { + t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) + } + if networkResponse.Warning != "warning" { + t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) + } +} diff --git a/client/network_disconnect.go b/client/network_disconnect.go new file mode 100644 index 0000000..24b58e3 --- /dev/null +++ b/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/network_disconnect_test.go b/client/network_disconnect_test.go new file mode 100644 index 0000000..b54a2b1 --- /dev/null +++ b/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/network_inspect.go b/client/network_inspect.go new file mode 100644 index 0000000..848c979 --- /dev/null +++ b/client/network_inspect.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. 
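As a usage sketch, assuming a local daemon where the default "bridge" network exists; Verbose and Scope are optional and mainly matter for swarm-scoped networks:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// "bridge" is present on a default local daemon.
	res, err := cli.NetworkInspect(context.Background(), "bridge", types.NetworkInspectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s driver=%s scope=%s\n", res.Name, res.Driver, res.Scope)
}
```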
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/client/network_inspect_test.go b/client/network_inspect_test.go new file mode 100644 index 0000000..9bfb55d --- /dev/null +++ b/client/network_inspect_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{}) + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a networkNotFound error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + var ( + content []byte + err error + ) + if strings.Contains(req.URL.RawQuery, "scope=global") { + return &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + } + + if strings.Contains(req.URL.RawQuery, "verbose=true") { + s := map[string]network.ServiceInfo{ + "web": {}, + } + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + Services: s, + }) + } else { + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + } + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: 
ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{}) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + + r, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Verbose: true}) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + _, ok := r.Services["web"] + if !ok { + t.Fatalf("expected service `web` to be present in the verbose output") + } + + _, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Scope: "global"}) + assert.EqualError(t, err, "Error: No such network: network_id") +} diff --git a/client/network_list.go b/client/network_list.go new file mode 100644 index 0000000..e566a93 --- /dev/null +++ b/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/client/network_list_test.go b/client/network_list_test.go new file mode 100644 index 0000000..4d44349 --- /dev/null +++ b/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, +
expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/client/network_prune.go b/client/network_prune.go new file mode 100644 index 0000000..7352a7f --- /dev/null +++ b/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/client/network_prune_test.go b/client/network_prune_test.go new file mode 100644 index 0000000..3e4f5d0 --- /dev/null +++ b/client/network_prune_test.go @@ -0,0 +1,112 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestNetworksPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.NetworksPrune(context.Background(), filters) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworksPrune(t *testing.T) { + expectedURL := "/v1.25/networks/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + 
labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Equal(t, expected, actual) + } + content, err := json.Marshal(types.NetworksPruneReport{ + NetworksDeleted: []string{"network_id1", "network_id2"}, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.NetworksPrune(context.Background(), listCase.filters) + assert.NoError(t, err) + assert.Len(t, report.NetworksDeleted, 2) + } +} diff --git a/client/network_remove.go b/client/network_remove.go new file mode 100644 index 0000000..6bd6748 --- /dev/null +++ b/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. 
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/network_remove_test.go b/client/network_remove_test.go new file mode 100644 index 0000000..2a7b964 --- /dev/null +++ b/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/node_inspect.go b/client/node_inspect.go new file mode 100644 index 0000000..abf505d --- /dev/null +++ b/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. 
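A brief sketch, assuming the client talks to a swarm manager and "node_id" is a placeholder; the second return value is the raw JSON, handy for custom formatting:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	node, raw, err := cli.NodeInspectWithRaw(context.Background(), "node_id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s availability=%s (%d raw bytes)\n", node.ID, node.Spec.Availability, len(raw))
}
```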
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/client/node_inspect_test.go b/client/node_inspect_test.go new file mode 100644 index 0000000..dca16a8 --- /dev/null +++ b/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected a nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/client/node_list.go b/client/node_list.go new file mode 100644 index 0000000..3e8440f --- /dev/null +++ b/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. 
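A sketch of listing only the managers, assuming a manager endpoint; "role" is one of the filters the /nodes endpoint accepts:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	args := filters.NewArgs()
	args.Add("role", "manager") // keep only manager nodes
	nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{Filters: args})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Println(n.ID, n.Spec.Role, n.Status.State)
	}
}
```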
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/client/node_list_test.go b/client/node_list_test.go new file mode 100644 index 0000000..0251b5c --- /dev/null +++ b/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/client/node_remove.go b/client/node_remove.go new file mode 100644 index 0000000..0a77f3d --- /dev/null +++ b/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. 
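A sketch of a forced removal, with "node_id" again a placeholder; as the implementation below shows, Force maps to the ?force=1 query parameter:

```go
package main

import (
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// Force removes the node even if it has not been drained and demoted first.
	if err := cli.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: true}); err != nil {
		log.Fatal(err)
	}
}
```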
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/node_remove_test.go b/client/node_remove_test.go new file mode 100644 index 0000000..f2f8adc --- /dev/null +++ b/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/client/node_update.go b/client/node_update.go new file mode 100644 index 0000000..3ca9760 --- /dev/null +++ b/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. 
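Updates are guarded by the version obtained from a prior inspect, so the usual pattern is inspect, mutate the spec, then update. A sketch that drains a placeholder node:

```go
package main

import (
	"log"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	node, _, err := cli.NodeInspectWithRaw(ctx, "node_id") // placeholder ID
	if err != nil {
		log.Fatal(err)
	}
	// Drain the node; passing node.Version guards against clobbering a
	// concurrent update to the same node.
	node.Spec.Availability = swarm.NodeAvailabilityDrain
	if err := cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec); err != nil {
		log.Fatal(err)
	}
}
```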
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/node_update_test.go b/client/node_update_test.go new file mode 100644 index 0000000..613ff10 --- /dev/null +++ b/client/node_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestNodeUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeUpdate(t *testing.T) { + expectedURL := "/nodes/node_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/parse_logs.go b/client/parse_logs.go new file mode 100644 index 0000000..e427f80 --- /dev/null +++ b/client/parse_logs.go @@ -0,0 +1,41 @@ +package client + +// parse_logs.go contains utility helpers for getting information out of docker +// log lines. really, it only contains ParseDetails right now. maybe in the +// future there will be some desire to parse log messages back into a struct? +// that would go here if we did + +import ( + "net/url" + "strings" + + "github.com/pkg/errors" +) + +// ParseLogDetails takes a details string of key value pairs in the form +// "k=v,l=w", where the keys and values are url query escaped, and each pair +// is separated by a comma, returns a map. 
It returns an error if the details +// string is not in a valid format. +// The exact form of the details encoding is implemented in +// api/server/httputils/write_log_stream.go. +func ParseLogDetails(details string) (map[string]string, error) { + pairs := strings.Split(details, ",") + detailsMap := make(map[string]string, len(pairs)) + for _, pair := range pairs { + p := strings.SplitN(pair, "=", 2) + // if there is no equals sign, we will only get 1 part back + if len(p) != 2 { + return nil, errors.New("invalid details format") + } + k, err := url.QueryUnescape(p[0]) + if err != nil { + return nil, err + } + v, err := url.QueryUnescape(p[1]) + if err != nil { + return nil, err + } + detailsMap[k] = v + } + return detailsMap, nil +} diff --git a/client/parse_logs_test.go b/client/parse_logs_test.go new file mode 100644 index 0000000..ac7f616 --- /dev/null +++ b/client/parse_logs_test.go @@ -0,0 +1,36 @@ +package client + +import ( + "reflect" + "testing" + + "github.com/pkg/errors" +) + +func TestParseLogDetails(t *testing.T) { + testCases := []struct { + line string + expected map[string]string + err error + }{ + {"key=value", map[string]string{"key": "value"}, nil}, + {"key1=value1,key2=value2", map[string]string{"key1": "value1", "key2": "value2"}, nil}, + {"key+with+spaces=value%3Dequals,asdf%2C=", map[string]string{"key with spaces": "value=equals", "asdf,": ""}, nil}, + {"key=,=nothing", map[string]string{"key": "", "": "nothing"}, nil}, + {"=", map[string]string{"": ""}, nil}, + {"errors", nil, errors.New("invalid details format")}, + } + for _, tc := range testCases { + tc := tc // capture range variable + t.Run(tc.line, func(t *testing.T) { + t.Parallel() + res, err := ParseLogDetails(tc.line) + if err != nil && (err.Error() != tc.err.Error()) { + t.Fatalf("unexpected error parsing logs:\nExpected:\n\t%v\nActual:\n\t%v", tc.err, err) + } + if !reflect.DeepEqual(tc.expected, res) { + t.Errorf("result does not match expected:\nExpected:\n\t%#v\nActual:\n\t%#v", tc.expected, res) + } + }) + } +} diff --git a/client/ping.go b/client/ping.go new file mode 100644 index 0000000..a4c2e2c --- /dev/null +++ b/client/ping.go @@ -0,0 +1,32 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + if serverResp.header != nil { + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + ping.OSType = serverResp.header.Get("OSType") + } + + err = cli.checkResponseErr(serverResp) + return ping, err +} diff --git a/client/ping_test.go b/client/ping_test.go new file mode 100644 index 0000000..7a4a1a9 --- /dev/null +++ b/client/ping_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "errors" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +// TestPingFail tests that when a server sends a non-successful response we +// can still grab API details, when set. +// Some of this is just exercising the code paths to make sure there are no +// panics.
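For contrast with the mocked tests below, a sketch of calling Ping from application code, assuming a Client from client.NewEnvClient and a reachable daemon:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Headers are parsed even on non-2xx responses, as the tests verify.
	fmt.Printf("api=%s os=%s experimental=%v\n", ping.APIVersion, ping.OSType, ping.Experimental)
}
```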
+func TestPingFail(t *testing.T) { + var withHeader bool + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + resp := &http.Response{StatusCode: http.StatusInternalServerError} + if withHeader { + resp.Header = http.Header{} + resp.Header.Set("API-Version", "awesome") + resp.Header.Set("Docker-Experimental", "true") + } + resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) + return resp, nil + }), + } + + ping, err := client.Ping(context.Background()) + assert.Error(t, err) + assert.Equal(t, false, ping.Experimental) + assert.Equal(t, "", ping.APIVersion) + + withHeader = true + ping2, err := client.Ping(context.Background()) + assert.Error(t, err) + assert.Equal(t, true, ping2.Experimental) + assert.Equal(t, "awesome", ping2.APIVersion) +} + +// TestPingWithError tests the case where there is a protocol error in the ping. +// This test is mostly just testing that there are no panics in this code path. +func TestPingWithError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + resp := &http.Response{StatusCode: http.StatusInternalServerError} + resp.Header = http.Header{} + resp.Header.Set("API-Version", "awesome") + resp.Header.Set("Docker-Experimental", "true") + resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) + return resp, errors.New("some error") + }), + } + + ping, err := client.Ping(context.Background()) + assert.Error(t, err) + assert.Equal(t, false, ping.Experimental) + assert.Equal(t, "", ping.APIVersion) +} + +// TestPingSuccess tests that we are able to get the expected API headers/ping +// details on success. +func TestPingSuccess(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + resp := &http.Response{StatusCode: http.StatusOK} + resp.Header = http.Header{} + resp.Header.Set("API-Version", "awesome") + resp.Header.Set("Docker-Experimental", "true") + resp.Body = ioutil.NopCloser(strings.NewReader("OK")) + return resp, nil + }), + } + ping, err := client.Ping(context.Background()) + assert.NoError(t, err) + assert.Equal(t, true, ping.Experimental) + assert.Equal(t, "awesome", ping.APIVersion) +} diff --git a/client/plugin_create.go b/client/plugin_create.go new file mode 100644 index 0000000..27954aa --- /dev/null +++ b/client/plugin_create.go @@ -0,0 +1,26 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/client/plugin_disable.go b/client/plugin_disable.go new file mode 100644 index 0000000..30467db --- /dev/null +++ b/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query :=
url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/plugin_disable_test.go b/client/plugin_disable_test.go new file mode 100644 index 0000000..a4de45b --- /dev/null +++ b/client/plugin_disable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginDisableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginDisable(t *testing.T) { + expectedURL := "/plugins/plugin_name/disable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/plugin_enable.go b/client/plugin_enable.go new file mode 100644 index 0000000..95517c4 --- /dev/null +++ b/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/plugin_enable_test.go b/client/plugin_enable_test.go new file mode 100644 index 0000000..b276813 --- /dev/null +++ b/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err 
:= client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/plugin_inspect.go b/client/plugin_inspect.go new file mode 100644 index 0000000..89f39ee --- /dev/null +++ b/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/client/plugin_inspect_test.go b/client/plugin_inspect_test.go new file mode 100644 index 0000000..fae407e --- /dev/null +++ b/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/client/plugin_install.go b/client/plugin_install.go new file mode 100644 index 0000000..ce3e050 --- /dev/null +++ b/client/plugin_install.go @@ -0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference 
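+	// (The daemon reports the plugin's final name back in the Docker-Plugin-Name +	// response header; it is read from the pull response below.)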
+ query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/client/plugin_list.go b/client/plugin_list.go new file mode 100644 index 0000000..3acde3b --- /dev/null +++ b/client/plugin_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + 
return plugins, err +} diff --git a/client/plugin_list_test.go b/client/plugin_list_test.go new file mode 100644 index 0000000..6887079 --- /dev/null +++ b/client/plugin_list_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + + enabledFilters := filters.NewArgs() + enabledFilters.Add("enabled", "true") + + capabilityFilters := filters.NewArgs() + capabilityFilters.Add("capability", "volumedriver") + capabilityFilters.Add("capability", "authz") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.NewArgs(), + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + filters: enabledFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"enabled":{"true":true}}`, + }, + }, + { + filters: capabilityFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"capability":{"authz":true,"volumedriver":true}}`, + }, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } + } +} diff --git a/client/plugin_push.go b/client/plugin_push.go new file mode 100644 index 0000000..1e5f963 --- /dev/null +++ b/client/plugin_push.go @@ -0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/client/plugin_push_test.go b/client/plugin_push_test.go new file mode 100644 index 0000000..d9f70cd --- /dev/null +++ b/client/plugin_push_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginPushError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginPush(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + auth := req.Header.Get("X-Registry-Auth") + if auth != "authtoken" { + return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/plugin_remove.go b/client/plugin_remove.go new file mode 100644 index 0000000..b017e4d --- /dev/null +++ b/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/plugin_remove_test.go b/client/plugin_remove_test.go new file mode 100644 index 0000000..b2d5157 --- /dev/null +++ b/client/plugin_remove_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + 
+ "golang.org/x/net/context" +) + +func TestPluginRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginRemove(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/plugin_set.go b/client/plugin_set.go new file mode 100644 index 0000000..3260d2a --- /dev/null +++ b/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/plugin_set_test.go b/client/plugin_set_test.go new file mode 100644 index 0000000..2450254 --- /dev/null +++ b/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/plugin_upgrade.go b/client/plugin_upgrade.go new file mode 100644 index 0000000..049ebfa --- /dev/null +++ b/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client + +import ( + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query 
:= url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/client/request.go b/client/request.go new file mode 100644 index 0000000..3e7d43f --- /dev/null +++ b/client/request.go @@ -0,0 +1,262 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. 
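+// Callers are expected to drain and close the response body, e.g. via +// ensureReaderClosed, so that the underlying connection can be reused.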
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, err + } + if err := cli.checkResponseErr(resp); err != nil { + return resp, err + } + return resp, nil +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
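+		// A plain identity switch keeps context.Canceled and context.DeadlineExceeded +		// intact for callers that test err == context.Canceled directly.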
+ switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header + } + return serverResp, nil +} + +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err 
!= nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, response.body, 512) + response.body.Close() + } +} diff --git a/client/request_test.go b/client/request_test.go new file mode 100644 index 0000000..63908ae --- /dev/null +++ b/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader should set fake host for local communications, set real host +// for normal communications. +func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/client/secret_create.go b/client/secret_create.go new file mode 100644 index 0000000..4354afe --- /dev/null +++ b/client/secret_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. 
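+// It requires API version 1.25 or later; on older daemons a version error is +// returned before any request is made.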
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/client/secret_create_test.go b/client/secret_create_test.go new file mode 100644 index 0000000..ccc7349 --- /dev/null +++ b/client/secret_create_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretCreateUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + assert.EqualError(t, err, `"secret create" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretCreateError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretCreate(t *testing.T) { + expectedURL := "/v1.25/secrets/create" + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.SecretCreateResponse{ + ID: "test_secret", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_secret" { + t.Fatalf("expected `test_secret`, got %s", r.ID) + } +} diff --git a/client/secret_inspect.go b/client/secret_inspect.go new file mode 100644 index 0000000..9b60297 --- /dev/null +++ b/client/secret_inspect.go @@ -0,0 +1,37 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = 
json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/client/secret_inspect_test.go b/client/secret_inspect_test.go new file mode 100644 index 0000000..1581da1 --- /dev/null +++ b/client/secret_inspect_test.go @@ -0,0 +1,78 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + assert.EqualError(t, err, `"secret inspect" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected a secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/v1.25/secrets/secret_id" + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/client/secret_list.go b/client/secret_list.go new file mode 100644 index 0000000..0d33ecf --- /dev/null +++ b/client/secret_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. 
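+// It requires API version 1.25 or later; options.Filters, when non-empty, is +// encoded into the "filters" query parameter.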
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/client/secret_list_test.go b/client/secret_list_test.go new file mode 100644 index 0000000..67a94d3 --- /dev/null +++ b/client/secret_list_test.go @@ -0,0 +1,106 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretListUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + assert.EqualError(t, err, `"secret list" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretListError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/v1.25/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/client/secret_remove.go b/client/secret_remove.go new file mode 100644 index 0000000..c5e37af --- /dev/null +++ b/client/secret_remove.go @@ -0,0 +1,13 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/secret_remove_test.go b/client/secret_remove_test.go new file mode 100644 index 0000000..bb41edf --- /dev/null +++ b/client/secret_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretRemoveUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + err := client.SecretRemove(context.Background(), "secret_id") + assert.EqualError(t, err, `"secret remove" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/v1.25/secrets/secret_id" + + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/secret_update.go b/client/secret_update.go new file mode 100644 index 0000000..875a4c9 --- /dev/null +++ b/client/secret_update.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretUpdate attempts to update a Secret +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git 
a/client/secret_update_test.go b/client/secret_update_test.go new file mode 100644 index 0000000..4a79132 --- /dev/null +++ b/client/secret_update_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestSecretUpdateUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + assert.EqualError(t, err, `"secret update" requires API version 1.25, but the Docker daemon API version is 1.24`) +} + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/v1.25/secrets/secret_id/update" + + client := &Client{ + version: "1.25", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/service_create.go b/client/service_create.go new file mode 100644 index 0000000..c2fc277 --- /dev/null +++ b/client/service_create.go @@ -0,0 +1,112 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. 
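+// When options.QueryRegistry is set, the image reference is resolved against +// the registry so that it can be pinned by digest and the platforms compatible +// with the service can be recorded in the task placement.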
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // ensure that the image is tagged + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + + // Contact the registry to retrieve digest and platform information + if options.QueryRegistry { + distributionInspect, err := cli.DistributionInspect(ctx, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + distErr = err + if err == nil { + // now pin by digest if the image doesn't already contain a digest + if img := imageWithDigestString(service.TaskTemplate.ContainerSpec.Image, distributionInspect.Descriptor.Digest); img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + // add platforms that are compatible with the service + service.TaskTemplate.Placement = setServicePlatforms(service.TaskTemplate.Placement, distributionInspect) + } + } + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// an empty string if there are no updates. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return "" +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// setServicePlatforms sets Platforms in swarm.Placement to list all +// compatible platforms for the service, as found in distributionInspect +// and returns a pointer to the new or updated swarm.Placement struct. +func setServicePlatforms(placement *swarm.Placement, distributionInspect registrytypes.DistributionInspect) *swarm.Placement { + if placement == nil { + placement = &swarm.Placement{} + } + // reset any existing listed platforms + placement.Platforms = []swarm.Platform{} + for _, p := range distributionInspect.Platforms { + placement.Platforms = append(placement.Platforms, swarm.Platform{ + Architecture: p.Architecture, + OS: p.OS, + }) + } + return placement +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest.
The formatting +// is hardcoded, but could be made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} diff --git a/client/service_create_test.go b/client/service_create_test.go new file mode 100644 index 0000000..eb9e1b5 --- /dev/null +++ b/client/service_create_test.go @@ -0,0 +1,210 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceCreate(t *testing.T) { + expectedURL := "/services/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } +} + +func TestServiceCreateCompatiblePlatforms(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { + var serviceSpec swarm.ServiceSpec + + // check if the /distribution endpoint returned correct output + err := json.NewDecoder(req.Body).Decode(&serviceSpec) + if err != nil { + return nil, err + } + + assert.Equal(t, "foobar:1.0@sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", serviceSpec.TaskTemplate.ContainerSpec.Image) + assert.Len(t, serviceSpec.TaskTemplate.Placement.Platforms, 1) + + p := serviceSpec.TaskTemplate.Placement.Platforms[0] + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_" + p.OS + "_" + p.Architecture, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{ + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", + }, + Platforms: []v1.Platform{ + { + Architecture: "amd64", + OS: "linux", + }, + }, + }) + if err !=
nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else { + return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) + } + }), + } + + spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: swarm.ContainerSpec{Image: "foobar:1.0"}}} + + r, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{QueryRegistry: true}) + assert.NoError(t, err) + assert.Equal(t, "service_linux_amd64", r.ID) +} + +func TestServiceCreateDigestPinning(t *testing.T) { + dgst := "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + dgstAlt := "sha256:37ffbf3f7497c07584dc9637ffbf3f7497c0758c0537ffbf3f7497c0c88e2bb7" + serviceCreateImage := "" + pinByDigestTests := []struct { + img string // input image provided by the user + expected string // expected image after digest pinning + }{ + // default registry returns familiar string + {"docker.io/library/alpine", "alpine:latest@" + dgst}, + // provided tag is preserved and digest added + {"alpine:edge", "alpine:edge@" + dgst}, + // image with provided alternative digest remains unchanged + {"alpine@" + dgstAlt, "alpine@" + dgstAlt}, + // image with provided tag and alternative digest remains unchanged + {"alpine:edge@" + dgstAlt, "alpine:edge@" + dgstAlt}, + // image on alternative registry does not result in familiar string + {"alternate.registry/library/alpine", "alternate.registry/library/alpine:latest@" + dgst}, + // unresolvable image does not get a digest + {"cannotresolve", "cannotresolve:latest"}, + } + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { + // reset and set image received by the service create endpoint + serviceCreateImage = "" + var service swarm.ServiceSpec + if err := json.NewDecoder(req.Body).Decode(&service); err != nil { + return nil, fmt.Errorf("could not parse service create request") + } + serviceCreateImage = service.TaskTemplate.ContainerSpec.Image + + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/cannotresolve") { + // unresolvable image + return nil, fmt.Errorf("cannot resolve image") + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { + // resolvable images + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{ + Digest: digest.Digest(dgst), + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) + }), + } + + // run pin by digest tests + for _, p := range pinByDigestTests { + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: p.img, + }, + }, + }, types.ServiceCreateOptions{QueryRegistry: true}) + + if err != nil { + t.Fatal(err) + } + + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } + + if p.expected != serviceCreateImage { + t.Fatalf("expected image %s, got %s", p.expected, serviceCreateImage) + } + } +} diff --git a/client/service_inspect.go 
b/client/service_inspect.go new file mode 100644 index 0000000..d7e051e --- /dev/null +++ b/client/service_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/client/service_inspect_test.go b/client/service_inspect_test.go new file mode 100644 index 0000000..d53f583 --- /dev/null +++ b/client/service_inspect_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing", types.ServiceInspectOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{}) + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected a serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id", types.ServiceInspectOptions{}) + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/client/service_list.go b/client/service_list.go new file mode 100644 index 0000000..c29e6d4 --- /dev/null +++ b/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" 
+ "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/client/service_list_test.go b/client/service_list_test.go new file mode 100644 index 0000000..213981e --- /dev/null +++ b/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/client/service_logs.go b/client/service_logs.go new file mode 100644 index 0000000..24384e3 --- /dev/null +++ b/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+	query := url.Values{}
+	if options.ShowStdout {
+		query.Set("stdout", "1")
+	}
+
+	if options.ShowStderr {
+		query.Set("stderr", "1")
+	}
+
+	if options.Since != "" {
+		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+		if err != nil {
+			return nil, err
+		}
+		query.Set("since", ts)
+	}
+
+	if options.Timestamps {
+		query.Set("timestamps", "1")
+	}
+
+	if options.Details {
+		query.Set("details", "1")
+	}
+
+	if options.Follow {
+		query.Set("follow", "1")
+	}
+	query.Set("tail", options.Tail)
+
+	resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/client/service_logs_test.go b/client/service_logs_test.go
new file mode 100644
index 0000000..a6d002b
--- /dev/null
+++ b/client/service_logs_test.go
@@ -0,0 +1,133 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+
+	"golang.org/x/net/context"
+)
+
+func TestServiceLogsError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+	_, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{
+		Since: "2006-01-02TZ",
+	})
+	if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) {
+		t.Fatalf("expected a 'parsing time' error, got %v", err)
+	}
+}
+
+func TestServiceLogs(t *testing.T) {
+	expectedURL := "/services/service_id/logs"
+	cases := []struct {
+		options             types.ContainerLogsOptions
+		expectedQueryParams map[string]string
+	}{
+		{
+			expectedQueryParams: map[string]string{
+				"tail": "",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				Tail: "any",
+			},
+			expectedQueryParams: map[string]string{
+				"tail": "any",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				ShowStdout: true,
+				ShowStderr: true,
+				Timestamps: true,
+				Details:    true,
+				Follow:     true,
+			},
+			expectedQueryParams: map[string]string{
+				"tail":       "",
+				"stdout":     "1",
+				"stderr":     "1",
+				"timestamps": "1",
+				"details":    "1",
+				"follow":     "1",
+			},
+		},
+		{
+			options: types.ContainerLogsOptions{
+				// A completely invalid date, timestamp or go duration will be
+				// passed as is
+				Since: "invalid but valid",
+			},
+			expectedQueryParams: map[string]string{
+				"tail":  "",
+				"since": "invalid but valid",
+			},
+		},
+	}
+	for _, logCase := range cases {
+		client := &Client{
+			client: newMockClient(func(r *http.Request) (*http.Response, error) {
+				if !strings.HasPrefix(r.URL.Path, expectedURL) {
+					return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL)
+				}
+				// Check query parameters
+				query := r.URL.Query()
+				for key, expected := range logCase.expectedQueryParams {
+					actual := query.Get(key)
+					if actual != expected {
+						return nil, fmt.Errorf("%s not set in URL query properly.
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/client/service_remove.go b/client/service_remove.go new file mode 100644 index 0000000..a9331f9 --- /dev/null +++ b/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/service_remove_test.go b/client/service_remove_test.go new file mode 100644 index 0000000..8e2ac25 --- /dev/null +++ b/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/client/service_update.go b/client/service_update.go new file mode 100644 index 0000000..a72adc9 --- /dev/null +++ b/client/service_update.go @@ -0,0 +1,72 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. 
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + distErr error + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + // ensure that the image is tagged + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + + // Contact the registry to retrieve digest and platform information + // This happens only when the image has changed + if options.QueryRegistry { + distributionInspect, err := cli.DistributionInspect(ctx, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + distErr = err + if err == nil { + // now pin by digest if the image doesn't already contain a digest + if img := imageWithDigestString(service.TaskTemplate.ContainerSpec.Image, distributionInspect.Descriptor.Digest); img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + // add platforms that are compatible with the service + service.TaskTemplate.Placement = setServicePlatforms(service.TaskTemplate.Placement, distributionInspect) + } + } + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} diff --git a/client/service_update_test.go b/client/service_update_test.go new file mode 100644 index 0000000..76bea17 --- /dev/null +++ b/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := "/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string + }{ + { + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 0, + }, + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 10, + }, + expectedVersion: "10", + }, + } + + for _, updateCase := range updateCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + 
return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + version := req.URL.Query().Get("version") + if version != updateCase.expectedVersion { + return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + }, nil + }), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/client/session.go b/client/session.go new file mode 100644 index 0000000..8ee9162 --- /dev/null +++ b/client/session.go @@ -0,0 +1,19 @@ +package client + +import ( + "net" + "net/http" + + "golang.org/x/net/context" +) + +// DialSession returns a connection that can be used communication with daemon +func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", "/session", nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(req, proto) +} diff --git a/client/session/filesync/diffcopy.go b/client/session/filesync/diffcopy.go new file mode 100644 index 0000000..b15e4ee --- /dev/null +++ b/client/session/filesync/diffcopy.go @@ -0,0 +1,30 @@ +package filesync + +import ( + "time" + + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + "github.com/tonistiigi/fsutil" +) + +func sendDiffCopy(stream grpc.Stream, dir string, excludes []string, progress progressCb) error { + return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{ + ExcludePatterns: excludes, + }, progress) +} + +func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error { + st := time.Now() + defer func() { + logrus.Debugf("diffcopy took: %v", time.Since(st)) + }() + var cf fsutil.ChangeFunc + if cu != nil { + cu.MarkSupported(true) + cf = cu.HandleChange + } + + return fsutil.Receive(ds.Context(), ds, dest, cf) +} diff --git a/client/session/filesync/filesync.go b/client/session/filesync/filesync.go new file mode 100644 index 0000000..fa6dafb --- /dev/null +++ b/client/session/filesync/filesync.go @@ -0,0 +1,173 @@ +package filesync + +import ( + "os" + "strings" + + "github.com/docker/docker/client/session" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type fsSyncProvider struct { + root string + excludes []string + p progressCb + doneCh chan error +} + +// NewFSSyncProvider creates a new provider for sending files from client +func NewFSSyncProvider(root string, excludes []string) session.Attachable { + p := &fsSyncProvider{ + root: root, + excludes: excludes, + } + return p +} + +func (sp *fsSyncProvider) Register(server *grpc.Server) { + RegisterFileSyncServer(server, sp) +} + +func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error { + return sp.handle("diffcopy", stream) +} +func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { + return sp.handle("tarstream", stream) +} + +func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error { + var pr *protocol + for _, p := range supportedProtocols { + if method == p.name && isProtoSupported(p.name) { + pr = &p + break + } + } + if pr == nil { + return errors.New("failed to negotiate protocol") + } + + opts, _ := 
metadata.FromContext(stream.Context()) // if no metadata continue with empty object + + var excludes []string + if len(opts["Override-Excludes"]) == 0 || opts["Override-Excludes"][0] != "true" { + excludes = sp.excludes + } + + var progress progressCb + if sp.p != nil { + progress = sp.p + sp.p = nil + } + + var doneCh chan error + if sp.doneCh != nil { + doneCh = sp.doneCh + sp.doneCh = nil + } + err := pr.sendFn(stream, sp.root, excludes, progress) + if doneCh != nil { + if err != nil { + doneCh <- err + } + close(doneCh) + } + return err +} + +func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) { + sp.p = f + sp.doneCh = doneCh +} + +type progressCb func(int, bool) + +type protocol struct { + name string + sendFn func(stream grpc.Stream, srcDir string, excludes []string, progress progressCb) error + recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error +} + +func isProtoSupported(p string) bool { + // TODO: this should be removed after testing if stability is confirmed + if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { + return strings.EqualFold(p, override) + } + return true +} + +var supportedProtocols = []protocol{ + { + name: "diffcopy", + sendFn: sendDiffCopy, + recvFn: recvDiffCopy, + }, + { + name: "tarstream", + sendFn: sendTarStream, + recvFn: recvTarStream, + }, +} + +// FSSendRequestOpt defines options for FSSend request +type FSSendRequestOpt struct { + SrcPaths []string + OverrideExcludes bool + DestDir string + CacheUpdater CacheUpdater +} + +// CacheUpdater is an object capable of sending notifications for the cache hash changes +type CacheUpdater interface { + MarkSupported(bool) + HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error +} + +// FSSync initializes a transfer of files +func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { + var pr *protocol + for _, p := range supportedProtocols { + if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { + pr = &p + break + } + } + if pr == nil { + return errors.New("no fssync handlers") + } + + opts := make(map[string][]string) + if opt.OverrideExcludes { + opts["Override-Excludes"] = []string{"true"} + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := NewFileSyncClient(c.Conn()) + + var stream grpc.ClientStream + + ctx = metadata.NewContext(ctx, opts) + + switch pr.name { + case "tarstream": + cc, err := client.TarStream(ctx) + if err != nil { + return err + } + stream = cc + case "diffcopy": + cc, err := client.DiffCopy(ctx) + if err != nil { + return err + } + stream = cc + } + + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater) +} diff --git a/client/session/filesync/filesync.pb.go b/client/session/filesync/filesync.pb.go new file mode 100644 index 0000000..c6ed666 --- /dev/null +++ b/client/session/filesync/filesync.pb.go @@ -0,0 +1,575 @@ +// Code generated by protoc-gen-gogo. +// source: filesync.proto +// DO NOT EDIT! + +/* +Package filesync is a generated protocol buffer package. 
+ +It is generated from these files: + filesync.proto + +It has these top-level messages: + BytesMessage +*/ +package filesync + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// BytesMessage contains a chunk of byte data +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} } + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage") +} +func (this *BytesMessage) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*BytesMessage) + if !ok { + that2, ok := that.(BytesMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *BytesMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&filesync.BytesMessage{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFilesync(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for FileSync service + +type FileSyncClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) + TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) +} + +type fileSyncClient struct { + cc *grpc.ClientConn +} + +func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient { + return &fileSyncClient{cc} +} + +func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...) 
+ if err != nil { + return nil, err + } + x := &fileSyncDiffCopyClient{stream} + return x, nil +} + +type FileSync_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...) + if err != nil { + return nil, err + } + x := &fileSyncTarStreamClient{stream} + return x, nil +} + +type FileSync_TarStreamClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncTarStreamClient struct { + grpc.ClientStream +} + +func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for FileSync service + +type FileSyncServer interface { + DiffCopy(FileSync_DiffCopyServer) error + TarStream(FileSync_TarStreamServer) error +} + +func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) { + s.RegisterService(&_FileSync_serviceDesc, srv) +} + +func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream}) +} + +type FileSync_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream}) +} + +type FileSync_TarStreamServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncTarStreamServer struct { + grpc.ServerStream +} + +func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSync_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSync", + HandlerType: (*FileSyncServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSync_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "TarStream", + Handler: _FileSync_TarStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func encodeFixed64Filesync(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Filesync(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *BytesMessage) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovFilesync(uint64(l)) + } + return n +} + +func sovFilesync(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozFilesync(x uint64) (n int) { + return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BytesMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesMessage{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringFilesync(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthFilesync + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFilesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilesync(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthFilesync + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFilesync(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) } + +var fileDescriptorFilesync = []byte{ + // 198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49, + 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, + 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, + 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, + 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, + 0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, + 0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90, + 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32, + 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, + 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, + 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 
0xc7, 0x72, 0x0c, 0x51, + 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c, + 0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00, +} diff --git a/client/session/filesync/filesync.proto b/client/session/filesync/filesync.proto new file mode 100644 index 0000000..2fd5b3e --- /dev/null +++ b/client/session/filesync/filesync.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package moby.filesync.v1; + +option go_package = "filesync"; + +service FileSync{ + rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); + rpc TarStream(stream BytesMessage) returns (stream BytesMessage); +} + +// BytesMessage contains a chunk of byte data +message BytesMessage{ + bytes data = 1; +} \ No newline at end of file diff --git a/client/session/filesync/generate.go b/client/session/filesync/generate.go new file mode 100644 index 0000000..261e876 --- /dev/null +++ b/client/session/filesync/generate.go @@ -0,0 +1,3 @@ +package filesync + +//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto diff --git a/client/session/filesync/tarstream.go b/client/session/filesync/tarstream.go new file mode 100644 index 0000000..ee01e30 --- /dev/null +++ b/client/session/filesync/tarstream.go @@ -0,0 +1,83 @@ +package filesync + +import ( + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func sendTarStream(stream grpc.Stream, dir string, excludes []string, progress progressCb) error { + a, err := archive.TarWithOptions(dir, &archive.TarOptions{ + ExcludePatterns: excludes, + }) + if err != nil { + return err + } + + size := 0 + buf := make([]byte, 1<<15) + t := new(BytesMessage) + for { + n, err := a.Read(buf) + if err != nil { + if err == io.EOF { + break + } + return err + } + t.Data = buf[:n] + + if err := stream.SendMsg(t); err != nil { + return err + } + size += n + if progress != nil { + progress(size, false) + } + } + if progress != nil { + progress(size, true) + } + return nil +} + +func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error { + + pr, pw := io.Pipe() + + go func() { + var ( + err error + t = new(BytesMessage) + ) + for { + if err = ds.RecvMsg(t); err != nil { + if err == io.EOF { + err = nil + } + break + } + _, err = pw.Write(t.Data) + if err != nil { + break + } + } + if err = pw.CloseWithError(err); err != nil { + logrus.Errorf("failed to close tar transfer pipe") + } + }() + + decompressedStream, err := archive.DecompressStream(pr) + if err != nil { + return errors.Wrap(err, "failed to decompress stream") + } + + if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil { + return errors.Wrap(err, "failed to untar context") + } + return nil +} diff --git a/client/session/grpc.go b/client/session/grpc.go new file mode 100644 index 0000000..0f20b15 --- /dev/null +++ b/client/session/grpc.go @@ -0,0 +1,62 @@ +package session + +import ( + "net" + "time" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { + go func() { + <-ctx.Done() + conn.Close() + }() + logrus.Debugf("serving grpc connection") + (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) +} + +func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) { + 
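+	// The connection passed in here is already established (hijacked
+	// from the HTTP upgrade handshake), so the dialer handed to grpc
+	// simply returns it instead of opening a new connection.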
dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + return conn, nil + }) + + cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure()) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create grpc client") + } + + ctx, cancel := context.WithCancel(ctx) + go monitorHealth(ctx, cc, cancel) + + return ctx, cc, nil +} + +func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) { + defer cancelConn() + defer cc.Close() + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(cc) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err != nil { + return + } + } + } +} diff --git a/client/session/manager.go b/client/session/manager.go new file mode 100644 index 0000000..023e850 --- /dev/null +++ b/client/session/manager.go @@ -0,0 +1,187 @@ +package session + +import ( + "net/http" + "strings" + "sync" + + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// Caller can invoke requests on the session +type Caller interface { + Context() context.Context + Supports(method string) bool + Conn() *grpc.ClientConn + Name() string + SharedKey() string +} + +type client struct { + Session + cc *grpc.ClientConn + supported map[string]struct{} +} + +// Manager is a controller for accessing currently active sessions +type Manager struct { + sessions map[string]*client + mu sync.Mutex + updateCondition *sync.Cond +} + +// NewManager returns a new Manager +func NewManager() (*Manager, error) { + sm := &Manager{ + sessions: make(map[string]*client), + } + sm.updateCondition = sync.NewCond(&sm.mu) + return sm, nil +} + +// HandleHTTPRequest handles an incoming HTTP request +func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + hijacker, ok := w.(http.Hijacker) + if !ok { + return errors.New("handler does not support hijack") + } + + uuid := r.Header.Get(headerSessionUUID) + name := r.Header.Get(headerSessionName) + sharedKey := r.Header.Get(headerSessionSharedKey) + + proto := r.Header.Get("Upgrade") + + sm.mu.Lock() + if _, ok := sm.sessions[uuid]; ok { + sm.mu.Unlock() + return errors.Errorf("session %s already exists", uuid) + } + + if proto == "" { + sm.mu.Unlock() + return errors.New("no upgrade proto in request") + } + + if proto != "h2c" { + sm.mu.Unlock() + return errors.Errorf("protocol %s not supported", proto) + } + + conn, _, err := hijacker.Hijack() + if err != nil { + sm.mu.Unlock() + return errors.Wrap(err, "failed to hijack connection") + } + + resp := &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + ProtoMajor: 1, + ProtoMinor: 1, + Header: http.Header{}, + } + resp.Header.Set("Connection", "Upgrade") + resp.Header.Set("Upgrade", proto) + + // set raw mode + conn.Write([]byte{}) + resp.Write(conn) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ctx, cc, err := grpcClientConn(ctx, conn) + if err != nil { + sm.mu.Unlock() + return err + } + + c := &client{ + Session: Session{ + uuid: uuid, + name: name, + sharedKey: sharedKey, + ctx: ctx, + cancelCtx: cancel, + done: make(chan struct{}), + }, + cc: cc, + supported: make(map[string]struct{}), + } + + for _, m := range r.Header[headerSessionMethod] { + c.supported[strings.ToLower(m)] = struct{}{} 
+	}
+	sm.sessions[uuid] = c
+	sm.updateCondition.Broadcast()
+	sm.mu.Unlock()
+
+	defer func() {
+		sm.mu.Lock()
+		delete(sm.sessions, uuid)
+		sm.mu.Unlock()
+	}()
+
+	<-c.ctx.Done()
+	conn.Close()
+	close(c.done)
+
+	return nil
+}
+
+// Get returns a session by UUID
+func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			sm.updateCondition.Broadcast()
+		}
+	}()
+
+	var c *client
+
+	sm.mu.Lock()
+	for {
+		select {
+		case <-ctx.Done():
+			sm.mu.Unlock()
+			return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid)
+		default:
+		}
+		var ok bool
+		c, ok = sm.sessions[uuid]
+		if !ok || c.closed() {
+			sm.updateCondition.Wait()
+			continue
+		}
+		sm.mu.Unlock()
+		break
+	}
+
+	return c, nil
+}
+
+func (c *client) Context() context.Context {
+	return c.context()
+}
+
+func (c *client) Name() string {
+	return c.name
+}
+
+func (c *client) SharedKey() string {
+	return c.sharedKey
+}
+
+func (c *client) Supports(url string) bool {
+	_, ok := c.supported[strings.ToLower(url)]
+	return ok
+}
+func (c *client) Conn() *grpc.ClientConn {
+	return c.cc
+}
diff --git a/client/session/session.go b/client/session/session.go
new file mode 100644
index 0000000..147486a
--- /dev/null
+++ b/client/session/session.go
@@ -0,0 +1,117 @@
+package session
+
+import (
+	"net"
+
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health"
+	"google.golang.org/grpc/health/grpc_health_v1"
+)
+
+const (
+	headerSessionUUID      = "X-Docker-Expose-Session-Uuid"
+	headerSessionName      = "X-Docker-Expose-Session-Name"
+	headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
+	headerSessionMethod    = "X-Docker-Expose-Session-Grpc-Method"
+)
+
+// Dialer returns a connection that can be used by the session
+type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
+
+// Attachable defines a feature that can be exposed on a session
+type Attachable interface {
+	Register(*grpc.Server)
+}
+
+// Session is a long-running connection between the client and a daemon
+type Session struct {
+	uuid       string
+	name       string
+	sharedKey  string
+	ctx        context.Context
+	cancelCtx  func()
+	done       chan struct{}
+	grpcServer *grpc.Server
+}
+
+// NewSession returns a new long-running session
+func NewSession(name, sharedKey string) (*Session, error) {
+	uuid := stringid.GenerateRandomID()
+	s := &Session{
+		uuid:       uuid,
+		name:       name,
+		sharedKey:  sharedKey,
+		grpcServer: grpc.NewServer(),
+	}
+
+	grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer())
+
+	return s, nil
+}
+
+// Allow enables a given service to be reachable through the grpc session
+func (s *Session) Allow(a Attachable) {
+	a.Register(s.grpcServer)
+}
+
+// UUID returns the unique identifier for the session
+func (s *Session) UUID() string {
+	return s.uuid
+}
+
+// Run activates the session
+func (s *Session) Run(ctx context.Context, dialer Dialer) error {
+	ctx, cancel := context.WithCancel(ctx)
+	s.cancelCtx = cancel
+	s.done = make(chan struct{})
+
+	defer cancel()
+	defer close(s.done)
+
+	meta := make(map[string][]string)
+	meta[headerSessionUUID] = []string{s.uuid}
+	meta[headerSessionName] = []string{s.name}
+	meta[headerSessionSharedKey] = []string{s.sharedKey}
+
+	for name, svc := range s.grpcServer.GetServiceInfo() {
+		for _, method := range svc.Methods {
+			meta[headerSessionMethod] =
append(meta[headerSessionMethod], MethodURL(name, method.Name)) + } + } + conn, err := dialer(ctx, "h2c", meta) + if err != nil { + return errors.Wrap(err, "failed to dial gRPC") + } + serve(ctx, s.grpcServer, conn) + return nil +} + +// Close closes the session +func (s *Session) Close() error { + if s.cancelCtx != nil && s.done != nil { + s.cancelCtx() + <-s.done + } + return nil +} + +func (s *Session) context() context.Context { + return s.ctx +} + +func (s *Session) closed() bool { + select { + case <-s.context().Done(): + return true + default: + return false + } +} + +// MethodURL returns a gRPC method URL for service and method name +func MethodURL(s, m string) string { + return "/" + s + "/" + m +} diff --git a/client/swarm_get_unlock_key.go b/client/swarm_get_unlock_key.go new file mode 100644 index 0000000..be28d32 --- /dev/null +++ b/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/client/swarm_get_unlock_key_test.go b/client/swarm_get_unlock_key_test.go new file mode 100644 index 0000000..8dd08d9 --- /dev/null +++ b/client/swarm_get_unlock_key_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestSwarmGetUnlockKeyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmGetUnlockKey(context.Background()) + testutil.ErrorContains(t, err, "Error response from daemon: Server error") +} + +func TestSwarmGetUnlockKey(t *testing.T) { + expectedURL := "/swarm/unlockkey" + unlockKey := "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeE" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + key := types.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + } + + b, err := json.Marshal(key) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resp, err := client.SwarmGetUnlockKey(context.Background()) + require.NoError(t, err) + assert.Equal(t, unlockKey, resp.UnlockKey) +} diff --git a/client/swarm_init.go b/client/swarm_init.go new file mode 100644 index 0000000..9e65e1c --- /dev/null +++ b/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the swarm. 
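+//
+// It returns the node ID of the newly created manager. A minimal
+// sketch (the listen address is illustrative):
+//
+//	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
+//		ListenAddr: "0.0.0.0:2377",
+//	})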
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/client/swarm_init_test.go b/client/swarm_init_test.go new file mode 100644 index 0000000..811155a --- /dev/null +++ b/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t *testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/client/swarm_inspect.go b/client/swarm_inspect.go new file mode 100644 index 0000000..77e72f8 --- /dev/null +++ b/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the swarm. 
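+//
+// The response carries the cluster metadata, including the version
+// that a later SwarmUpdate call must echo back. Sketch:
+//
+//	sw, err := cli.SwarmInspect(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(sw.ID, sw.Version.Index)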
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/client/swarm_inspect_test.go b/client/swarm_inspect_test.go new file mode 100644 index 0000000..6432d17 --- /dev/null +++ b/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/client/swarm_join.go b/client/swarm_join.go new file mode 100644 index 0000000..19e5192 --- /dev/null +++ b/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the swarm. 
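+//
+// A minimal sketch (addresses and token are illustrative; the join
+// token comes from the manager, e.g. via `docker swarm join-token`):
+//
+//	err := cli.SwarmJoin(ctx, swarm.JoinRequest{
+//		ListenAddr:  "0.0.0.0:2377",
+//		RemoteAddrs: []string{"manager:2377"},
+//		JoinToken:   "SWMTKN-1-...",
+//	})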
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/swarm_join_test.go b/client/swarm_join_test.go new file mode 100644 index 0000000..31ef2a7 --- /dev/null +++ b/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/swarm_leave.go b/client/swarm_leave.go new file mode 100644 index 0000000..3a205cf --- /dev/null +++ b/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/swarm_leave_test.go b/client/swarm_leave_test.go new file mode 100644 index 0000000..c96dac8 --- /dev/null +++ b/client/swarm_leave_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSwarmLeaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmLeave(context.Background(), false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmLeave(t *testing.T) { + expectedURL := "/swarm/leave" + + leaveCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, leaveCase := range leaveCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != leaveCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. 
expected '%s', got %s", leaveCase.expectedForce, force) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmLeave(context.Background(), leaveCase.force) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/client/swarm_unlock.go b/client/swarm_unlock.go new file mode 100644 index 0000000..9ee441f --- /dev/null +++ b/client/swarm_unlock.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlocks locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/client/swarm_unlock_test.go b/client/swarm_unlock_test.go new file mode 100644 index 0000000..c242d41 --- /dev/null +++ b/client/swarm_unlock_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUnlockError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{"SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUnlock(t *testing.T) { + expectedURL := "/swarm/unlock" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{"SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/swarm_update.go b/client/swarm_update.go new file mode 100644 index 0000000..7245fd4 --- /dev/null +++ b/client/swarm_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUpdate updates the swarm. 
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/swarm_update_test.go b/client/swarm_update_test.go new file mode 100644 index 0000000..3b23db0 --- /dev/null +++ b/client/swarm_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUpdate(t *testing.T) { + expectedURL := "/swarm/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/client/task_inspect.go b/client/task_inspect.go new file mode 100644 index 0000000..bc8058f --- /dev/null +++ b/client/task_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + + "golang.org/x/net/context" +) + +// TaskInspectWithRaw returns the task information and its raw representation.. 
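+// A minimal usage sketch (hypothetical; assumes an initialized *Client
+// "cli" and a context "ctx"):
+//
+//	task, raw, err := cli.TaskInspectWithRaw(ctx, "abc123")
+//	// task is the decoded swarm.Task; raw holds the unparsed JSON body.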
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/client/task_inspect_test.go b/client/task_inspect_test.go new file mode 100644 index 0000000..148cdad --- /dev/null +++ b/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/client/task_list.go b/client/task_list.go new file mode 100644 index 0000000..66324da --- /dev/null +++ b/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. 
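+// A minimal usage sketch (hypothetical; the label values mirror the ones
+// exercised by the test below):
+//
+//	f := filters.NewArgs()
+//	f.Add("label", "label1")
+//	tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})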
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/client/task_list_test.go b/client/task_list_test.go new file mode 100644 index 0000000..2a9a4c4 --- /dev/null +++ b/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/client/task_logs.go b/client/task_logs.go new file mode 100644 index 0000000..2ed1954 --- /dev/null +++ b/client/task_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
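+// A minimal usage sketch (hypothetical; note the deferred Close, since
+// the stream is handed to the caller unread):
+//
+//	rc, err := cli.TaskLogs(ctx, taskID, types.ContainerLogsOptions{ShowStdout: true})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	io.Copy(os.Stdout, rc)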
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/client/testdata/ca.pem b/client/testdata/ca.pem new file mode 100644 index 0000000..ad14d47 --- /dev/null +++ b/client/testdata/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC0jCCAbqgAwIBAgIRAILlP5WWLaHkQ/m2ASHP7SowDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMBIxEDAOBgNVBAoTB3ZpbmNlbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD0yZPKAGncoaxaU/QW9tWEHbrvDoGVF/65L8Si/jBrlAgLjhmmV1di +vKG9QPzuU8snxHro3/uCwyA6kTqw0U8bGwHxJq2Bpa6JBYj8N2jMJ+M+sjXgSo2t +E0zIzjTW2Pir3C8qwfrVL6NFp9xClwMD23SFZ0UsEH36NkfyrKBVeM8IOjJd4Wjs +xIcuvF3BTVkji84IJBW2JIKf9ZrzJwUlSCPgptRp4Evdbyp5d+UPxtwxD7qjW4lM +yQQ8vfcC4lKkVx5s/RNJ4fzd5uEgLdEbZ20qt7Zt/bLcxFHpUhH2teA0QjmrOWFh +gbL83s95/+hbSVhsO4hoFW7vTeiCCY4xAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwIC +rDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBY51RHajuDuhO2 +tcm26jeNROzfffnjhvbOVPjSEdo9vI3JpMU/RuQw+nbNcLwJrdjL6UH7tD/36Y+q +NXH+xSIjWFH0zXGxrIUsVrvt6f8CbOvw7vD+gygOG+849PDQMbL6czP8rvXY7vZV +9pdpQfrENk4b5kePRW/6HaGSTvtgN7XOrYD9fp3pm/G534T2e3IxgYMRNwdB9Ul9 +bLwMqQqf4eiqqMs6x4IVmZUkGVMKiFKcvkNg9a+Ozx5pMizHeAezWMcZ5V+QJZVT +8lElSCKZ2Yy2xkcl7aeQMLwcAeZwfTp+Yu9dVzlqXiiBTLd1+LtAQCuKHzmw4Q8k +EvD5m49l +-----END CERTIFICATE----- diff --git a/client/testdata/cert.pem b/client/testdata/cert.pem new file mode 100644 index 0000000..9000ffb --- /dev/null +++ b/client/testdata/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC8DCCAdigAwIBAgIRAJAS1glgcke4q7eCaretwgUwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMB4xHDAaBgNVBAoME3ZpbmNlbnQuPGJvb3RzdHJhcD4wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQClpvG442dGEvrRgmCrqY4kBml1LVlw2Y7ZDn6B +TKa52+MuGDmfXbO1UhclNqTXjLgAwKjPz/OvnPRxNEUoQEDbBd+Xev7rxTY5TvYI +27YH3fMH2LL2j62jum649abfhZ6ekD5eD8tCn3mnrEOgqRIlK7efPIVixq/ZqU1H +7ez0ggB7dmWHlhnUaxyQOCSnAX/7nKYQXqZgVvGhDeR2jp7GcnhbK/qPrZ/mOm83 +2IjCeYN145opYlzTSp64GYIZz7uqMNcnDKK37ZbS8MYcTjrRaHEiqZVVdIC+ghbx +qYqzbZRVfgztI9jwmifn0mYrN4yt+nhNYwBcRJ4Pv3uLFbo7AgMBAAGjNTAzMA4G +A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MA0GCSqGSIb3DQEBCwUAA4IBAQDg1r7nksjYgDFYEcBbrRrRHddIoK+RVmSBTTrq +8giC77m0srKdh9XTVWK1PUbGfODV1oD8m9QhPE8zPDyYQ8jeXNRSU5wXdkrTRmmY +w/T3SREqmE7CObMtusokHidjYFuqqCR07sJzqBKRlzr3o0EGe3tuEhUlF5ARY028 +eipaDcVlT5ChGcDa6LeJ4e05u4cVap0dd6Rp1w3Rx1AYAecdgtgBMnw1iWdl/nrC +sp26ZXNaAhFOUovlY9VY257AMd9hQV7WvAK4yNEHcckVu3uXTBmDgNSOPtl0QLsL +Kjlj75ksCx8nCln/hCut/0+kGTsGZqdV5c6ktgcGYRir/5Hs +-----END CERTIFICATE----- diff --git a/client/testdata/key.pem b/client/testdata/key.pem new file mode 100644 index 0000000..c0869df --- /dev/null +++ b/client/testdata/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApabxuONnRhL60YJgq6mOJAZpdS1ZcNmO2Q5+gUymudvjLhg5 
+n12ztVIXJTak14y4AMCoz8/zr5z0cTRFKEBA2wXfl3r+68U2OU72CNu2B93zB9iy +9o+to7puuPWm34WenpA+Xg/LQp95p6xDoKkSJSu3nzyFYsav2alNR+3s9IIAe3Zl +h5YZ1GsckDgkpwF/+5ymEF6mYFbxoQ3kdo6exnJ4Wyv6j62f5jpvN9iIwnmDdeOa +KWJc00qeuBmCGc+7qjDXJwyit+2W0vDGHE460WhxIqmVVXSAvoIW8amKs22UVX4M +7SPY8Jon59JmKzeMrfp4TWMAXESeD797ixW6OwIDAQABAoIBAHfyAAleL8NfrtnR +S+pApbmUIvxD0AWUooispBE/zWG6xC72P5MTqDJctIGvpYCmVf3Fgvamns7EGYN2 +07Sngc6V3Ca1WqyhaffpIuGbJZ1gqr89u6gotRRexBmNVj13ZTlvPJmjWgxtqQsu +AvHsOkVL+HOGwRaaw24Z1umEcBVCepl7PGTqsLeJUtBUZBiqdJTu4JYLAB6BggBI +OxhHoTWvlNWwzezo2C/IXkXcXD/tp3i5vTn5rAXHSMQkdMAUh7/xJ73Fl36gxZhp +W7NoPKaS9qNh8jhs6p54S7tInb6+mrKtvRFKl5XAR3istXrXteT5UaukpuBbQ/5d +qf4BXuECgYEAzoOKxMee5tG/G9iC6ImNq5xGAZm0OnmteNgIEQj49If1Q68av525 +FioqdC9zV+blfHQqXEIUeum4JAou4xqmB8Lw2H0lYwOJ1IkpUy3QJjU1IrI+U5Qy +ryZuA9cxSTLf1AJFbROsoZDpjaBh0uUQkD/4PHpwXMgHu/3CaJ4nTEkCgYEAzVjE +VWgczWJGyRxmHSeR51ft1jrlChZHEd3HwgLfo854JIj+MGUH4KPLSMIkYNuyiwNQ +W7zdXCB47U8afSL/lPTv1M5+ZsWY6sZAT6gtp/IeU0Va943h9cj10fAOBJaz1H6M +jnZS4jjWhVInE7wpCDVCwDRoHHJ84kb6JeflamMCgYBDQDcKie9HP3q6uLE4xMKr +5gIuNz2n5UQGnGNUGNXp2/SVDArr55MEksqsd19aesi01KeOz74XoNDke6R1NJJo +6KTB+08XhWl3GwuoGL02FBGvsNf3I8W1oBAnlAZqzfRx+CNfuA55ttU318jDgvD3 +6L0QBNdef411PNf4dbhacQKBgAd/e0PHFm4lbYJAaDYeUMSKwGN3KQ/SOmwblgSu +iC36BwcGfYmU1tHMCUsx05Q50W4kA9Ylskt/4AqCPexdz8lHnE4/7/uesXO5I3YF +JQ2h2Jufx6+MXbjUyq0Mv+ZI/m3+5PD6vxIFk0ew9T5SO4lSMIrGHxsSzx6QCuhB +bG4TAoGBAJ5PWG7d2CyCjLtfF8J4NxykRvIQ8l/3kDvDdNrXiXbgonojo2lgRYaM +5LoK9ApN8KHdedpTRipBaDA22Sp5SjMcUE7A6q42PJCL9r+BRYF0foFQx/rqpCff +pVWKgwIPoKnfxDqN1RUgyFcx1jbA3XVJZCuT+wbMuDQ9nlvulD1W +-----END RSA PRIVATE KEY----- diff --git a/client/transport.go b/client/transport.go new file mode 100644 index 0000000..401ab15 --- /dev/null +++ b/client/transport.go @@ -0,0 +1,25 @@ +package client + +import ( + "crypto/tls" + "net/http" +) + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/client/utils.go b/client/utils.go new file mode 100644 index 0000000..f3d8877 --- /dev/null +++ b/client/utils.go @@ -0,0 +1,34 @@ +package client + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. 
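+// For example (hypothetical input), an Args built with
+// Add("label", "label1") and Add("label", "label2") encodes to
+// {"label":{"label1":true,"label2":true}} under the "filters" key,
+// which is the same encoding the task and volume list tests assert on.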
+func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/client/version.go b/client/version.go new file mode 100644 index 0000000..933ceb4 --- /dev/null +++ b/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/client/volume_create.go b/client/volume_create.go new file mode 100644 index 0000000..9620c87 --- /dev/null +++ b/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/client/volume_create_test.go b/client/volume_create_test.go new file mode 100644 index 0000000..9f1b254 --- /dev/null +++ b/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + 
t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/client/volume_inspect.go b/client/volume_inspect.go new file mode 100644 index 0000000..3860e9b --- /dev/null +++ b/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/client/volume_inspect_test.go b/client/volume_inspect_test.go new file mode 100644 index 0000000..0d1d118 --- /dev/null +++ b/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { 
+ t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/client/volume_list.go b/client/volume_list.go new file mode 100644 index 0000000..32247ce --- /dev/null +++ b/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/client/volume_list_test.go b/client/volume_list_test.go new file mode 100644 index 0000000..f29639b --- /dev/null +++ b/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/client/volume_prune.go b/client/volume_prune.go new file mode 100644 index 0000000..2e7fea7 --- /dev/null +++ b/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/client/volume_remove.go b/client/volume_remove.go new file mode 100644 index 0000000..6c26575 --- /dev/null +++ b/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. 
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/client/volume_remove_test.go b/client/volume_remove_test.go new file mode 100644 index 0000000..1fe6573 --- /dev/null +++ b/client/volume_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestVolumeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeRemove(t *testing.T) { + expectedURL := "/volumes/volume_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/cmd/docker/main.go b/cmd/docker/main.go new file mode 100644 index 0000000..3396d3c --- /dev/null +++ b/cmd/docker/main.go @@ -0,0 +1,41 @@ +package main + +import ( + "fmt" + "github.com/containerd/containerd/containerd" + containerdShim "github.com/containerd/containerd/containerd-shim" + "github.com/containerd/containerd/ctr" + "github.com/docker/cli/cmd/docker" + "github.com/docker/docker/cmd/dockerd" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libnetwork/cmd/proxy" + "github.com/opencontainers/runc" + + "os" + filepath "path/filepath" +) + +func main() { + if reexec.Init() { + return + } + switch filepath.Base(os.Args[0]) { + case "docker": + docker.Main() + case "dockerd": + dockerd.Main() + case "docker-containerd": + containerd.Main() + case "docker-containerd-shim": + containerdShim.Main() + case "docker-containerd-ctr": + ctr.Main() + case "docker-runc": + runc.Main() + case "docker-proxy": + proxy.Main() + + default: + fmt.Println("Unknown command") + } +} diff --git a/cmd/dockerd/README.md b/cmd/dockerd/README.md new file mode 100644 index 0000000..a8c20b3 --- /dev/null +++ b/cmd/dockerd/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker daemon's main function. + +This file provides first line CLI argument parsing and environment variable setting. 
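+
+The sibling cmd/docker/main.go dispatches on the binary's base name
+(docker, dockerd, docker-containerd, docker-runc, and so on), so one
+build can serve as every bundled tool; presumably the packaging step
+installs the matching links to this single binary.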
diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go
new file mode 100644
index 0000000..b90ea53
--- /dev/null
+++ b/cmd/dockerd/config.go
@@ -0,0 +1,77 @@
+package dockerd
+
+import (
+ "runtime"
+
+ "github.com/docker/docker/daemon/config"
+ "github.com/docker/docker/opts"
+ "github.com/spf13/pflag"
+)
+
+const (
+ // defaultShutdownTimeout is the default shutdown timeout for the daemon
+ defaultShutdownTimeout = 15
+ // defaultTrustKeyFile is the default filename for the trust key
+ defaultTrustKeyFile = "key.json"
+)
+
+// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon
+func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
+ var maxConcurrentDownloads, maxConcurrentUploads int
+
+ conf.ServiceOptions.InstallCliFlags(flags)
+
+ flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options")
+ flags.Var(opts.NewNamedListOptsRef("delta-storage-opts", &conf.DeltaGraphOptions, nil), "delta-storage-opt", "Delta storage driver options")
+ flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load")
+ flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options")
+ flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file")
+ flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the docker runtime")
+
+ // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added
+ // before Docker 1.0, so won't be removed, only hidden, to discourage its usage.
+ flags.MarkHidden("graph")
+
+ flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent docker state")
+ flags.StringVar(&conf.DeltaRoot, "delta-data-root", "", "Root directory of read-only docker state used for deltas")
+
+ flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
+ flags.MarkDeprecated("restart", "Please use a restart policy on docker run")
+
+ // Windows doesn't support setting the storage driver - there is no choice as to which ones to use.
+ if runtime.GOOS != "windows" { + flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + flags.StringVar(&conf.DeltaGraphDriver, "delta-storage-driver", "", "Storage driver to use") + } + + flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + + flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + // "--deprecated-key-path" is to allow configuration of the key used + // for the daemon ID and the deprecated image signing. It was never + // exposed as a command line option but is added here to allow + // overriding the default path in configuration. + flags.Var(opts.NewQuotedString(&conf.TrustKeyPath), "deprecated-key-path", "Path to key file for ID and image signing") + flags.MarkHidden("deprecated-key-path") + + conf.MaxConcurrentDownloads = &maxConcurrentDownloads + conf.MaxConcurrentUploads = &maxConcurrentUploads +} diff --git a/cmd/dockerd/config_common_unix.go b/cmd/dockerd/config_common_unix.go new file mode 100644 index 0000000..30c5b20 --- /dev/null +++ b/cmd/dockerd/config_common_unix.go @@ -0,0 +1,34 @@ +// +build solaris linux freebsd + +package dockerd + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultDataRoot = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// installUnixConfigFlags adds command-line options to the top-level flag parser for +// the current process that are common across Unix platforms. 
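+// Both config_unix.go and config_solaris.go call this right after
+// installCommonConfigFlags, so bridge and runtime flags such as --bip,
+// --bridge and --add-runtime behave identically across Unix platforms.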
+func installUnixConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + conf.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&conf.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&conf.BridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&conf.BridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(opts.NewNamedRuntimeOpt("runtimes", &conf.Runtimes, config.StockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") + +} diff --git a/cmd/dockerd/config_experimental.go b/cmd/dockerd/config_experimental.go new file mode 100644 index 0000000..9cb1d26 --- /dev/null +++ b/cmd/dockerd/config_experimental.go @@ -0,0 +1,9 @@ +package dockerd + +import ( + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +func attachExperimentalFlags(conf *config.Config, cmd *pflag.FlagSet) { +} diff --git a/cmd/dockerd/config_solaris.go b/cmd/dockerd/config_solaris.go new file mode 100644 index 0000000..27c7be0 --- /dev/null +++ b/cmd/dockerd/config_solaris.go @@ -0,0 +1,19 @@ +package dockerd + +import ( + "github.com/docker/docker/daemon/config" + runconfigopts "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to unix platforms + installUnixConfigFlags(conf, flags) + + attachExperimentalFlags(conf, flags) +} diff --git a/cmd/dockerd/config_unix.go b/cmd/dockerd/config_unix.go new file mode 100644 index 0000000..f5aee73 --- /dev/null +++ b/cmd/dockerd/config_unix.go @@ -0,0 +1,52 @@ +// +build linux,!solaris freebsd,!solaris + +package dockerd + +import ( + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to unix platforms + installUnixConfigFlags(conf, flags) + + conf.Ulimits = make(map[string]*units.Ulimit) + + // Set default value for `--default-shm-size` + conf.ShmSize = opts.MemBytes(config.DefaultShmSize) + + // Then platform-specific install flags + flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", &conf.Ulimits), 
"default-ulimit", "Default ulimits for containers") + flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") + flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") + flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket") + flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary") + flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&conf.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers") + flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers") + + attachExperimentalFlags(conf, flags) +} diff --git a/cmd/dockerd/config_unix_test.go b/cmd/dockerd/config_unix_test.go new file mode 100644 index 0000000..cc95041 --- /dev/null +++ b/cmd/dockerd/config_unix_test.go @@ -0,0 +1,26 @@ +// +build linux,!solaris freebsd,!solaris + +package dockerd + +import ( + "runtime" + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestDaemonParseShmSize(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ShmSize not supported on Solaris\n") + } + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + conf := &config.Config{} + installConfigFlags(conf, flags) + // By default `--default-shm-size=64M` + assert.Equal(t, int64(64*1024*1024), conf.ShmSize.Value()) + assert.NoError(t, flags.Set("default-shm-size", "128M")) + assert.Equal(t, int64(128*1024*1024), conf.ShmSize.Value()) +} diff --git a/cmd/dockerd/config_windows.go b/cmd/dockerd/config_windows.go new file mode 100644 index 0000000..4aa1d0a --- /dev/null +++ b/cmd/dockerd/config_windows.go @@ -0,0 +1,25 @@ +package dockerd + +import ( + "os" + "path/filepath" + + 
"github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker") +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then platform-specific install flags. + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&conf.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go new file mode 100644 index 0000000..22d77e7 --- /dev/null +++ b/cmd/dockerd/daemon.go @@ -0,0 +1,571 @@ +package dockerd + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + buildbackend "github.com/docker/docker/api/server/backend/build" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" + "github.com/docker/docker/api/server/router/container" + distributionrouter "github.com/docker/docker/api/server/router/distribution" + "github.com/docker/docker/api/server/router/image" + "github.com/docker/docker/api/server/router/network" + pluginrouter "github.com/docker/docker/api/server/router/plugin" + sessionrouter "github.com/docker/docker/api/server/router/session" + swarmrouter "github.com/docker/docker/api/server/router/swarm" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/cli/debug" + "github.com/docker/docker/client/session" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/cluster" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + dopts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/tlsconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// DaemonCli represents the daemon CLI. 
+type DaemonCli struct { + *config.Config + configFile *string + flags *pflag.FlagSet + + api *apiserver.Server + d *daemon.Daemon + authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins +} + +// NewDaemonCli returns a daemon CLI +func NewDaemonCli() *DaemonCli { + return &DaemonCli{} +} + +func (cli *DaemonCli) start(opts *daemonOptions) (err error) { + stopc := make(chan bool) + defer close(stopc) + + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + opts.SetDefaultOptions(opts.flags) + + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { + return err + } + cli.configFile = &opts.configFile + cli.flags = opts.flags + + if cli.Config.Debug { + debug.Enable() + } + + if cli.Config.Experimental { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + return fmt.Errorf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + return fmt.Errorf("Failed to set log opts: %v", err) + } + } + + // Create the daemon root before we create ANY other files (PID, or migrate keys) + // to ensure the appropriate ACL is set (particularly relevant on Windows) + if err := daemon.CreateDaemonRoot(cli.Config); err != nil { + return err + } + + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + defer func() { + if err := pf.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + // TODO: extract to newApiServerConfig() + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + EnableCors: cli.Config.EnableCors, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + ExclusiveRootPools: true, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + cli.api = apiserver.New(serverConfig) + + var hosts []string + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] 
DON'T BIND ON ANY IP ADDRESS WITHOUT setting --tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. + if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + hosts = append(hosts, protoAddrParts[1]) + cli.api.Accept(addr, ls...) + } + + registryService := registry.NewService(cli.Config.ServiceOptions) + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + if err != nil { + return err + } + signal.Trap(func() { + cli.stop() + <-stopc // wait for daemonCli.start() to return + }) + + // Notify that the API is active, but before daemon is set up. + preNotifySystem() + + pluginStore := plugin.NewStore() + + if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + + if system.LCOWSupported() { + logrus.Warnln("LCOW support is enabled - this feature is incomplete") + } + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote, pluginStore) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + + d.StoreHosts(hosts) + + // validate after NewDaemon has restored enabled plugins. Dont change order. + if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + + // TODO: move into startMetricsServer() + if cli.Config.MetricsAddress != "" { + if !d.HasExperimental() { + return fmt.Errorf("metrics-addr is only supported when experimental is enabled") + } + if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { + return err + } + } + + // TODO: createAndStartCluster() + name, _ := os.Hostname() + + // Use a buffered channel to pass changes from store watch API to daemon + // A buffer allows store watch API and daemon processing to not wait for each other + watchStream := make(chan *swarmapi.WatchMessage, 32) + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RuntimeRoot: cli.getSwarmRunRoot(), + WatchStream: watchStream, + }) + if err != nil { + logrus.Fatalf("Error creating cluster component: %v", err) + } + d.SetCluster(c) + err = c.Start() + if err != nil { + logrus.Fatalf("Error starting cluster component: %v", err) + } + + // Restart all autostart containers which has a swarm endpoint + // and is not yet running now that we have successfully + // initialized the cluster. 
+ d.RestartSwarmContainers() + + logrus.Info("Daemon has completed initialization") + + cli.d = d + + routerOptions, err := newRouterOptions(cli.Config, d) + if err != nil { + return err + } + routerOptions.api = cli.api + routerOptions.cluster = c + + initRouter(routerOptions) + + // process cluster change notifications + watchCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + go d.ProcessClusterNotifications(watchCtx, watchStream) + + cli.setupConfigReloadTrap() + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go cli.api.Wait(serveAPIWait) + + // after the daemon is done setting up we can notify systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic + // Wait for serve API to complete + errAPI := <-serveAPIWait + c.Cleanup() + shutdownDaemon(d) + containerdRemote.Cleanup() + if errAPI != nil { + return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) + } + + return nil +} + +type routerOptions struct { + sessionManager *session.Manager + buildBackend *buildbackend.Backend + buildCache *fscache.FSCache + daemon *daemon.Daemon + api *apiserver.Server + cluster *cluster.Cluster +} + +func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptions, error) { + opts := routerOptions{} + sm, err := session.NewManager() + if err != nil { + return opts, errors.Wrap(err, "failed to create sessionmanager") + } + + builderStateDir := filepath.Join(config.Root, "builder") + + buildCache, err := fscache.NewFSCache(fscache.Opt{ + Backend: fscache.NewNaiveCacheBackend(builderStateDir), + Root: builderStateDir, + GCPolicy: fscache.GCPolicy{ // TODO: expose this in config + MaxSize: 1024 * 1024 * 512, // 512MB + MaxKeepDuration: 7 * 24 * time.Hour, // 1 week + }, + }) + if err != nil { + return opts, errors.Wrap(err, "failed to create fscache") + } + + manager, err := dockerfile.NewBuildManager(daemon, sm, buildCache, daemon.IDMappings()) + if err != nil { + return opts, err + } + + bb, err := buildbackend.NewBackend(daemon, manager, buildCache) + if err != nil { + return opts, errors.Wrap(err, "failed to create buildmanager") + } + + return routerOptions{ + sessionManager: sm, + buildBackend: bb, + buildCache: buildCache, + daemon: daemon, + }, nil +} + +func (cli *DaemonCli) reloadConfig() { + reload := func(config *config.Config) { + + // Revalidate and reload the authorization plugins + if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + logrus.Fatalf("Error validating authorization plugin: %v", err) + return + } + cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins) + + if err := cli.d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + + if config.IsValueSet("debug") { + debugEnabled := debug.IsEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + debug.Disable() + cli.api.DisableProfiler() + case config.Debug && !debugEnabled: // enable debug + debug.Enable() + cli.api.EnableProfiler() + } + + } + } + + if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { + logrus.Error(err) + } +} + +func (cli *DaemonCli) stop() { + cli.api.Close() +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill container or worst it's +// blocked there +func shutdownDaemon(d 
*daemon.Daemon) { + shutdownTimeout := d.ShutdownTimeout() + ch := make(chan struct{}) + go func() { + d.Shutdown() + close(ch) + }() + if shutdownTimeout < 0 { + <-ch + logrus.Debug("Clean shutdown succeeded") + return + } + select { + case <-ch: + logrus.Debug("Clean shutdown succeeded") + case <-time.After(time.Duration(shutdownTimeout) * time.Second): + logrus.Error("Force shutdown daemon") + } +} + +func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { + conf := opts.daemonConfig + flags := opts.flags + conf.Debug = opts.Debug + conf.Hosts = opts.Hosts + conf.LogLevel = opts.LogLevel + conf.TLS = opts.TLS + conf.TLSVerify = opts.TLSVerify + conf.CommonTLSOptions = config.CommonTLSOptions{} + + if opts.TLSOptions != nil { + conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile + conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile + conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile + } + + if conf.TrustKeyPath == "" { + conf.TrustKeyPath = filepath.Join( + getDaemonConfDir(conf.Root), + defaultTrustKeyFile) + } + + if flags.Changed("graph") && flags.Changed("data-root") { + return nil, fmt.Errorf(`cannot specify both "--graph" and "--data-root" option`) + } + + if opts.configFile != "" { + c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) + if err != nil { + if flags.Changed("config-file") || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", opts.configFile, err) + } + } + // the merged configuration can be nil if the config file didn't exist. + // leave the current configuration as it is if when that happens. + if c != nil { + conf = c + } + } + + if err := config.Validate(conf); err != nil { + return nil, err + } + + if conf.V2Only == false { + logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`) + } + + if flags.Changed("graph") { + logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. 
+ // + // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := daemon.GetConflictFreeLabels(config.Labels) + // if err != nil { + // return nil, err + // } + // config.Labels = newLabels + // + if _, err := config.GetConflictFreeLabels(conf.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + // Regardless of whether the user sets it to true or false, if they + // specify TLSVerify at all then we need to turn on TLS + if conf.IsValueSet(FlagTLSVerify) { + conf.TLS = true + } + + // ensure that the log level is the one set after merging configurations + setLogLevel(conf.LogLevel) + + return conf, nil +} + +func initRouter(opts routerOptions) { + decoder := runconfig.ContainerDecoder{} + + routers := []router.Router{ + // we need to add the checkpoint router before the container router or the DELETE gets masked + checkpointrouter.NewRouter(opts.daemon, decoder), + container.NewRouter(opts.daemon, decoder), + image.NewRouter(opts.daemon, decoder), + systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache), + volume.NewRouter(opts.daemon), + build.NewRouter(opts.buildBackend, opts.daemon), + sessionrouter.NewRouter(opts.sessionManager), + swarmrouter.NewRouter(opts.cluster), + pluginrouter.NewRouter(opts.daemon.PluginManager()), + distributionrouter.NewRouter(opts.daemon), + } + + if opts.daemon.NetworkControllerEnabled() { + routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) + } + + if opts.daemon.HasExperimental() { + for _, r := range routers { + for _, route := range r.Routes() { + if experimental, ok := route.(router.ExperimentalRoute); ok { + experimental.Enable() + } + } + } + } + + opts.api.InitRouter(debug.IsEnabled(), routers...) 
+} + +// TODO: remove this from cli and return the authzMiddleware +func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore *plugin.Store) error { + v := cfg.Version + + exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) + s.UseMiddleware(exp) + + vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) + s.UseMiddleware(vm) + + if cfg.EnableCors || cfg.CorsHeaders != "" { + c := middleware.NewCORSMiddleware(cfg.CorsHeaders) + s.UseMiddleware(c) + } + + cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) + cli.Config.AuthzMiddleware = cli.authzMiddleware + s.UseMiddleware(cli.authzMiddleware) + return nil +} + +// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver +// plugins present on the host and available to the daemon +func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { + for _, reqPlugin := range requestedPlugins { + if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { + return err + } + } + return nil +} diff --git a/cmd/dockerd/daemon_freebsd.go b/cmd/dockerd/daemon_freebsd.go new file mode 100644 index 0000000..768127f --- /dev/null +++ b/cmd/dockerd/daemon_freebsd.go @@ -0,0 +1,9 @@ +package dockerd + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} diff --git a/cmd/dockerd/daemon_linux.go b/cmd/dockerd/daemon_linux.go new file mode 100644 index 0000000..e3e1dcc --- /dev/null +++ b/cmd/dockerd/daemon_linux.go @@ -0,0 +1,15 @@ +// +build linux + +package dockerd + +import systemdDaemon "github.com/coreos/go-systemd/daemon" + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + // Tell the init daemon we are accepting requests + go systemdDaemon.SdNotify(false, "READY=1") +} diff --git a/cmd/dockerd/daemon_solaris.go b/cmd/dockerd/daemon_solaris.go new file mode 100644 index 0000000..0b19379 --- /dev/null +++ b/cmd/dockerd/daemon_solaris.go @@ -0,0 +1,89 @@ +// +build solaris + +package dockerd + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +const defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. 
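+// (On Solaris this is a stub: the handler below is empty, so the
+// configuration cannot be reloaded via a signal on this platform.)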
+func (cli *DaemonCli) setupConfigReloadTrap() { +} + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{} + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +func allocateDaemonPort(addr string) error { + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/cmd/dockerd/daemon_test.go b/cmd/dockerd/daemon_test.go new file mode 100644 index 0000000..bb44bd9 --- /dev/null +++ b/cmd/dockerd/daemon_test.go @@ -0,0 +1,145 @@ +package dockerd + +import ( + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func defaultOptions(configFile string) *daemonOptions { + opts := newDaemonOptions(&config.Config{}) + opts.flags = &pflag.FlagSet{} + opts.InstallFlags(opts.flags) + installConfigFlags(opts.daemonConfig, opts.flags) + opts.flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "") + opts.configFile = configFile + return opts +} + +func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { + opts := defaultOptions("") + opts.Debug = true + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + if !loadedConfig.Debug { + t.Fatalf("expected debug to be copied from the common flags, got false") + } +} + +func TestLoadDaemonCliConfigWithTLS(t *testing.T) { + opts := defaultOptions("") + opts.TLSOptions.CAFile = "/tmp/ca.pem" + opts.TLS = true + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "/tmp/ca.pem", loadedConfig.CommonTLSOptions.CAFile) +} + +func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`) + defer tempFile.Remove() + configFile := tempFile.Name() + + opts := defaultOptions(configFile) + flags := opts.flags + + assert.NoError(t, flags.Set("config-file", configFile)) + assert.NoError(t, flags.Set("label", "l1=bar")) + assert.NoError(t, flags.Set("label", "l2=baz")) + + _, err := loadDaemonCliConfig(opts) + testutil.ErrorContains(t, err, "as a flag and in the configuration file: labels") +} + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + defer 
tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.True(t, loadedConfig.TLS) +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.False(t, loadedConfig.TLS) +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"log-level": "warn"}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "warn", loadedConfig.LogLevel) + assert.Equal(t, logrus.WarnLevel, logrus.GetLevel()) +} + +func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.Equal(t, "/etc/certs/ca.pem", loadedConfig.CommonTLSOptions.CAFile) + assert.Equal(t, "syslog", loadedConfig.LogConfig.Type) +} + +func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { + content := `{ + "allow-nondistributable-artifacts": ["allow-nondistributable-artifacts.com"], + "registry-mirrors": ["https://mirrors.docker.com"], + "insecure-registries": ["https://insecure.docker.com"] + }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.Len(t, loadedConfig.AllowNondistributableArtifacts, 1) + assert.Len(t, loadedConfig.Mirrors, 1) + assert.Len(t, loadedConfig.InsecureRegistries, 1) +} diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go new file mode 100644 index 0000000..05ae3c8 --- /dev/null +++ b/cmd/dockerd/daemon_unix.go @@ -0,0 +1,125 @@ +// +build !windows,!solaris + +package dockerd + +import ( + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + "syscall" + + "github.com/docker/docker/cmd/dockerd/hack" + "github.com/docker/docker/daemon" + "github.com/docker/docker/libcontainerd" + "github.com/docker/libnetwork/portallocator" +) + +const defaultDaemonConfigFile = "/etc/docker/daemon.json" + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} 
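The double `syscall.Umask` call above exploits the umask(2) calling convention: the syscall always returns the *previous* mask, so the first call installs the desired mask and the second call reinstalls it while reporting what the first call left behind. A minimal standalone sketch of the same verification trick (illustrative only, not part of the patch):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	const want = 0022
	// First call installs the mask; its return value is whatever mask
	// was active before, which we don't care about here.
	syscall.Umask(want)
	// Second call installs the same mask again and returns the mask the
	// first call set, so a mismatch means the first call didn't stick.
	if got := syscall.Umask(want); got != want {
		fmt.Printf("umask not applied: expected %#o, got %#o\n", want, got)
	}
}
```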
+
+func getDaemonConfDir(_ string) string {
+	return "/etc/docker"
+}
+
+// setupConfigReloadTrap configures the SIGHUP signal to reload the configuration.
+func (cli *DaemonCli) setupConfigReloadTrap() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGHUP)
+	go func() {
+		for range c {
+			cli.reloadConfig()
+		}
+	}()
+}
+
+func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
+	opts := []libcontainerd.RemoteOption{
+		libcontainerd.WithDebugLog(cli.Config.Debug),
+		libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust),
+	}
+	if cli.Config.ContainerdAddr != "" {
+		opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
+	} else {
+		opts = append(opts, libcontainerd.WithStartDaemon(true))
+	}
+	if daemon.UsingSystemd(cli.Config) {
+		args := []string{"--systemd-cgroup=true"}
+		opts = append(opts, libcontainerd.WithRuntimeArgs(args))
+	}
+	if cli.Config.LiveRestoreEnabled {
+		opts = append(opts, libcontainerd.WithLiveRestore(true))
+	}
+	opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary))
+	return opts
+}
+
+// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to
+// store their state.
+func (cli *DaemonCli) getLibcontainerdRoot() string {
+	return filepath.Join(cli.Config.ExecRoot, "libcontainerd")
+}
+
+// getSwarmRunRoot gets the root directory for swarm to store runtime state,
+// for example the control socket.
+func (cli *DaemonCli) getSwarmRunRoot() string {
+	return filepath.Join(cli.Config.ExecRoot, "swarm")
+}
+
+// allocateDaemonPort ensures that there are no containers
+// that try to use any port allocated for the docker server.
+func allocateDaemonPort(addr string) error {
+	host, port, err := net.SplitHostPort(addr)
+	if err != nil {
+		return err
+	}
+
+	intPort, err := strconv.Atoi(port)
+	if err != nil {
+		return err
+	}
+
+	var hostIPs []net.IP
+	if parsedIP := net.ParseIP(host); parsedIP != nil {
+		hostIPs = append(hostIPs, parsedIP)
+	} else if hostIPs, err = net.LookupIP(host); err != nil {
+		return fmt.Errorf("failed to look up %s address in host specification", host)
+	}
+
+	pa := portallocator.Get()
+	for _, hostIP := range hostIPs {
+		if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil {
+			return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err)
+		}
+	}
+	return nil
+}
+
+// notifyShutdown is called after the daemon shuts down but before the process exits.
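+// On unix platforms the body below is intentionally empty; the Windows build
+// uses this hook to report the exit status back to the service control
+// manager (see daemon_windows.go).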
+func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + return ls +} diff --git a/cmd/dockerd/daemon_unix_test.go b/cmd/dockerd/daemon_unix_test.go new file mode 100644 index 0000000..f2cf0c9 --- /dev/null +++ b/cmd/dockerd/daemon_unix_test.go @@ -0,0 +1,114 @@ +// +build !windows,!solaris + +// TODO: Create new file for Solaris which tests config parameters +// as described in daemon/config_solaris.go + +package dockerd + +import ( + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { + content := `{"log-opts": {"max-size": "1k"}}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.Debug = true + opts.LogLevel = "info" + assert.NoError(t, opts.flags.Set("selinux-enabled", "true")) + + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.True(t, loadedConfig.Debug) + assert.Equal(t, "info", loadedConfig.LogLevel) + assert.True(t, loadedConfig.EnableSelinuxSupport) + assert.Equal(t, "json-file", loadedConfig.LogConfig.Type) + assert.Equal(t, "1k", loadedConfig.LogConfig.Config["max-size"]) +} + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.Equal(t, "127.0.0.2", loadedConfig.IP) + assert.Equal(t, "127.0.0.1", loadedConfig.DefaultIP.String()) +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + content := `{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + assert.Equal(t, expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"]) + assert.NotNil(t, loadedConfig.LogConfig.Config) + assert.Equal(t, "test", loadedConfig.LogConfig.Config["tag"]) +} + +func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { + content := `{ "userland-proxy": false }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.False(t, loadedConfig.EnableUserlandProxy) + + // make sure reloading doesn't generate configuration + // conflicts after normalizing boolean values. 
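+	// (i.e. "userland-proxy": false from the file must survive a
+	// config.Reload round-trip without being reported as a conflict with
+	// the flag's true default)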
+ reload := func(reloadedConfig *config.Config) { + assert.False(t, reloadedConfig.EnableUserlandProxy) + } + assert.NoError(t, config.Reload(opts.configFile, opts.flags, reload)) +} + +func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + + assert.True(t, loadedConfig.EnableUserlandProxy) +} + +func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) { + content := `{"disable-legacy-registry": false}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + require.NoError(t, err) + require.NotNil(t, loadedConfig) + assert.False(t, loadedConfig.V2Only) +} diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go new file mode 100644 index 0000000..9dc035a --- /dev/null +++ b/cmd/dockerd/daemon_windows.go @@ -0,0 +1,98 @@ +package dockerd + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +var defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + return false +} + +// setDefaultUmask doesn't do anything on windows +func setDefaultUmask() error { + return nil +} + +func getDaemonConfDir(root string) string { + return filepath.Join(root, `\config`) +} + +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { + // start the service now to prevent timeouts waiting for daemon to start + // but still (eventually) complete all requests that are sent after this + if service != nil { + err := service.started() + if err != nil { + logrus.Fatal(err) + } + } +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { + if service != nil { + if err != nil { + logrus.Fatal(err) + } + service.stopped(err) + } +} + +// setupConfigReloadTrap configures a Win32 event to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + go func() { + sa := syscall.SecurityAttributes{ + Length: 0, + } + ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) + if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { + logrus.Debugf("Config reload - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + cli.reloadConfig() + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + return nil +} + +// getLibcontainerdRoot gets the root directory for libcontainerd to store its +// state. The Windows libcontainerd implementation does not need to write a spec +// or state to disk, so this is a no-op. 
+func (cli *DaemonCli) getLibcontainerdRoot() string { + return "" +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return "" +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/cmd/dockerd/docker.go b/cmd/dockerd/docker.go new file mode 100644 index 0000000..0b87707 --- /dev/null +++ b/cmd/dockerd/docker.go @@ -0,0 +1,110 @@ +package dockerd + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" +) + +func newDaemonCommand() *cobra.Command { + opts := newDaemonOptions(config.New()) + + cmd := &cobra.Command{ + Use: "docker [OPTIONS]", + Short: "A self-sufficient runtime for containers.", + SilenceUsage: true, + SilenceErrors: true, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + opts.flags = cmd.Flags() + return runDaemon(opts) + }, + } + cli.SetupRootCommand(cmd) + + flags := cmd.Flags() + flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file") + opts.InstallFlags(flags) + installConfigFlags(opts.daemonConfig, flags) + installServiceFlags(flags) + + return cmd +} + +func runDaemon(opts *daemonOptions) error { + if opts.version { + showVersion() + return nil + } + + daemonCli := NewDaemonCli() + + // Windows specific settings as these are not defaulted. + if runtime.GOOS == "windows" { + if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "balena.pid") + } + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + } + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, runAsService, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + // If Windows SCM manages the service - no need for PID files + if runAsService { + opts.daemonConfig.Pidfile = "" + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +// Main of the dockerd +func Main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + _, stdout, stderr := term.StdStreams() + + // @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used + // here. However, on Windows it makes no sense and there is no need. 
+	if runtime.GOOS == "windows" {
+		logrus.SetOutput(stdout)
+	} else {
+		logrus.SetOutput(stderr)
+	}
+
+	cmd := newDaemonCommand()
+	cmd.SetOutput(stdout)
+	if err := cmd.Execute(); err != nil {
+		fmt.Fprintf(stderr, "%s\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/cmd/dockerd/docker_windows.go b/cmd/dockerd/docker_windows.go
new file mode 100644
index 0000000..7bf0c19
--- /dev/null
+++ b/cmd/dockerd/docker_windows.go
@@ -0,0 +1,19 @@
+package dockerd
+
+import (
+	"sync/atomic"
+
+	// because it used to be a main package
+	_ "github.com/docker/docker/autogen/winresources/dockerd"
+)
+
+//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll"
+
+var dummy uintptr
+
+func init() {
+	// Ensure that this import is not removed by the linker. This is used to
+	// ensure that shell32.dll is loaded by the system loader, preventing
+	// go#15286 from triggering on Nano Server TP5.
+	atomic.LoadUintptr(&dummy)
+}
diff --git a/cmd/dockerd/hack/malformed_host_override.go b/cmd/dockerd/hack/malformed_host_override.go
new file mode 100644
index 0000000..e7175ca
--- /dev/null
+++ b/cmd/dockerd/hack/malformed_host_override.go
@@ -0,0 +1,121 @@
+// +build !windows
+
+package hack
+
+import "net"
+
+// MalformedHostHeaderOverride is a wrapper to be able
+// to overcome the 400 Bad request coming from old docker
+// clients that send an invalid Host header.
+type MalformedHostHeaderOverride struct {
+	net.Listener
+}
+
+// MalformedHostHeaderOverrideConn wraps the underlying unix
+// connection and keeps track of the first read from http.Server
+// which just reads the headers.
+type MalformedHostHeaderOverrideConn struct {
+	net.Conn
+	first bool
+}
+
+var closeConnHeader = []byte("\r\nConnection: close\r")
+
+// Read reads the first *read* request from http.Server to inspect
+// the Host header. If the Host starts with / then we're talking to
+// an old docker client which sends an invalid Host header. To avoid
+// erroring out in http.Server we rewrite the first bytes of the request
+// to sanitize the Host header itself.
+// In case we're not dealing with old docker clients the data is just passed
+// to the server without modification.
+func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) {
+	// http.Server uses a 4k buffer
+	if l.first && len(b) == 4096 {
+		// This keeps track of the first read from http.Server which just reads
+		// the headers
+		l.first = false
+		// The first read of the connection by http.Server is done limited to
+		// DefaultMaxHeaderBytes (usually 1 << 20) + 4096.
+		// Here we do the first read which gets us all the http headers to
+		// be inspected and modified below.
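+		// For example (mirroring the cases in the tests), a first read of
+		//   "GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"
+		// is rewritten in place to
+		//   "GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"
+		// i.e. the socket path is stripped from the Host value and a
+		// "Connection: close" header is injected so the connection is not reused.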
+ c, err := l.Conn.Read(b) + if err != nil { + return c, err + } + + var ( + start, end int + firstLineFeed = -1 + buf []byte + ) + for i := 0; i <= c-1-7; i++ { + if b[i] == '\n' && firstLineFeed == -1 { + firstLineFeed = i + } + if b[i] != '\n' { + continue + } + + if b[i+1] == '\r' && b[i+2] == '\n' { + return c, nil + } + + if b[i+1] != 'H' { + continue + } + if b[i+2] != 'o' { + continue + } + if b[i+3] != 's' { + continue + } + if b[i+4] != 't' { + continue + } + if b[i+5] != ':' { + continue + } + if b[i+6] != ' ' { + continue + } + if b[i+7] != '/' { + continue + } + // ensure clients other than the docker clients do not get this hack + if i != firstLineFeed { + return c, nil + } + start = i + 7 + // now find where the value ends + for ii, bbb := range b[start:c] { + if bbb == '\n' { + end = start + ii + break + } + } + buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) + // strip the value of the host header and + // inject `Connection: close` to ensure we don't reuse this connection + buf = append(buf, b[:start]...) + buf = append(buf, closeConnHeader...) + buf = append(buf, b[end:c]...) + copy(b, buf) + break + } + if len(buf) == 0 { + return c, nil + } + return len(buf), nil + } + return l.Conn.Read(b) +} + +// Accept makes the listener accepts connections and wraps the connection +// in a MalformedHostHeaderOverrideConn initializing first to true. +func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return c, err + } + return &MalformedHostHeaderOverrideConn{c, true}, nil +} diff --git a/cmd/dockerd/hack/malformed_host_override_test.go b/cmd/dockerd/hack/malformed_host_override_test.go new file mode 100644 index 0000000..1a0a60b --- /dev/null +++ b/cmd/dockerd/hack/malformed_host_override_test.go @@ -0,0 +1,124 @@ +// +build !windows + +package hack + +import ( + "bytes" + "io" + "net" + "strings" + "testing" +) + +type bufConn struct { + net.Conn + buf *bytes.Buffer +} + +func (bc *bufConn) Read(b []byte) (int, error) { + return bc.buf.Read(b) +} + +func TestHeaderOverrideHack(t *testing.T) { + tests := [][2][]byte{ + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! 
" + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + + // Test for https://github.com/docker/docker/issues/23045 + h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" + h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" + tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) + + for _, pair := range tests { + read := make([]byte, 4096) + client := &bufConn{ + buf: bytes.NewBuffer(pair[0]), + } + l := MalformedHostHeaderOverrideConn{client, true} + + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + } +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/cmd/dockerd/metrics.go b/cmd/dockerd/metrics.go new file mode 100644 index 0000000..a163029 --- /dev/null +++ b/cmd/dockerd/metrics.go @@ -0,0 +1,27 @@ +package dockerd + +import ( + "net" + "net/http" + + "github.com/Sirupsen/logrus" + metrics "github.com/docker/go-metrics" +) + +func startMetricsServer(addr string) error { + if err := allocateDaemonPort(addr); err != nil { + return err + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + return nil +} diff --git a/cmd/dockerd/options.go b/cmd/dockerd/options.go new file mode 100644 index 0000000..ac86c63 --- /dev/null +++ b/cmd/dockerd/options.go @@ -0,0 +1,123 @@ +package dockerd + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + // DefaultCaFile is the default filename for the CA pem file + DefaultCaFile = "ca.pem" + // DefaultKeyFile is the default filename for the key pem file + DefaultKeyFile = "key.pem" + // DefaultCertFile is the default filename for the cert pem file + 
DefaultCertFile = "cert.pem"
+	// FlagTLSVerify is the flag name for the TLS verification option
+	FlagTLSVerify = "tlsverify"
+)
+
+var (
+	dockerCertPath  = os.Getenv("DOCKER_CERT_PATH")
+	dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != ""
+)
+
+type daemonOptions struct {
+	version      bool
+	configFile   string
+	daemonConfig *config.Config
+	flags        *pflag.FlagSet
+	Debug        bool
+	Hosts        []string
+	LogLevel     string
+	TLS          bool
+	TLSVerify    bool
+	TLSOptions   *tlsconfig.Options
+}
+
+// newDaemonOptions returns a new daemonOptions
+func newDaemonOptions(config *config.Config) *daemonOptions {
+	return &daemonOptions{
+		daemonConfig: config,
+	}
+}
+
+// InstallFlags adds flags for the common options on the FlagSet
+func (o *daemonOptions) InstallFlags(flags *pflag.FlagSet) {
+	if dockerCertPath == "" {
+		dockerCertPath = cliconfig.Dir()
+	}
+
+	flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode")
+	flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`)
+	flags.BoolVar(&o.TLS, "tls", false, "Use TLS; implied by --tlsverify")
+	flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote")
+
+	// TODO use a flag: flags.StringP("identity", "i", "", "Path to libtrust key file")
+
+	o.TLSOptions = &tlsconfig.Options{
+		CAFile:   filepath.Join(dockerCertPath, DefaultCaFile),
+		CertFile: filepath.Join(dockerCertPath, DefaultCertFile),
+		KeyFile:  filepath.Join(dockerCertPath, DefaultKeyFile),
+	}
+	tlsOptions := o.TLSOptions
+	flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA")
+	flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file")
+	flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file")
+
+	hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, opts.ValidateHost)
+	flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to")
+}
+
+// SetDefaultOptions sets default values for options after flag parsing is
+// complete
+func (o *daemonOptions) SetDefaultOptions(flags *pflag.FlagSet) {
+	// Regardless of whether the user sets it to true or false, if they
+	// specify --tlsverify at all then we need to turn on TLS.
+	// TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need
+	// to check that here as well.
+	if flags.Changed(FlagTLSVerify) || o.TLSVerify {
+		o.TLS = true
+	}
+
+	if !o.TLS {
+		o.TLSOptions = nil
+	} else {
+		tlsOptions := o.TLSOptions
+		tlsOptions.InsecureSkipVerify = !o.TLSVerify
+
+		// Reset CertFile and KeyFile to empty string if the user did not specify
+		// the respective flags and the respective default files were not found.
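+		// For example, with --tlsverify but no --tlscert, and no cert.pem
+		// in the default cert directory, CertFile is cleared instead of
+		// pointing at a file that does not exist.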
+ if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// setLogLevel sets the logrus logging level +func setLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/cmd/dockerd/options_test.go b/cmd/dockerd/options_test.go new file mode 100644 index 0000000..a1adfd7 --- /dev/null +++ b/cmd/dockerd/options_test.go @@ -0,0 +1,43 @@ +package dockerd + +import ( + "path/filepath" + "testing" + + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := newDaemonOptions(&config.Config{}) + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.NoError(t, err) + assert.Equal(t, "/foo/cafile", opts.TLSOptions.CAFile) + assert.Equal(t, "/foo/cert", opts.TLSOptions.CertFile) + assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") +} + +func defaultPath(filename string) string { + return filepath.Join(cliconfig.Dir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := newDaemonOptions(&config.Config{}) + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.NoError(t, err) + assert.Equal(t, defaultPath("ca.pem"), opts.TLSOptions.CAFile) + assert.Equal(t, defaultPath("cert.pem"), opts.TLSOptions.CertFile) + assert.Equal(t, defaultPath("key.pem"), opts.TLSOptions.KeyFile) +} diff --git a/cmd/dockerd/service_unsupported.go b/cmd/dockerd/service_unsupported.go new file mode 100644 index 0000000..9de1343 --- /dev/null +++ b/cmd/dockerd/service_unsupported.go @@ -0,0 +1,14 @@ +// +build !windows + +package dockerd + +import ( + "github.com/spf13/pflag" +) + +func initService(daemonCli *DaemonCli) (bool, bool, error) { + return false, false, nil +} + +func installServiceFlags(flags *pflag.FlagSet) { +} diff --git a/cmd/dockerd/service_windows.go b/cmd/dockerd/service_windows.go new file mode 100644 index 0000000..d6f77be --- /dev/null +++ b/cmd/dockerd/service_windows.go @@ -0,0 +1,431 @@ +package dockerd + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +var ( + flServiceName *string + flRegisterService *bool + flUnregisterService *bool + flRunService *bool + + setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + oldStderr syscall.Handle + panicFile *os.File + + service *handler +) + +const ( + // These should match the values in event_messages.mc. 
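+	// Info, warn and error share message ID 1; they are distinguished by
+	// the event *type* that etwHook.Fire selects from the logrus level,
+	// not by the message ID.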
+ eventInfo = 1 + eventWarn = 1 + eventError = 1 + eventDebug = 2 + eventPanic = 3 + eventFatal = 4 + + eventExtraOffset = 10 // Add this to any event to get a string that supports extended data +) + +func installServiceFlags(flags *pflag.FlagSet) { + flServiceName = flags.String("service-name", "docker", "Set the Windows service name") + flRegisterService = flags.Bool("register-service", false, "Register the service and exit") + flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") + flRunService = flags.Bool("run-service", false, "") + flags.MarkHidden("run-service") +} + +type handler struct { + tosvc chan bool + fromsvc chan error + daemonCli *DaemonCli +} + +type etwHook struct { + log *eventlog.Log +} + +func (h *etwHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *etwHook) Fire(e *logrus.Entry) error { + var ( + etype uint16 + eid uint32 + ) + + switch e.Level { + case logrus.PanicLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventPanic + case logrus.FatalLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventFatal + case logrus.ErrorLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventError + case logrus.WarnLevel: + etype = windows.EVENTLOG_WARNING_TYPE + eid = eventWarn + case logrus.InfoLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventInfo + case logrus.DebugLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventDebug + default: + return errors.New("unknown level") + } + + // If there is additional data, include it as a second string. + exts := "" + if len(e.Data) > 0 { + fs := bytes.Buffer{} + for k, v := range e.Data { + fs.WriteString(k) + fs.WriteByte('=') + fmt.Fprint(&fs, v) + fs.WriteByte(' ') + } + + exts = fs.String()[:fs.Len()-1] + eid += eventExtraOffset + } + + if h.log == nil { + fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) + return nil + } + + var ( + ss [2]*uint16 + err error + ) + + ss[0], err = syscall.UTF16PtrFromString(e.Message) + if err != nil { + return err + } + + count := uint16(1) + if exts != "" { + ss[1], err = syscall.UTF16PtrFromString(exts) + if err != nil { + return err + } + + count++ + } + + return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) +} + +func getServicePath() (string, error) { + p, err := exec.LookPath(os.Args[0]) + if err != nil { + return "", err + } + return filepath.Abs(p) +} + +func registerService() error { + p, err := getServicePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + depends := []string{} + + // This dependency is required on build 14393 (RS1) + // it is added to the platform in newer builds + if system.GetOSVersion().Build == 14393 { + depends = append(depends, "ConDrv") + } + + c := mgr.Config{ + ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, + StartType: mgr.StartAutomatic, + ErrorControl: mgr.ErrorNormal, + Dependencies: depends, + DisplayName: "Docker Engine", + } + + // Configure the service to launch with the arguments that were just passed. + args := []string{"--run-service"} + for _, a := range os.Args[1:] { + if a != "--register-service" && a != "--unregister-service" { + args = append(args, a) + } + } + + s, err := m.CreateService(*flServiceName, p, c, args...) 
+ if err != nil { + return err + } + defer s.Close() + + // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go + const ( + scActionNone = 0 + scActionRestart = 1 + scActionReboot = 2 + scActionRunCommand = 3 + + serviceConfigFailureActions = 2 + ) + + type serviceFailureActions struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions uintptr + } + + type scAction struct { + Type uint32 + Delay uint32 + } + t := []scAction{ + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionNone}, + } + lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} + err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) + if err != nil { + return err + } + + return eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) +} + +func unregisterService() error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(*flServiceName) + if err != nil { + return err + } + defer s.Close() + + eventlog.Remove(*flServiceName) + err = s.Delete() + if err != nil { + return err + } + return nil +} + +// initService is the entry point for running the daemon as a Windows +// service. It returns an indication to stop (if registering/un-registering); +// an indication of whether it is running as a service; and an error. +func initService(daemonCli *DaemonCli) (bool, bool, error) { + if *flUnregisterService { + if *flRegisterService { + return true, false, errors.New("--register-service and --unregister-service cannot be used together") + } + return true, false, unregisterService() + } + + if *flRegisterService { + return true, false, registerService() + } + + if !*flRunService { + return false, false, nil + } + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, false, err + } + + h := &handler{ + tosvc: make(chan bool), + fromsvc: make(chan error), + daemonCli: daemonCli, + } + + var log *eventlog.Log + if !interactive { + log, err = eventlog.Open(*flServiceName) + if err != nil { + return false, false, err + } + } + + logrus.AddHook(&etwHook{log}) + logrus.SetOutput(ioutil.Discard) + + service = h + go func() { + if interactive { + err = debug.Run(*flServiceName, h) + } else { + err = svc.Run(*flServiceName, h) + } + + h.fromsvc <- err + }() + + // Wait for the first signal from the service handler. + err = <-h.fromsvc + if err != nil { + return false, false, err + } + return false, true, nil +} + +func (h *handler) started() error { + // This must be delayed until daemonCli initializes Config.Root + err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) + if err != nil { + return err + } + + h.tosvc <- false + return nil +} + +func (h *handler) stopped(err error) { + logrus.Debugf("Stopping service: %v", err) + h.tosvc <- err != nil + <-h.fromsvc +} + +func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + s <- svc.Status{State: svc.StartPending, Accepts: 0} + // Unblock initService() + h.fromsvc <- nil + + // Wait for initialization to complete. 
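+	// (h.started() sends false on tosvc once the panic file is set up, and
+	// h.stopped() sends err != nil, so receiving true below means
+	// initialization, or the daemon itself, failed.)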
+ failed := <-h.tosvc + if failed { + logrus.Debug("Aborting service start due to failure during initialization") + return true, 1 + } + + s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} + logrus.Debug("Service running") +Loop: + for { + select { + case failed = <-h.tosvc: + break Loop + case c := <-r: + switch c.Cmd { + case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): + h.daemonCli.reloadConfig() + case svc.Interrogate: + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + s <- svc.Status{State: svc.StopPending, Accepts: 0} + h.daemonCli.stop() + } + } + } + + removePanicFile() + if failed { + return true, 1 + } + return false, 0 +} + +func initPanicFile(path string) error { + var err error + panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + + st, err := panicFile.Stat() + if err != nil { + return err + } + + // If there are contents in the file already, move the file out of the way + // and replace it. + if st.Size() > 0 { + panicFile.Close() + os.Rename(path, path+".old") + panicFile, err = os.Create(path) + if err != nil { + return err + } + } + + // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to + // it when it panics. Remember the old stderr to restore it before removing + // the panic file. + sh := syscall.STD_ERROR_HANDLE + h, err := syscall.GetStdHandle(sh) + if err != nil { + return err + } + + oldStderr = h + + r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) + if r == 0 && err != nil { + return err + } + + // Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected) + os.Stderr = os.NewFile(uintptr(panicFile.Fd()), "/dev/stderr") + + // Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether + log.SetOutput(os.Stderr) + + return nil +} + +func removePanicFile() { + if st, err := panicFile.Stat(); err == nil { + if st.Size() == 0 { + sh := syscall.STD_ERROR_HANDLE + setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) + panicFile.Close() + os.Remove(panicFile.Name()) + } + } +} diff --git a/cmd/mobynit/main.go b/cmd/mobynit/main.go new file mode 100644 index 0000000..957c9a4 --- /dev/null +++ b/cmd/mobynit/main.go @@ -0,0 +1,109 @@ +package main + +import ( + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" + + _ "github.com/docker/docker/daemon/graphdriver/aufs" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "golang.org/x/sys/unix" +) + +const ( + LAYER_ROOT = "/docker" + PIVOT_PATH = "/mnt/sysroot/active" +) + +func mountContainer(containerID, graphDriver string) string { + if err := os.MkdirAll("/dev/shm", os.ModePerm); err != nil { + log.Fatal("creating /dev/shm failed:", err) + } + + if err := unix.Mount("shm", "/dev/shm", "tmpfs", 0, ""); err != nil { + log.Fatal("error mounting /dev/shm:", err) + } + defer unix.Unmount("/dev/shm", unix.MNT_DETACH) + + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: LAYER_ROOT, + MetadataStorePathTemplate: filepath.Join(LAYER_ROOT, "image", "%s", "layerdb"), + IDMappings: &idtools.IDMappings{}, + GraphDriver: graphDriver, + Platform: "linux", + }) + if err != nil { + log.Fatal("error loading layer store:", err) + } + + rwlayer, err := ls.GetRWLayer(containerID) + if err != nil { + log.Fatal("error 
getting container layer:", err)
+	}
+
+	newRoot, err := rwlayer.Mount("")
+	if err != nil {
+		log.Fatal("error mounting container fs:", err)
+	}
+
+	if err := unix.Mount("", newRoot, "", unix.MS_REMOUNT, ""); err != nil {
+		log.Fatal("error remounting container as read/write:", err)
+	}
+	defer unix.Mount("", newRoot, "", unix.MS_REMOUNT|unix.MS_RDONLY, "")
+
+	if err := os.MkdirAll(filepath.Join(newRoot, PIVOT_PATH), os.ModePerm); err != nil {
+		log.Fatal("creating /mnt/sysroot failed:", err)
+	}
+
+	return newRoot
+}
+
+func main() {
+	// Any mounts done by initrd will be transferred to the new root
+	mounts, err := mount.GetMounts()
+	if err != nil {
+		log.Fatal("could not get mounts:", err)
+	}
+
+	rawGraphDriver, err := ioutil.ReadFile("/current/boot/storage-driver")
+	if err != nil {
+		log.Fatal("could not get storage driver:", err)
+	}
+	graphDriver := strings.TrimSpace(string(rawGraphDriver))
+
+	current, err := os.Readlink("/current")
+	if err != nil {
+		log.Fatal("could not get container ID:", err)
+	}
+	containerID := filepath.Base(current)
+
+	if err := unix.Mount("", "/", "", unix.MS_REMOUNT, ""); err != nil {
+		log.Fatal("error remounting root as read/write:", err)
+	}
+
+	newRoot := mountContainer(containerID, graphDriver)
+
+	for _, mount := range mounts {
+		if mount.Mountpoint == "/" {
+			continue
+		}
+		if err := unix.Mount(mount.Mountpoint, filepath.Join(newRoot, mount.Mountpoint), "", unix.MS_MOVE, ""); err != nil {
+			log.Println("could not move mountpoint:", mount.Mountpoint, err)
+		}
+	}
+
+	if err := syscall.PivotRoot(newRoot, filepath.Join(newRoot, PIVOT_PATH)); err != nil {
+		log.Fatal("error while pivoting root:", err)
+	}
+
+	if err := unix.Chdir("/"); err != nil {
+		log.Fatal(err)
+	}
+
+	if err := syscall.Exec("/sbin/init", os.Args, os.Environ()); err != nil {
+		log.Fatal("error executing init:", err)
+	}
+}
diff --git a/container/archive.go b/container/archive.go
new file mode 100644
index 0000000..56e6598
--- /dev/null
+++ b/container/archive.go
@@ -0,0 +1,76 @@
+package container
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/system"
+)
+
+// ResolvePath resolves the given path in the container to a resource on the
+// host. Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// an error if the path points to outside the container's rootfs.
+func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
+	// Check if a drive letter is supplied; if so, it must be the system drive. No-op except on Windows.
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Consider the given path as an absolute path in the container.
+	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+
+	// Split the absPath into its Directory and Base components. We will
+	// resolve the dir in the scope of the container then append the base.
+	dirPath, basePath := filepath.Split(absPath)
+
+	resolvedDirPath, err := container.GetResourcePath(dirPath)
+	if err != nil {
+		return "", "", err
+	}
+
+	// resolvedDirPath will have been cleaned (no trailing path separators) so
+	// we can manually join it with the base path element.
+	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+
+	return resolvedPath, absPath, nil
+}
+
+// StatPath stats the filesystem resource at resolvedPath inside the container.
Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/container/container.go b/container/container.go new file mode 100644 index 0000000..f0aa523 --- /dev/null +++ b/container/container.go @@ -0,0 +1,1067 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + dockertypes "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +const configFileName = "config.v2.json" + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 +) + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// Container holds the structure defining a container object. +type Container struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. 
+ BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + Platform string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + DependencyStore agentexec.DependencyGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + ConfigReferences []*swarmtypes.ConfigReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext + + // Fields here are specific to Unix platforms + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool + + // Fields here are specific to Windows + NetworkSharedContainerID string +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if container.Config == nil { + return fmt.Errorf("Invalid container config.json, missing Config property") + } + + // Ensure the platform is set if blank. Assume it is the platform of the + // host OS if not, to ensure containers created before multiple-platform + // support are migrated + if container.Platform == "" { + container.Platform = runtime.GOOS + } + + return container.readHostConfig() +} + +// toDisk saves the container configuration on disk and returns a deep copy. +func (container *Container) toDisk() (*Container, error) { + var ( + buf bytes.Buffer + deepCopy Container + ) + pth, err := container.ConfigPath() + if err != nil { + return nil, err + } + + // Save container settings + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(container); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + deepCopy.HostConfig, err = container.WriteHostConfig() + if err != nil { + return nil, err + } + + return &deepCopy, nil +} + +// CheckpointTo makes the Container's current state visible to queries, and persists state. +// Callers must hold a Container lock. 
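CheckpointTo below is the single choke point at which a mutated Container becomes both durable on disk and visible to readers. A minimal caller sketch, honoring the locking contract from the comment above (hedged: `db` is any ViewDB implementation and a hypothetical variable here):

func checkpoint(c *Container, db ViewDB) error {
	c.Lock() // CheckpointTo requires the container lock to be held
	defer c.Unlock()
	return c.CheckpointTo(db)
}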
+func (container *Container) CheckpointTo(store ViewDB) error { + deepCopy, err := container.toDisk() + if err != nil { + return err + } + return store.Save(deepCopy) +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container, +// and returns a deep copy of the saved object. Callers must hold a Container lock. +func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) { + var ( + buf bytes.Buffer + deepCopy containertypes.HostConfig + ) + + pth, err := container.HostConfigPath() + if err != nil { + return nil, err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + return &deepCopy, nil +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error { + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIDs); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. 
+ + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. 
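For orientation, this is roughly the HostConfig log configuration that would steer StartLogger below into the json-file driver wrapped in a non-blocking ring buffer. A sketch, not taken from this patch, using the api/types/container and jsonfilelog names imported in this file:

cfg := containertypes.LogConfig{
	Type: jsonfilelog.Name, // "json-file"
	Config: map[string]string{
		"mode":            string(containertypes.LogModeNonBlock),
		"max-buffer-size": "4m", // parsed below via units.RAMInBytes
	},
}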
+func (container *Container) StartLogger() (logger.Logger, error) { + cfg := container.HostConfig.LogConfig + initDriver, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("failed to get logging factory: %v", err) + } + info := logger.Info{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + + l, err := initDriver(info) + if err != nil { + return nil, err + } + + if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { + bufferSize := int64(-1) + if s, exists := cfg.Config["max-buffer-size"]; exists { + bufferSize, err = units.RAMInBytes(s) + if err != nil { + return nil, err + } + } + l = logger.NewRingLogger(l, info, bufferSize) + } + return l, nil +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + var health dockertypes.Health + if container.Health != nil { + health = container.Health.Health + } + + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt), health) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + if volumeMount.Volume == nil { + continue + } + + if err := volumeMount.Cleanup(); err != nil { + errors = append(errors, err.Error()) + continue + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. +func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// GetEndpointInNetwork returns the container's endpoint to the provided network. 
+func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. 
+func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. +func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := opts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + for k, v := range epConfig.DriverOpts { + joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. 
+func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
+	var (
+		bindings      = make(nat.PortMap)
+		pbList        []types.PortBinding
+		exposeList    []types.TransportPort
+		createOptions []libnetwork.EndpointOption
+	)
+
+	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
+
+	if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
+		container.NetworkSettings.IsAnonymousEndpoint {
+		createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
+	}
+
+	if epConfig != nil {
+		ipam := epConfig.IPAMConfig
+
+		if ipam != nil {
+			var (
+				ipList          []net.IP
+				ip, ip6, linkip net.IP
+			)
+
+			for _, ips := range ipam.LinkLocalIPs {
+				if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
+					return nil, fmt.Errorf("Invalid link-local IP address: %s", ips)
+				}
+				ipList = append(ipList, linkip)
+			}
+
+			if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
+				return nil, fmt.Errorf("Invalid IPv4 address: %s", ipam.IPv4Address)
+			}
+
+			if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
+				return nil, fmt.Errorf("Invalid IPv6 address: %s", ipam.IPv6Address)
+			}
+
+			createOptions = append(createOptions,
+				libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
+		}
+
+		for _, alias := range epConfig.Aliases {
+			createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
+		}
+	}
+
+	if container.NetworkSettings.Service != nil {
+		svcCfg := container.NetworkSettings.Service
+
+		var vip string
+		if svcCfg.VirtualAddresses[n.ID()] != nil {
+			vip = svcCfg.VirtualAddresses[n.ID()].IPv4
+		}
+
+		var portConfigs []*libnetwork.PortConfig
+		for _, portConfig := range svcCfg.ExposedPorts {
+			portConfigs = append(portConfigs, &libnetwork.PortConfig{
+				Name:          portConfig.Name,
+				Protocol:      libnetwork.PortConfig_Protocol(portConfig.Protocol),
+				TargetPort:    portConfig.TargetPort,
+				PublishedPort: portConfig.PublishedPort,
+			})
+		}
+
+		createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
+	}
+
+	if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
+		createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
+	}
+
+	// Configs that apply only to the endpoint in the network to which the
+	// container was connected on docker run.
+ // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + for k, v := range epConfig.DriverOpts { + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. 
+func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger() + if err != nil { + return fmt.Errorf("failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { + if err := container.startLogging(); err != nil { + container.Reset(false) + return err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return nil +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() string { + return filepath.Join(container.Root, "secrets") +} + +// SecretFilePath returns the path to the location of a secret on the host. 
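SecretFilePath gives the host side; the in-container side comes from getSecretTargetPath a few lines below, which uses absolute target names as-is and joins relative ones under the secret mount. A runnable sketch of that rule (the constant mirrors the Linux containerSecretMountPath defined later in this patch):

package main

import (
	"fmt"
	"path/filepath"
)

const secretMountPath = "/run/secrets"

func target(name string) string {
	if filepath.IsAbs(name) {
		return name
	}
	return filepath.Join(secretMountPath, name)
}

func main() {
	fmt.Println(target("db_password"))      // /run/secrets/db_password
	fmt.Println(target("/etc/ssl/key.pem")) // /etc/ssl/key.pem
}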
+func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) string { + return filepath.Join(container.SecretMountPath(), secretRef.SecretID) +} + +func getSecretTargetPath(r *swarmtypes.SecretReference) string { + if filepath.IsAbs(r.File.Name) { + return r.File.Name + } + + return filepath.Join(containerSecretMountPath, r.File.Name) +} + +// ConfigsDirPath returns the path to the directory where configs are stored on +// disk. +func (container *Container) ConfigsDirPath() string { + return filepath.Join(container.Root, "configs") +} + +// ConfigFilePath returns the path to the on-disk location of a config. +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string { + return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID) +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + // TODO @jhowardmsft LCOW Support. This will need revisiting later. + platform := container.Platform + if platform == "" { + platform = runtime.GOOS + } + env := []string{} + if runtime.GOOS != "windows" || (system.LCOWSupported() && platform == "linux") { + env = []string{ + "PATH=" + system.DefaultPathEnv(platform), + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + } + + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} diff --git a/container/container_linux.go b/container/container_linux.go new file mode 100644 index 0000000..4d4c16b --- /dev/null +++ b/container/container_linux.go @@ -0,0 +1,9 @@ +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} diff --git a/container/container_notlinux.go b/container/container_notlinux.go new file mode 100644 index 0000000..768c762 --- /dev/null +++ b/container/container_notlinux.go @@ -0,0 +1,23 @@ +// +build solaris freebsd + +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + //Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature. + // Therefore there are separate definitions for this. 
+	return unix.Unmount(path, 0)
+}
+
+// SecretMounts returns the mounts for the secret path
+func (container *Container) SecretMounts() []Mount {
+	return nil
+}
+
+// UnmountSecrets unmounts the fs for secrets
+func (container *Container) UnmountSecrets() error {
+	return nil
+}
diff --git a/container/container_unit_test.go b/container/container_unit_test.go
new file mode 100644
index 0000000..9ba2991
--- /dev/null
+++ b/container/container_unit_test.go
@@ -0,0 +1,68 @@
+package container
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	swarmtypes "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/pkg/signal"
+)
+
+func TestContainerStopSignal(t *testing.T) {
+	c := &Container{
+		Config: &container.Config{},
+	}
+
+	def, err := signal.ParseSignal(signal.DefaultStopSignal)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := c.StopSignal()
+	if s != int(def) {
+		t.Fatalf("Expected %v, got %v", def, s)
+	}
+
+	c = &Container{
+		Config: &container.Config{StopSignal: "SIGKILL"},
+	}
+	s = c.StopSignal()
+	if s != 9 {
+		t.Fatalf("Expected 9, got %v", s)
+	}
+}
+
+func TestContainerStopTimeout(t *testing.T) {
+	c := &Container{
+		Config: &container.Config{},
+	}
+
+	s := c.StopTimeout()
+	if s != DefaultStopTimeout {
+		t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s)
+	}
+
+	stopTimeout := 15
+	c = &Container{
+		Config: &container.Config{StopTimeout: &stopTimeout},
+	}
+	// Call StopTimeout, not StopSignal: the original assertion passed only
+	// because the default stop signal (SIGTERM) happens to equal 15.
+	s = c.StopTimeout()
+	if s != 15 {
+		t.Fatalf("Expected 15, got %v", s)
+	}
+}
+
+func TestContainerSecretReferenceDestTarget(t *testing.T) {
+	ref := &swarmtypes.SecretReference{
+		File: &swarmtypes.SecretReferenceFileTarget{
+			Name: "app",
+		},
+	}
+
+	d := getSecretTargetPath(ref)
+	expected := filepath.Join(containerSecretMountPath, "app")
+	if d != expected {
+		t.Fatalf("expected secret dest %q; received %q", expected, d)
+	}
+}
diff --git a/container/container_unix.go b/container/container_unix.go
new file mode 100644
index 0000000..0cd9312
--- /dev/null
+++ b/container/container_unix.go
@@ -0,0 +1,475 @@
+// +build linux freebsd solaris
+
+package container
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/volume"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	containerSecretMountPath = "/run/secrets"
+)
+
+// ExitStatus provides exit reasons for a container.
+type ExitStatus struct {
+	// The exit code with which the container exited.
+	ExitCode int
+
+	// Whether the container encountered an OOM.
+ OOMKilled bool +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + if !container.HasMountFor("/etc/resolv.conf") { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + if !container.HasMountFor("/etc/hostname") { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + if !container.HasMountFor("/etc/hosts") { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + return mounts +} + +// CopyImagePathContent copies files in destination to the volume. 
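CopyImagePathContent below leans on FollowSymlinkInScope to stay inside the rootfs. A small sketch of that guarantee, assuming this tree's pkg/symlink is importable; error handling elided:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	root, _ := ioutil.TempDir("", "rootfs") // stand-in for a container BaseFS
	defer os.RemoveAll(root)
	os.Symlink("/etc", filepath.Join(root, "escape"))
	p, err := symlink.FollowSymlinkInScope(filepath.Join(root, "escape"), root)
	fmt.Println(p, err) // <root>/etc <nil>: the link is re-rooted inside root, not followed out
}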
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { + if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(volume.DefaultPropagationMode), + }) + } + + return mounts +} + +// SecretMounts returns the mounts for the secret path. +func (container *Container) SecretMounts() []Mount { + var mounts []Mount + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + mounts = append(mounts, Mount{ + Source: container.SecretFilePath(*r), + Destination: getSecretTargetPath(r), + Writable: false, + }) + } + + return mounts +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + if _, err := os.Stat(container.SecretMountPath()); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return detachMounted(container.SecretMountPath()) +} + +// ConfigMounts returns the mounts for configs. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + mounts = append(mounts, Mount{ + Source: container.ConfigFilePath(*configRef), + Destination: configRef.File.Name, + Writable: false, + }) + } + + return mounts +} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
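A usage sketch for UpdateContainer below (hedged: `c` is a locked *Container, hypothetical): a live update that sets NanoCPUs while CPUPeriod is already configured is refused.

c.HostConfig.Resources.CPUPeriod = 100000
err := c.UpdateContainer(&containertypes.HostConfig{
	Resources: containertypes.Resources{NanoCPUs: 2e9}, // request 2 CPUs
})
fmt.Println(err) // Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set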
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
+	// update resources of container
+	resources := hostConfig.Resources
+	cResources := &container.HostConfig.Resources
+
+	// Validate NanoCPUs, CPUPeriod, and CPUQuota.
+	// Because NanoCPUs effectively updates CPUPeriod/CPUQuota,
+	// once NanoCPUs is set, updating CPUPeriod/CPUQuota is blocked, and vice versa.
+	// In the following we make sure the intended update (resources) does not
+	// conflict with the existing settings (cResources).
+	if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 {
+		return fmt.Errorf("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set")
+	}
+	if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 {
+		return fmt.Errorf("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set")
+	}
+	if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 {
+		return fmt.Errorf("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set")
+	}
+	if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 {
+		return fmt.Errorf("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set")
+	}
+
+	if resources.BlkioWeight != 0 {
+		cResources.BlkioWeight = resources.BlkioWeight
+	}
+	if resources.CPUShares != 0 {
+		cResources.CPUShares = resources.CPUShares
+	}
+	if resources.NanoCPUs != 0 {
+		cResources.NanoCPUs = resources.NanoCPUs
+	}
+	if resources.CPUPeriod != 0 {
+		cResources.CPUPeriod = resources.CPUPeriod
+	}
+	if resources.CPUQuota != 0 {
+		cResources.CPUQuota = resources.CPUQuota
+	}
+	if resources.CpusetCpus != "" {
+		cResources.CpusetCpus = resources.CpusetCpus
+	}
+	if resources.CpusetMems != "" {
+		cResources.CpusetMems = resources.CpusetMems
+	}
+	if resources.Memory != 0 {
+		// If the requested memory limit is greater than the already-set
+		// memoryswap limit and this update does not also change the
+		// memoryswap limit, error out.
+		if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
+			return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
+		}
+		cResources.Memory = resources.Memory
+	}
+	if resources.MemorySwap != 0 {
+		cResources.MemorySwap = resources.MemorySwap
+	}
+	if resources.MemoryReservation != 0 {
+		cResources.MemoryReservation = resources.MemoryReservation
+	}
+	if resources.KernelMemory != 0 {
+		cResources.KernelMemory = resources.KernelMemory
+	}
+
+	// update HostConfig of container
+	if hostConfig.RestartPolicy.Name != "" {
+		if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
+			return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container")
+		}
+		container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
+	}
+
+	return nil
+}
+
+// DetachAndUnmount performs a lazy (detached) unmount on every mount
+// destination, then unmounts each volume normally.
+// This is used from daemon/archive for `docker cp`
+func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
+	networkMounts := container.NetworkMounts()
+	mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
+
+	for _, mntPoint := range container.MountPoints {
+		dest, err := container.GetResourcePath(mntPoint.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, m := range networkMounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, mountPath := range mountPaths {
+		if err := detachMounted(mountPath); err != nil {
+			logrus.Warnf("%s unmountVolumes: Failed to do lazy umount of volume '%s': %v", container.ID, mountPath, err)
+		}
+	}
+	return container.UnmountVolumes(volumeEventLog)
+}
+
+// copyExistingContents copies from the source to the destination and
+// ensures the ownership is appropriately set.
+func copyExistingContents(source, destination string) error {
+	volList, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+	if len(volList) > 0 {
+		dstList, err := ioutil.ReadDir(destination)
+		if err != nil {
+			return err
+		}
+		if len(dstList) == 0 {
+			// If the destination volume is empty, copy the source contents
+			// into it.
+			if err := chrootarchive.NewArchiver(nil).CopyWithTar(source, destination); err != nil {
+				return err
+			}
+		}
+	}
+	return copyOwnership(source, destination)
+}
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// to the destination file
+func copyOwnership(source, destination string) error {
+	stat, err := system.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	destStat, err := system.Stat(destination)
+	if err != nil {
+		return err
+	}
+
+	// In some cases, even though UID/GID match and it would effectively be a no-op,
+	// this can return a permission denied error... for example if this is an NFS
+	// mount.
+	// Since it's not really an error that we can't chown to the same UID/GID, don't
+	// even bother trying in such cases.
+ if stat.UID() != destStat.UID() || stat.GID() != destStat.GID() { + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + } + + if stat.Mode() != destStat.Mode() { + return os.Chmod(destination, os.FileMode(stat.Mode())) + } + return nil +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} diff --git a/container/container_windows.go b/container/container_windows.go new file mode 100644 index 0000000..0f2a45d --- /dev/null +++ b/container/container_windows.go @@ -0,0 +1,210 @@ +// +build windows + +package container + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/system" +) + +const ( + containerSecretMountPath = `C:\ProgramData\Docker\secrets` + containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` + containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` +) + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// UnmountIpcMounts unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// CreateSecretSymlinks creates symlinks to files in the secret mount. 
+func (container *Container) CreateSecretSymlinks() error { + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// SecretMounts returns the mount for the secret path. +// All secrets are stored in a single mount on Windows. Target symlinks are +// created for each secret, pointing to the files in this mount. +func (container *Container) SecretMounts() []Mount { + var mounts []Mount + if len(container.SecretReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.SecretMountPath(), + Destination: containerInternalSecretMountPath, + Writable: false, + }) + } + + return mounts +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return os.RemoveAll(container.SecretMountPath()) +} + +// CreateConfigSymlinks creates symlinks to files in the config mount. +func (container *Container) CreateConfigSymlinks() error { + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(configRef.File.Name) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// ConfigMounts returns the mount for configs. +// All configs are stored in a single mount on Windows. Target symlinks are +// created for each config, pointing to the files in this mount. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + if len(container.ConfigReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.ConfigsDirPath(), + Destination: containerInternalConfigsDirPath, + Writable: false, + }) + } + + return mounts +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + resources := hostConfig.Resources + if resources.CPUShares != 0 || + resources.Memory != 0 || + resources.NanoCPUs != 0 || + resources.CgroupParent != "" || + resources.BlkioWeight != 0 || + len(resources.BlkioWeightDevice) != 0 || + len(resources.BlkioDeviceReadBps) != 0 || + len(resources.BlkioDeviceWriteBps) != 0 || + len(resources.BlkioDeviceReadIOps) != 0 || + len(resources.BlkioDeviceWriteIOps) != 0 || + resources.CPUPeriod != 0 || + resources.CPUQuota != 0 || + resources.CPURealtimePeriod != 0 || + resources.CPURealtimeRuntime != 0 || + resources.CpusetCpus != "" || + resources.CpusetMems != "" || + len(resources.Devices) != 0 || + len(resources.DeviceCgroupRules) != 0 || + resources.DiskQuota != 0 || + resources.KernelMemory != 0 || + resources.MemoryReservation != 0 || + resources.MemorySwap != 0 || + resources.MemorySwappiness != nil || + resources.OomKillDisable != nil || + resources.PidsLimit != 0 || + len(resources.Ulimits) != 0 || + resources.CPUCount != 0 || + resources.CPUPercent != 0 || + resources.IOMaximumIOps != 0 || + resources.IOMaximumBandwidth != 0 { + return fmt.Errorf("resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares +// to combine with a volume path +func cleanResourcePath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} diff --git a/container/env.go b/container/env.go new file mode 100644 index 0000000..896a384 --- /dev/null +++ b/container/env.go @@ -0,0 +1,43 @@ +package container + +import ( + "strings" +) + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. 
+ if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/container/env_test.go b/container/env_test.go new file mode 100644 index 0000000..4ebf264 --- /dev/null +++ b/container/env_test.go @@ -0,0 +1,24 @@ +package container + +import "testing" + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/", "FOO=foo_default"} + // remove FOO from env + // remove BAR from env (nop) + o = []string{"HOME=/root", "TERM=xterm", "FOO", "BAR"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + t.Logf("default=%v, override=%v, result=%v", d, o, env) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} diff --git a/container/health.go b/container/health.go new file mode 100644 index 0000000..31c5600 --- /dev/null +++ b/container/health.go @@ -0,0 +1,50 @@ +package container + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + // This happens when the monitor has yet to be setup. + if s.Status == "" { + return types.Unhealthy + } + + switch s.Status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Status + } +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, +// it returns nil. +func (s *Health) OpenMonitorChannel() chan struct{} { + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. +func (s *Health) CloseMonitorChannel() { + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + // unhealthy when the monitor has stopped for compatibility reasons + s.Status = types.Unhealthy + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/container/history.go b/container/history.go new file mode 100644 index 0000000..c80c2aa --- /dev/null +++ b/container/history.go @@ -0,0 +1,30 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. 
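History's sort.Interface methods (Less below, plus Len and Swap) give newest-first ordering through the standard library. A caller sketch (the container values are hypothetical):

h := History{oldest, newest} // *Container values created at different times
sort.Sort(&h)                // the methods have pointer receivers
// h[0] is now the most recently created container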
+func (history *History) Less(i, j int) bool {
+	containers := *history
+	return containers[j].Created.Before(containers[i].Created)
+}
+
+// Swap switches containers i and j positions in the history.
+func (history *History) Swap(i, j int) {
+	containers := *history
+	containers[i], containers[j] = containers[j], containers[i]
+}
+
+// sort orders the history by creation date in descending order.
+func (history *History) sort() {
+	sort.Sort(history)
+}
diff --git a/container/memory_store.go b/container/memory_store.go
new file mode 100644
index 0000000..706407a
--- /dev/null
+++ b/container/memory_store.go
@@ -0,0 +1,95 @@
+package container
+
+import (
+	"sync"
+)
+
+// memoryStore implements a Store in memory.
+type memoryStore struct {
+	s map[string]*Container
+	sync.RWMutex
+}
+
+// NewMemoryStore initializes a new memory store.
+func NewMemoryStore() Store {
+	return &memoryStore{
+		s: make(map[string]*Container),
+	}
+}
+
+// Add appends a new container to the memory store.
+// It overwrites the entry if the id existed before.
+func (c *memoryStore) Add(id string, cont *Container) {
+	c.Lock()
+	c.s[id] = cont
+	c.Unlock()
+}
+
+// Get returns a container from the store by id.
+func (c *memoryStore) Get(id string) *Container {
+	var res *Container
+	c.RLock()
+	res = c.s[id]
+	c.RUnlock()
+	return res
+}
+
+// Delete removes a container from the store by id.
+func (c *memoryStore) Delete(id string) {
+	c.Lock()
+	delete(c.s, id)
+	c.Unlock()
+}
+
+// List returns a sorted list of containers from the store.
+// The containers are ordered by creation date.
+func (c *memoryStore) List() []*Container {
+	containers := History(c.all())
+	containers.sort()
+	return containers
+}
+
+// Size returns the number of containers in the store.
+func (c *memoryStore) Size() int {
+	c.RLock()
+	defer c.RUnlock()
+	return len(c.s)
+}
+
+// First returns the first container found in the store by a given filter.
+func (c *memoryStore) First(filter StoreFilter) *Container {
+	for _, cont := range c.all() {
+		if filter(cont) {
+			return cont
+		}
+	}
+	return nil
+}
+
+// ApplyAll calls the reducer function with every container in the store.
+// The reducer is applied to each container concurrently, but ApplyAll
+// blocks until every invocation has returned.
+// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
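+//
+// A minimal usage sketch (hypothetical caller; the counter must be
+// synchronized because the reducer runs concurrently):
+//
+//	var running int32
+//	store.ApplyAll(func(c *Container) {
+//		if c.IsRunning() {
+//			atomic.AddInt32(&running, 1)
+//		}
+//	})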
+func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/container/memory_store_test.go b/container/memory_store_test.go new file mode 100644 index 0000000..8d26d1a --- /dev/null +++ b/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + + s.Add("id", cont) + s.Add("id2", cont2) + + list := s.List() + if len(list) != 2 { + t.Fatalf("expected list size 2, got %v", len(list)) + } + if list[0].ID != "id2" { + t.Fatalf("expected id2, got %v", list[0].ID) + } +} + +func TestFirstContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + first := s.First(func(cont *Container) bool { + return cont.ID == "id2" + }) + + if first == nil { + t.Fatal("expected container to not be nil") + } + if first.ID != "id2" { + t.Fatalf("expected id2, got %v", first) + } +} + +func TestApplyAllContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + s.ApplyAll(func(cont *Container) { + if cont.ID == "id2" { + cont.ID = "newID" + } + }) + + cont := s.Get("id2") + if cont == nil { + t.Fatal("expected container to not be nil") + } + if cont.ID != "newID" { + t.Fatalf("expected newID, got %v", cont.ID) + } +} diff --git a/container/monitor.go b/container/monitor.go new file mode 100644 index 0000000..f05e72b --- /dev/null +++ b/container/monitor.go @@ -0,0 +1,46 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. 
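+// The lock argument controls whether Reset takes the container lock itself
+// (true) or assumes the caller already holds it (false).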
+func (container *Container) Reset(lock bool) {
+	if lock {
+		container.Lock()
+		defer container.Unlock()
+	}
+
+	if err := container.CloseStreams(); err != nil {
+		logrus.Errorf("%s: %s", container.ID, err)
+	}
+
+	// Re-create a brand new stdin pipe once the container exited
+	if container.Config.OpenStdin {
+		container.StreamConfig.NewInputPipes()
+	}
+
+	if container.LogDriver != nil {
+		if container.LogCopier != nil {
+			exit := make(chan struct{})
+			go func() {
+				container.LogCopier.Wait()
+				close(exit)
+			}()
+			select {
+			case <-time.After(loggerCloseTimeout):
+				logrus.Warn("Logger didn't exit in time: logs may be truncated")
+			case <-exit:
+			}
+		}
+		container.LogDriver.Close()
+		container.LogCopier = nil
+		container.LogDriver = nil
+	}
+}
diff --git a/container/mounts_unix.go b/container/mounts_unix.go
new file mode 100644
index 0000000..c52abed
--- /dev/null
+++ b/container/mounts_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+	Data        string `json:"data"`
+	Propagation string `json:"mountpropagation"`
+}
diff --git a/container/mounts_windows.go b/container/mounts_windows.go
new file mode 100644
index 0000000..01b327f
--- /dev/null
+++ b/container/mounts_windows.go
@@ -0,0 +1,8 @@
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+}
diff --git a/container/state.go b/container/state.go
new file mode 100644
index 0000000..b87577b
--- /dev/null
+++ b/container/state.go
@@ -0,0 +1,378 @@
+package container
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/go-units"
+)
+
+// State holds the current container state, and has methods to get and
+// set the state. State is embedded in Container, which allows all of the
+// functions defined against State to run against Container.
+type State struct {
+	sync.Mutex
+	// Note that `Running` and `Paused` are not mutually exclusive:
+	// When pausing a container (on Linux), the cgroups freezer is used to suspend
+	// all processes in the container. Freezing the process requires the process to
+	// be running. As a result, paused containers are both `Running` _and_ `Paused`.
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	RemovalInProgress bool // No need for this to be persistent on disk.
+	Dead              bool
+	Pid               int
+	ExitCodeValue     int    `json:"ExitCode"`
+	ErrorMsg          string `json:"Error"` // contains last known error when starting the container
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	Health            *Health
+
+	waitStop   chan struct{}
+	waitRemove chan struct{}
+}
+
+// StateStatus is used to return container wait results.
+// Implements exec.ExitCode interface.
+// This type is needed as State includes a sync.Mutex field, which makes
+// copying it unsafe.
+type StateStatus struct {
+	exitCode int
+	err      error
+}
+
+// ExitCode returns current exitcode for the state.
+func (s StateStatus) ExitCode() int {
+	return s.exitCode
+}
+
+// Err returns current error for the state. Returns nil if the container had
+// exited on its own.
+func (s StateStatus) Err() error {
+	return s.err
+}
+
+// NewState creates a default state object with a fresh channel for state changes.
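+// Callers typically drive the returned State through the Set* helpers while
+// holding the lock, e.g. (sketch, pid illustrative):
+//
+//	s := NewState()
+//	s.Lock()
+//	s.SetRunning(pid, true)
+//	s.Unlock()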
+func NewState() *State {
+	return &State{
+		waitStop:   make(chan struct{}),
+		waitRemove: make(chan struct{}),
+	}
+}
+
+// String returns a human-readable description of the state
+func (s *State) String() string {
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		if h := s.Health; h != nil {
+			return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.RemovalInProgress {
+		return "Removal In Progress"
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "Created"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// HealthString returns a single string to describe health status.
+func (s *State) HealthString() string {
+	if s.Health == nil {
+		return types.NoHealthcheck
+	}
+
+	return s.Health.String()
+}
+
+// IsValidHealthString checks if the provided string is a valid container health status or not.
+func IsValidHealthString(s string) bool {
+	return s == types.Starting ||
+		s == types.Healthy ||
+		s == types.Unhealthy ||
+		s == types.NoHealthcheck
+}
+
+// StateString returns a single string to describe state
+func (s *State) StateString() string {
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.RemovalInProgress {
+		return "removing"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "created"
+	}
+
+	return "exited"
+}
+
+// IsValidStateString checks if the provided string is a valid container state or not.
+func IsValidStateString(s string) bool {
+	if s != "paused" &&
+		s != "restarting" &&
+		s != "removing" &&
+		s != "running" &&
+		s != "dead" &&
+		s != "created" &&
+		s != "exited" {
+		return false
+	}
+	return true
+}
+
+// WaitCondition is an enum type for different states to wait for.
+type WaitCondition int
+
+// Possible WaitCondition Values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+	WaitConditionNotRunning WaitCondition = iota
+	WaitConditionNextExit
+	WaitConditionRemoved
+)
+
+// Wait waits until the container is in a certain state indicated by the given
+// condition. A context must be used for cancelling the request, controlling
+// timeouts, and avoiding goroutine leaks. Wait must be called without holding
+// the state lock. Returns a channel from which the caller will receive the
+// result. If the container exited on its own, the result's Err() method will
+// be nil and its ExitCode() method will return the container's exit code;
+// otherwise, the result's Err() method will return an error indicating why
+// the wait operation failed.
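+//
+// A typical call looks like the following sketch (the timeout is
+// illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	if status := <-s.Wait(ctx, WaitConditionNotRunning); status.Err() != nil {
+//		// the wait failed, e.g. the context timed out
+//	}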
+func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
+	s.Lock()
+	defer s.Unlock()
+
+	if condition == WaitConditionNotRunning && !s.Running {
+		// Buffer so we can put it in the channel now.
+		resultC := make(chan StateStatus, 1)
+
+		// Send the current status.
+		resultC <- StateStatus{
+			exitCode: s.ExitCode(),
+			err:      s.Err(),
+		}
+
+		return resultC
+	}
+
+	// If we are waiting only for removal, the waitStop channel should
+	// remain nil and block forever.
+	var waitStop chan struct{}
+	if condition < WaitConditionRemoved {
+		waitStop = s.waitStop
+	}
+
+	// Always wait for removal, just in case the container gets removed
+	// while it is still in a "created" state, in which case it is never
+	// actually stopped.
+	waitRemove := s.waitRemove
+
+	resultC := make(chan StateStatus)
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			// Context timeout or cancellation.
+			resultC <- StateStatus{
+				exitCode: -1,
+				err:      ctx.Err(),
+			}
+			return
+		case <-waitStop:
+		case <-waitRemove:
+		}
+
+		s.Lock()
+		result := StateStatus{
+			exitCode: s.ExitCode(),
+			err:      s.Err(),
+		}
+		s.Unlock()
+
+		resultC <- result
+	}()
+
+	return resultC
+}
+
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
+func (s *State) IsRunning() bool {
+	s.Lock()
+	res := s.Running
+	s.Unlock()
+	return res
+}
+
+// GetPID returns the process ID of the container.
+func (s *State) GetPID() int {
+	s.Lock()
+	res := s.Pid
+	s.Unlock()
+	return res
+}
+
+// ExitCode returns current exitcode for the state. Take lock before if state
+// may be shared.
+func (s *State) ExitCode() int {
+	return s.ExitCodeValue
+}
+
+// SetExitCode sets current exitcode for the state. Take lock before if state
+// may be shared.
+func (s *State) SetExitCode(ec int) {
+	s.ExitCodeValue = ec
+}
+
+// SetRunning sets the state of the container to "running".
+func (s *State) SetRunning(pid int, initial bool) {
+	s.ErrorMsg = ""
+	s.Running = true
+	s.Restarting = false
+	s.ExitCodeValue = 0
+	s.Pid = pid
+	if initial {
+		s.StartedAt = time.Now().UTC()
+	}
+}
+
+// SetStopped sets the container state to "stopped" without locking.
+func (s *State) SetStopped(exitStatus *ExitStatus) {
+	s.Running = false
+	s.Paused = false
+	s.Restarting = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitStop) // Fire waiters for stop
+	s.waitStop = make(chan struct{})
+}
+
+// SetRestarting sets the container state to "restarting" without locking.
+// It also sets the container PID to 0.
+func (s *State) SetRestarting(exitStatus *ExitStatus) {
+	// we should consider the container running when it is restarting because of
+	// all the checks in docker around rm/stop/etc
+	s.Running = true
+	s.Restarting = true
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitStop) // Fire waiters for stop
+	s.waitStop = make(chan struct{})
+}
+
+// SetError sets the container's error state. This is useful when we want to
+// know, when inspecting the container, the error that occurred while it was
+// transitioning to another state.
+func (s *State) SetError(err error) {
+	s.ErrorMsg = err.Error()
+}
+
+// IsPaused returns whether the container is paused or not.
+func (s *State) IsPaused() bool {
+	s.Lock()
+	res := s.Paused
+	s.Unlock()
+	return res
+}
+
+// IsRestarting returns whether the container is restarting or not.
+func (s *State) IsRestarting() bool {
+	s.Lock()
+	res := s.Restarting
+	s.Unlock()
+	return res
+}
+
+// SetRemovalInProgress sets the container state as being removed.
+// It returns true if the container was already in that state.
+func (s *State) SetRemovalInProgress() bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.RemovalInProgress {
+		return true
+	}
+	s.RemovalInProgress = true
+	return false
+}
+
+// ResetRemovalInProgress sets RemovalInProgress back to false.
+func (s *State) ResetRemovalInProgress() {
+	s.Lock()
+	s.RemovalInProgress = false
+	s.Unlock()
+}
+
+// SetDead sets the container state to "dead"
+func (s *State) SetDead() {
+	s.Lock()
+	s.Dead = true
+	s.Unlock()
+}
+
+// SetRemoved assumes this container is already in the "dead" state and
+// closes the internal waitRemove channel to unblock callers waiting for a
+// container to be removed.
+func (s *State) SetRemoved() {
+	s.Lock()
+	close(s.waitRemove) // Unblock those waiting on remove.
+	s.Unlock()
+}
+
+// Err returns an error if there is one.
+func (s *State) Err() error {
+	if s.ErrorMsg != "" {
+		return errors.New(s.ErrorMsg)
+	}
+	return nil
+}
diff --git a/container/state_solaris.go b/container/state_solaris.go
new file mode 100644
index 0000000..1229650
--- /dev/null
+++ b/container/state_solaris.go
@@ -0,0 +1,7 @@
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+}
diff --git a/container/state_test.go b/container/state_test.go
new file mode 100644
index 0000000..2a90e55
--- /dev/null
+++ b/container/state_test.go
@@ -0,0 +1,168 @@
+package container
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestIsValidHealthString(t *testing.T) {
+	contexts := []struct {
+		Health   string
+		Expected bool
+	}{
+		{types.Healthy, true},
+		{types.Unhealthy, true},
+		{types.Starting, true},
+		{types.NoHealthcheck, true},
+		{"fail", false},
+	}
+
+	for _, c := range contexts {
+		v := IsValidHealthString(c.Health)
+		if v != c.Expected {
+			t.Fatalf("Expected %t, but got %t", c.Expected, v)
+		}
+	}
+}
+
+func TestStateRunStop(t *testing.T) {
+	s := NewState()
+
+	// Begin a wait with WaitConditionRemoved. It should complete
+	// within 200 milliseconds.
+	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+	defer cancel()
+	removalWait := s.Wait(ctx, WaitConditionRemoved)
+
+	// Full lifecycle two times.
+	for i := 1; i <= 2; i++ {
+		// A wait with WaitConditionNotRunning should return
+		// immediately since the state is now either "created" (on the
+		// first iteration) or "exited" (on the second iteration). It
+		// shouldn't take more than 50 milliseconds.
+		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		defer cancel()
+		// Expect the exit code to be i-1 since it should be the exit
+		// code from the previous loop or 0 for the created state.
+		if status := <-s.Wait(ctx, WaitConditionNotRunning); status.ExitCode() != i-1 {
+			t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i-1, status.Err())
+		}
+
+		// A wait with WaitConditionNextExit should block until the
+		// container has started and exited. It shouldn't take more
+		// than 100 milliseconds.
+ ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + initialWait := s.Wait(ctx, WaitConditionNextExit) + + // Set the state to "Running". + s.Lock() + s.SetRunning(i, true) + s.Unlock() + + // Assert desired state. + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i { + t.Fatalf("Pid %v, expected %v", s.Pid, i) + } + if s.ExitCode() != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) + } + + // Now that it's running, a wait with WaitConditionNotRunning + // should block until we stop the container. It shouldn't take + // more than 100 milliseconds. + ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + exitWait := s.Wait(ctx, WaitConditionNotRunning) + + // Set the state to "Exited". + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: i}) + s.Unlock() + + // Assert desired state. + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + + // Receive the initialWait result. + if status := <-initialWait; status.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err()) + } + + // Receive the exitWait result. + if status := <-exitWait; status.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err()) + } + } + + // Set the state to dead and removed. + s.SetDead() + s.SetRemoved() + + // Wait for removed status or timeout. + if status := <-removalWait; status.ExitCode() != 2 { + // Should have the final exit code from the loop. + t.Fatalf("Removal wait exitCode %v, expected %v, err %q", status.ExitCode(), 2, status.Err()) + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + + s.Lock() + s.SetRunning(0, true) + s.Unlock() + + // Start a wait with a timeout. + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + waitC := s.Wait(ctx, WaitConditionNotRunning) + + // It should timeout *before* this 200ms timer does. + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case status := <-waitC: + t.Log("Stop callback fired") + // Should be a timeout error. + if status.Err() == nil { + t.Fatal("expected timeout error, got nil") + } + if status.ExitCode() != -1 { + t.Fatalf("expected exit code %v, got %v", -1, status.ExitCode()) + } + } + + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: 0}) + s.Unlock() + + // Start another wait with a timeout. This one should return + // immediately. + ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + waitC = s.Wait(ctx, WaitConditionNotRunning) + + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case status := <-waitC: + t.Log("Stop callback fired") + if status.ExitCode() != 0 { + t.Fatalf("expected exit code %v, got %v, err %q", 0, status.ExitCode(), status.Err()) + } + } +} diff --git a/container/state_unix.go b/container/state_unix.go new file mode 100644 index 0000000..a2fa5af --- /dev/null +++ b/container/state_unix.go @@ -0,0 +1,10 @@ +// +build linux freebsd + +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. 
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+}
diff --git a/container/state_windows.go b/container/state_windows.go
new file mode 100644
index 0000000..1229650
--- /dev/null
+++ b/container/state_windows.go
@@ -0,0 +1,7 @@
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
+	s.ExitCodeValue = exitStatus.ExitCode
+}
diff --git a/container/store.go b/container/store.go
new file mode 100644
index 0000000..042fb1a
--- /dev/null
+++ b/container/store.go
@@ -0,0 +1,28 @@
+package container
+
+// StoreFilter defines a function to filter
+// containers in the store.
+type StoreFilter func(*Container) bool
+
+// StoreReducer defines a function to
+// manipulate containers in the store.
+type StoreReducer func(*Container)
+
+// Store defines an interface that
+// any container store must implement.
+type Store interface {
+	// Add appends a new container to the store.
+	Add(string, *Container)
+	// Get returns a container from the store by the identifier it was stored with.
+	Get(string) *Container
+	// Delete removes a container from the store by the identifier it was stored with.
+	Delete(string)
+	// List returns a list of containers from the store.
+	List() []*Container
+	// Size returns the number of containers in the store.
+	Size() int
+	// First returns the first container found in the store by a given filter.
+	First(StoreFilter) *Container
+	// ApplyAll calls the reducer function with every container in the store.
+	ApplyAll(StoreReducer)
+}
diff --git a/container/stream/attach.go b/container/stream/attach.go
new file mode 100644
index 0000000..3dd53d3
--- /dev/null
+++ b/container/stream/attach.go
@@ -0,0 +1,179 @@
+package stream
+
+import (
+	"io"
+	"sync"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/term"
+)
+
+var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q
+
+// AttachConfig is the config struct used to attach a client to a stream's stdio
+type AttachConfig struct {
+	// Tells the attach copier that the stream's stdin is a TTY and to look for
+	// escape sequences in stdin to detach from the stream.
+	// When true, the escape sequence is not passed to the underlying stream.
+	TTY bool
+	// Specifies the detach keys the client will be using.
+	// Only useful when `TTY` is true.
+	DetachKeys []byte
+
+	// CloseStdin signals that once done, stdin for the attached stream should be closed
+	// For example, this would close the attached container's stdin.
+	CloseStdin bool
+
+	// UseStd* indicate whether the client has requested to be connected to the
+	// given stream or not. These flags are used instead of checking Std* != nil
+	// at points before the client streams Std* are wired up.
+ UseStdin, UseStdout, UseStderr bool + + // CStd* are the streams directly connected to the container + CStdin io.WriteCloser + CStdout, CStderr io.ReadCloser + + // Provide client streams to wire up to + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +// AttachStreams attaches the container's streams to the AttachConfig +func (c *Config) AttachStreams(cfg *AttachConfig) { + if cfg.UseStdin { + cfg.CStdin = c.StdinPipe() + } + + if cfg.UseStdout { + cfg.CStdout = c.StdoutPipe() + } + + if cfg.UseStderr { + cfg.CStderr = c.StderrPipe() + } +} + +// CopyStreams starts goroutines to copy data in and out to/from the container +func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) chan error { + var ( + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if cfg.Stdin != nil { + wg.Add(1) + } + + if cfg.Stdout != nil { + wg.Add(1) + } + + if cfg.Stderr != nil { + wg.Add(1) + } + + // Connect stdin of container to the attach stdin stream. + go func() { + if cfg.Stdin == nil { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if cfg.TTY { + _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) + } else { + _, err = pools.Copy(cfg.CStdin, cfg.Stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if cfg.CloseStdin && !cfg.TTY { + cfg.CStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := pools.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if cfg.Stdin != nil { + cfg.Stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", cfg.Stdout, cfg.CStdout) + go attachStream("stderr", cfg.Stderr, cfg.CStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cfg.CStdin != nil { + cfg.CStdin.Close() + } + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + keys = defaultEscapeSequence + } + pr := term.NewEscapeProxy(src, keys) + defer src.Close() + + return pools.Copy(dst, pr) +} diff --git a/container/stream/streams.go b/container/stream/streams.go new file mode 100644 index 0000000..735bab5 --- /dev/null +++ b/container/stream/streams.go @@ -0,0 +1,146 @@ +package stream + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" +) + +// Config holds information about I/O streams managed together. 
+// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +// This will block stdout if unconsumed. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +// This will block stderr if unconsumed. +func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. 
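+// A minimal lifecycle sketch (assuming a caller that owns the Config):
+//
+//	cfg := NewConfig()
+//	cfg.NewInputPipes()
+//	// ... wire up and copy streams ...
+//	if err := cfg.CloseStreams(); err != nil {
+//		logrus.Errorf("closing streams: %v", err)
+//	}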
+func (c *Config) CloseStreams() error {
+	var errors []string
+
+	if c.stdin != nil {
+		if err := c.stdin.Close(); err != nil {
+			errors = append(errors, fmt.Sprintf("error closing stdin: %s", err))
+		}
+	}
+
+	if err := c.stdout.Clean(); err != nil {
+		errors = append(errors, fmt.Sprintf("error closing stdout: %s", err))
+	}
+
+	if err := c.stderr.Clean(); err != nil {
+		errors = append(errors, fmt.Sprintf("error closing stderr: %s", err))
+	}
+
+	if len(errors) > 0 {
+		// Use an explicit format string so that any '%' in the collected
+		// error messages is not interpreted as a formatting directive.
+		return fmt.Errorf("%s", strings.Join(errors, "\n"))
+	}
+
+	return nil
+}
+
+// CopyToPipe connects streamconfig with a libcontainerd.IOPipe
+func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) {
+	copyFunc := func(w io.Writer, r io.ReadCloser) {
+		c.Add(1)
+		go func() {
+			if _, err := pools.Copy(w, r); err != nil {
+				logrus.Errorf("stream copy error: %+v", err)
+			}
+			r.Close()
+			c.Done()
+		}()
+	}
+
+	if iop.Stdout != nil {
+		copyFunc(c.Stdout(), iop.Stdout)
+	}
+	if iop.Stderr != nil {
+		copyFunc(c.Stderr(), iop.Stderr)
+	}
+
+	if stdin := c.Stdin(); stdin != nil {
+		if iop.Stdin != nil {
+			go func() {
+				pools.Copy(iop.Stdin, stdin)
+				if err := iop.Stdin.Close(); err != nil {
+					logrus.Warnf("failed to close stdin: %+v", err)
+				}
+			}()
+		}
+	}
+}
diff --git a/container/view.go b/container/view.go
new file mode 100644
index 0000000..f605b4f
--- /dev/null
+++ b/container/view.go
@@ -0,0 +1,302 @@
+package container
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/docker/go-connections/nat"
+	"github.com/hashicorp/go-memdb"
+)
+
+const (
+	memdbTable   = "containers"
+	memdbIDIndex = "id"
+)
+
+// Snapshot is a read only view for Containers. It holds all information necessary
+// to serve container queries in a versioned ACID in-memory store.
+type Snapshot struct {
+	types.Container
+
+	// additional info queries need to filter on
+	// preserve nanosec resolution for queries
+	CreatedAt    time.Time
+	StartedAt    time.Time
+	Name         string
+	Pid          int
+	ExitCode     int
+	Running      bool
+	Paused       bool
+	Managed      bool
+	ExposedPorts nat.PortSet
+	PortBindings nat.PortSet
+	Health       string
+	HostConfig   struct {
+		Isolation string
+	}
+}
+
+// ViewDB provides an in-memory transactional (ACID) container Store
+type ViewDB interface {
+	Snapshot(nameIndex *registrar.Registrar) View
+	Save(*Container) error
+	Delete(*Container) error
+}
+
+// View can be used by readers to avoid locking
+type View interface {
+	All() ([]Snapshot, error)
+	Get(id string) (*Snapshot, error)
+}
+
+var schema = &memdb.DBSchema{
+	Tables: map[string]*memdb.TableSchema{
+		memdbTable: {
+			Name: memdbTable,
+			Indexes: map[string]*memdb.IndexSchema{
+				memdbIDIndex: {
+					Name:    memdbIDIndex,
+					Unique:  true,
+					Indexer: &containerByIDIndexer{},
+				},
+			},
+		},
+	},
+}
+
+type memDB struct {
+	store *memdb.MemDB
+}
+
+// NoSuchContainerError indicates that the container wasn't found in the
+// database.
+type NoSuchContainerError struct {
+	id string
+}
+
+// Error satisfies the error interface.
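+// Callers can detect this case with a type assertion (sketch):
+//
+//	if _, ok := err.(NoSuchContainerError); ok {
+//		// the container is not present in this view
+//	}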
+func (e NoSuchContainerError) Error() string {
+	return "no such container " + e.id
+}
+
+// NewViewDB provides the default implementation, with the default schema
+func NewViewDB() (ViewDB, error) {
+	store, err := memdb.NewMemDB(schema)
+	if err != nil {
+		return nil, err
+	}
+	return &memDB{store: store}, nil
+}
+
+// Snapshot provides a consistent read-only View of the database
+func (db *memDB) Snapshot(index *registrar.Registrar) View {
+	return &memdbView{
+		txn:       db.store.Txn(false),
+		nameIndex: index.GetAll(),
+	}
+}
+
+// Save atomically updates the in-memory store state for a Container.
+// Only read only (deep) copies of containers may be passed in.
+func (db *memDB) Save(c *Container) error {
+	txn := db.store.Txn(true)
+	defer txn.Commit()
+	return txn.Insert(memdbTable, c)
+}
+
+// Delete removes an item by ID
+func (db *memDB) Delete(c *Container) error {
+	txn := db.store.Txn(true)
+	defer txn.Commit()
+	return txn.Delete(memdbTable, NewBaseContainer(c.ID, c.Root))
+}
+
+type memdbView struct {
+	txn       *memdb.Txn
+	nameIndex map[string][]string
+}
+
+// All returns all items in this snapshot. Returned objects must never be modified.
+func (v *memdbView) All() ([]Snapshot, error) {
+	var all []Snapshot
+	iter, err := v.txn.Get(memdbTable, memdbIDIndex)
+	if err != nil {
+		return nil, err
+	}
+	for {
+		item := iter.Next()
+		if item == nil {
+			break
+		}
+		snapshot := v.transform(item.(*Container))
+		all = append(all, *snapshot)
+	}
+	return all, nil
+}
+
+// Get returns an item by id. Returned objects must never be modified.
+func (v *memdbView) Get(id string) (*Snapshot, error) {
+	s, err := v.txn.First(memdbTable, memdbIDIndex, id)
+	if err != nil {
+		return nil, err
+	}
+	if s == nil {
+		return nil, NoSuchContainerError{id: id}
+	}
+	return v.transform(s.(*Container)), nil
+}
+
+// transform maps a (deep) copied Container object to what queries need.
+// A lock on the Container is not held because these are immutable deep copies.
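+// It is only reached through the read path, e.g. (sketch, names illustrative):
+//
+//	view := db.Snapshot(nameIndex)
+//	snapshot, err := view.Get(containerID)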
+func (v *memdbView) transform(container *Container) *Snapshot { + snapshot := &Snapshot{ + Container: types.Container{ + ID: container.ID, + Names: v.nameIndex[container.ID], + ImageID: container.ImageID.String(), + Ports: []types.Port{}, + Mounts: container.GetMountPoints(), + State: container.State.StateString(), + Status: container.State.String(), + Created: container.Created.Unix(), + }, + CreatedAt: container.Created, + StartedAt: container.StartedAt, + Name: container.Name, + Pid: container.Pid, + Managed: container.Managed, + ExposedPorts: make(nat.PortSet), + PortBindings: make(nat.PortSet), + Health: container.HealthString(), + Running: container.Running, + Paused: container.Paused, + ExitCode: container.ExitCode(), + } + + if snapshot.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + snapshot.Names = []string{} + } + + if container.HostConfig != nil { + snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation) + for binding := range container.HostConfig.PortBindings { + snapshot.PortBindings[binding] = struct{}{} + } + } + + if container.Config != nil { + snapshot.Image = container.Config.Image + snapshot.Labels = container.Config.Labels + for exposed := range container.Config.ExposedPorts { + snapshot.ExposedPorts[exposed] = struct{}{} + } + } + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + snapshot.Command = container.Path + } + + snapshot.Ports = []types.Port{} + networks := make(map[string]*network.EndpointSettings) + if container.NetworkSettings != nil { + for name, netw := range container.NetworkSettings.Networks { + if netw == nil || netw.EndpointSettings == nil { + continue + } + networks[name] = &network.EndpointSettings{ + EndpointID: netw.EndpointID, + Gateway: netw.Gateway, + IPAddress: netw.IPAddress, + IPPrefixLen: netw.IPPrefixLen, + IPv6Gateway: netw.IPv6Gateway, + GlobalIPv6Address: netw.GlobalIPv6Address, + GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen, + MacAddress: netw.MacAddress, + NetworkID: netw.NetworkID, + } + if netw.IPAMConfig != nil { + networks[name].IPAMConfig = &network.EndpointIPAMConfig{ + IPv4Address: netw.IPAMConfig.IPv4Address, + IPv6Address: netw.IPAMConfig.IPv6Address, + } + } + } + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + logrus.Warnf("invalid port map %+v", err) + continue + } + if len(bindings) == 0 { + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + logrus.Warnf("invalid host port map %+v", err) + continue + } + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + } + snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + return snapshot +} + +// containerByIDIndexer is used to extract the ID field from Container types. +// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct. 
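+// Both FromObject and FromArgs append that terminator, so a container with
+// ID "abc" is indexed under the bytes "abc\x00".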
+type containerByIDIndexer struct{}
+
+// FromObject implements the memdb.SingleIndexer interface for Container objects
+func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	c, ok := obj.(*Container)
+	if !ok {
+		return false, nil, fmt.Errorf("%T is not a Container", obj)
+	}
+	// Add the null character as a terminator
+	v := c.ID + "\x00"
+	return true, []byte(v), nil
+}
+
+// FromArgs implements the memdb.Indexer interface
+func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
diff --git a/container/view_test.go b/container/view_test.go
new file mode 100644
index 0000000..9b87299
--- /dev/null
+++ b/container/view_test.go
@@ -0,0 +1,106 @@
+package container
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/pborman/uuid"
+)
+
+var root string
+
+func TestMain(m *testing.M) {
+	var err error
+	root, err = ioutil.TempDir("", "docker-container-test-")
+	if err != nil {
+		panic(err)
+	}
+	defer os.RemoveAll(root)
+
+	os.Exit(m.Run())
+}
+
+func newContainer(t *testing.T) *Container {
+	var (
+		id    = uuid.New()
+		cRoot = filepath.Join(root, id)
+	)
+	if err := os.MkdirAll(cRoot, 0755); err != nil {
+		t.Fatal(err)
+	}
+	c := NewBaseContainer(id, cRoot)
+	c.HostConfig = &containertypes.HostConfig{}
+	return c
+}
+
+func TestViewSaveDelete(t *testing.T) {
+	db, err := NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+	c := newContainer(t)
+	if err := c.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	if err := db.Delete(c); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestViewAll(t *testing.T) {
+	var (
+		db, _ = NewViewDB()
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+		two   = newContainer(t)
+	)
+	one.Pid = 10
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	two.Pid = 20
+	if err := two.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+
+	all, err := db.Snapshot(names).All()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if l := len(all); l != 2 {
+		t.Fatalf("expected 2 items, got %d", l)
+	}
+	byID := make(map[string]Snapshot)
+	for i := range all {
+		byID[all[i].ID] = all[i]
+	}
+	if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
+		t.Fatalf("unexpected snapshot for id=%s: %v", one.ID, s)
+	}
+	if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
+		t.Fatalf("unexpected snapshot for id=%s: %v", two.ID, s)
+	}
+}
+
+func TestViewGet(t *testing.T) {
+	var (
+		db, _ = NewViewDB()
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+	)
+	one.ImageID = "some-image-123"
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	s, err := db.Snapshot(names).Get(one.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if s == nil || s.ImageID != "some-image-123" {
+		t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
+	}
+}
diff --git a/contrib/README.md b/contrib/README.md
new file mode 100644
index 0000000..92b1d94
--- /dev/null
+++ b/contrib/README.md
@@ -0,0 +1,4 @@
+The `contrib` directory contains scripts, images, and other helpful things
+which are not part of the core docker distribution.
Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. diff --git a/contrib/REVIEWERS b/contrib/REVIEWERS new file mode 100644 index 0000000..18e05a3 --- /dev/null +++ b/contrib/REVIEWERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/contrib/apparmor/main.go b/contrib/apparmor/main.go new file mode 100644 index 0000000..f4a2978 --- /dev/null +++ b/contrib/apparmor/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "log" + "os" + "path" + "text/template" + + "github.com/docker/docker/pkg/aaparser" +) + +type profileData struct { + Version int +} + +func main() { + if len(os.Args) < 2 { + log.Fatal("pass a filename to save the profile in.") + } + + // parse the arg + apparmorProfilePath := os.Args[1] + + version, err := aaparser.GetVersion() + if err != nil { + log.Fatal(err) + } + data := profileData{ + Version: version, + } + fmt.Printf("apparmor_parser is of version %+v\n", data) + + // parse the template + compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) + if err != nil { + log.Fatalf("parsing template failed: %v", err) + } + + // make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { + log.Fatal(err) + } + + f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + if err := compiled.Execute(f, data); err != nil { + log.Fatalf("executing template failed: %v", err) + } + + fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath) +} diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go new file mode 100644 index 0000000..e5e1c8b --- /dev/null +++ b/contrib/apparmor/template.go @@ -0,0 +1,268 @@ +package main + +const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. 
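+  # (AppArmor file permissions used below: "m" = memory-map as executable,
+  #  "k" = file locking, "l" = creation of hard links.)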
+ deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + mount -> /.pivot_root[0-9]*/, + + / r, + + umount, + pivot_root, +{{if ge .Version 209000}} + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), +{{end}} + network, + capability, + owner /** rw, + @{DOCKER_GRAPH_PATH}/** rwl, + @{DOCKER_GRAPH_PATH}/linkgraph.db k, + @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, + @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, + + # For non-root client use: + /dev/urandom r, + /dev/null rw, + /dev/pts/[0-9]* rw, + /run/docker.sock rw, + /proc/** r, + /proc/[0-9]*/attr/exec w, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + /etc/ld.so.cache r, + /etc/passwd r, + +{{if ge .Version 209000}} + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, +{{end}} + + /usr/lib/** rm, + /lib/** rm, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /sbin/mke2fs rCx, + /sbin/tune2fs rCx, + /sbin/blkid rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/tar rCx, + /bin/cat rCx, + /sbin/zfs rCx, + /sbin/apparmor_parser rCx, + +{{if ge .Version 209000}} + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, +{{end}} + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** rm, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + +{{if ge .Version 209000}} + # We don't need ptrace so we'll deny and ignore the error. + deny ptrace (read, trace), +{{end}} + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** rm, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_module, + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. 
+ profile /usr/bin/xz (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + /etc/ld.so.cache r, + /lib/** rm, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** rm, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + + /proc r, + + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } + profile /sbin/mke2fs (complain) { + /sbin/mke2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/mke2fs.conf r, + /etc/mtab r, + + /dev/dm-* rw, + /dev/urandom r, + /dev/null rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/tune2fs (complain) { + /sbin/tune2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/blkid.conf r, + /etc/mtab r, + /etc/ld.so.cache r, + + /dev/null rw, + /dev/.blkid.tab r, + /dev/dm-* rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/blkid (complain) { + /sbin/blkid rm, + + /lib/** rm, + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/blkid.conf r, + + /dev/null rw, + /dev/.blkid.tab rl, + /dev/.blkid.tab* rwl, + /dev/dm-* r, + + /sys/devices/virtual/block/** r, + + capability mknod, + + mount -> @{DOCKER_GRAPH_PATH}/**, + } + profile /sbin/apparmor_parser (complain) { + /sbin/apparmor_parser rm, + + /lib/** rm, + + /etc/ld.so.cache r, + /etc/apparmor/** r, + /etc/apparmor.d/** r, + /etc/apparmor.d/cache/** w, + + /dev/null rw, + + /sys/kernel/security/apparmor/** r, + /sys/kernel/security/apparmor/.replace w, + + /proc/[0-9]*/mounts r, + /proc/sys/kernel/osrelease r, + /proc r, + + capability mac_admin, + } +}` diff --git a/contrib/builder/deb/aarch64/build.sh b/contrib/builder/deb/aarch64/build.sh new file mode 100755 index 0000000..f8b2506 --- /dev/null +++ b/contrib/builder/deb/aarch64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/contrib/builder/deb/aarch64/debian-jessie/Dockerfile b/contrib/builder/deb/aarch64/debian-jessie/Dockerfile new file mode 100644 index 0000000..0e80da4 --- /dev/null +++ b/contrib/builder/deb/aarch64/debian-jessie/Dockerfile @@ -0,0 +1,25 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/debian:jessie + +RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. 
+ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/contrib/builder/deb/aarch64/debian-stretch/Dockerfile b/contrib/builder/deb/aarch64/debian-stretch/Dockerfile new file mode 100644 index 0000000..caee191 --- /dev/null +++ b/contrib/builder/deb/aarch64/debian-stretch/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/debian:stretch + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/contrib/builder/deb/aarch64/generate.sh b/contrib/builder/deb/aarch64/generate.sh new file mode 100755 index 0000000..7c9217d --- /dev/null +++ b/contrib/builder/deb/aarch64/generate.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-trusty +# to only update ubuntu-trusty/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it +# +# Note: non-LTS versions are not guaranteed to work. + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="aarch64/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! 
+ # + + FROM $from + + EOF + + dockerBuildTags='apparmor selinux' + runcBuildTags='apparmor selinux' + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + # aarch64 doesn't have an official downloadable binary for go. + # And gccgo for trusty only includes Go 1.2 implementation which + # is too old to build current go source, fortunately trusty has + # golang-1.6-go package can be used as bootstrap. + packages+=( golang-1.6-go ) + ;; + jessie) + packages+=( libsystemd-journal-dev ) + # aarch64 doesn't have an official downloadable binary for go. + # And gccgo for jessie only includes Go 1.2 implementation which + # is too old to build current go source, fortunately jessie backports + # has golang-1.6-go package can be used as bootstrap. + packages+=( golang-1.6-go libseccomp-dev ) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + stretch|xenial) + packages+=( libsystemd-dev ) + packages+=( golang-go libseccomp-dev ) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + *) + echo "Unsupported distro:" $distro:$suite + rm -fr "$version" + exit 1 + ;; + esac + + case "$suite" in + jessie) + echo 'RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list' >> "$version/Dockerfile" + ;; + *) + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$suite" in + jessie|trusty) + echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) + ;; + esac + + echo "# Install Go" >> "$version/Dockerfile" + echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile" + echo "# the image to build go from source." 
>> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" + echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" + echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" + echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV PATH /usr/src/go/bin:$PATH' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..6f4a3e9 --- /dev/null +++ b/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile @@ -0,0 +1,24 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..19a510b --- /dev/null +++ b/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. 
+ENV GO_VERSION 1.8.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/contrib/builder/deb/amd64/README.md b/contrib/builder/deb/amd64/README.md new file mode 100644 index 0000000..20a0ff1 --- /dev/null +++ b/contrib/builder/deb/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/deb/amd64/build.sh b/contrib/builder/deb/amd64/build.sh new file mode 100755 index 0000000..f8b2506 --- /dev/null +++ b/contrib/builder/deb/amd64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/contrib/builder/deb/amd64/debian-jessie/Dockerfile new file mode 100644 index 0000000..9b6233c --- /dev/null +++ b/contrib/builder/deb/amd64/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/contrib/builder/deb/amd64/debian-stretch/Dockerfile new file mode 100644 index 0000000..d95c194 --- /dev/null +++ b/contrib/builder/deb/amd64/debian-stretch/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
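
build.sh first re-runs generate.sh and then builds one image per subdirectory, tagged dockercore/builder-deb:<version>. A hedged usage sketch (the docker run line is illustrative, not from the patch):

    cd contrib/builder/deb/amd64
    ./build.sh
    # e.g. sanity-check one of the resulting builder images:
    docker run --rm dockercore/builder-deb:debian-jessie go version
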
+# + +FROM debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile new file mode 100644 index 0000000..56763a5 --- /dev/null +++ b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:wheezy-backports + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/generate.sh b/contrib/builder/deb/amd64/generate.sh new file mode 100755 index 0000000..5708def --- /dev/null +++ b/contrib/builder/deb/amd64/generate.sh @@ -0,0 +1,130 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + from+='-backports' + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
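
The ARG APT_MIRROR / sed pair above makes the Debian mirror overridable at build time instead of being baked in. For example (the mirror hostname here is made up):

    docker build --build-arg APT_MIRROR=mirror.example.com \
        -t dockercore/builder-deb:debian-stretch debian-stretch
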
+ # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [ "$distro" = "debian" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags= + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev ) ;; + *) packages+=( libsystemd-dev ) ;; + esac + + # debian wheezy does not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..f620116 --- /dev/null +++ b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE 
"contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..6b9370d --- /dev/null +++ b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..232c86f --- /dev/null +++ b/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile new file mode 100644 index 0000000..0b4e45b --- /dev/null +++ b/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM ubuntu:zesty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/contrib/builder/deb/armhf/debian-jessie/Dockerfile new file mode 100644 index 0000000..9a50cd1 --- /dev/null +++ b/contrib/builder/deb/armhf/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/armhf/generate.sh b/contrib/builder/deb/armhf/generate.sh new file mode 100755 index 0000000..285dbbf --- /dev/null +++ b/contrib/builder/deb/armhf/generate.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + raspbian:jessie) + from="resin/rpi-raspbian:jessie" + ;; + *) + from="armhf/$from" + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+	#
+
+	FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then
+		cat >> "$version/Dockerfile" <<-'EOF'
+			# allow replacing httpredir or deb mirror
+			ARG APT_MIRROR=deb.debian.org
+			RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
+		EOF
+
+		if [ "$suite" = "wheezy" ]; then
+			cat >> "$version/Dockerfile" <<-'EOF'
+				RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list
+			EOF
+		fi
+
+		echo "" >> "$version/Dockerfile"
+	fi
+
+	extraBuildTags=
+	runcBuildTags=
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		apparmor # for apparmor_parser for testing the profile
+		bash-completion # for bash-completion debhelper integration
+		btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
+		build-essential # "essential for building Debian packages"
+		cmake # tini dep
+		curl ca-certificates # for downloading Go
+		debhelper # for easy ".deb" building
+		dh-apparmor # for apparmor debhelper
+		dh-systemd # for systemd debhelper integration
+		git # for "git commit" info in "docker -v"
+		libapparmor-dev # for "sys/apparmor.h"
+		libdevmapper-dev # for "libdevmapper.h"
+		libseccomp-dev # for "seccomp.h" & "libseccomp.so"
+		pkg-config # for detecting things like libsystemd-journal dynamically
+		vim-common # tini dep
+	)
+	# packaging for "sd-journal.h" and libraries varies
+	case "$suite" in
+		wheezy) ;;
+		jessie|trusty) packages+=( libsystemd-journal-dev ) ;;
+		*) packages+=( libsystemd-dev ) ;;
+	esac
+
+	# debian wheezy does not have the right libseccomp libs
+	# debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :(
+	case "$suite" in
+		wheezy|jessie|trusty)
+			packages=( "${packages[@]/libseccomp-dev}" )
+			runcBuildTags="apparmor selinux"
+			;;
+		*)
+			extraBuildTags+=' seccomp'
+			runcBuildTags="apparmor seccomp selinux"
+			;;
+	esac
+
+	if [ "$suite" = 'wheezy' ]; then
+		# pull a couple packages from backports explicitly
+		# (build failures otherwise)
+		backportsPackages=( btrfs-tools )
+		for pkg in "${backportsPackages[@]}"; do
+			packages=( "${packages[@]/$pkg}" )
+		done
+		echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+	fi
+
+	echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.armhf >> "$version/Dockerfile"
+	if [ "$distro" == 'raspbian' ];
+	then
+		cat <<EOF >> "$version/Dockerfile"
+# GOARM is the ARM architecture version which is unrelated to the above Golang version
+ENV GOARM 6
+EOF
+	fi
+	echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	# print build tags in alphabetical order
+	buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
+
+	echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
+	echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
+done
diff --git a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile new file mode 100644 index 0000000..810cb2a --- /dev/null +++ b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM resin/rpi-raspbian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..530f9fc --- /dev/null +++ b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..a7f79ee --- /dev/null +++ b/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM armhf/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..69f1bed --- /dev/null +++ b/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
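
GOARM selects the ARM instruction/FPU baseline for the Go toolchain and, as the generated comment says, is independent of the Go release; GOARM=6 keeps the armhf binaries usable on ARMv6 hardware such as the original Raspberry Pi. A sketch of the same knob in an ordinary cross-compile (the package path is hypothetical):

    # build an ARMv6-compatible binary; ./cmd/demo is a made-up path
    GOOS=linux GOARCH=arm GOARM=6 go build -o demo-armv6 ./cmd/demo
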
+# + +FROM armhf/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/ppc64le/build.sh b/contrib/builder/deb/ppc64le/build.sh new file mode 100755 index 0000000..83dbf94 --- /dev/null +++ b/contrib/builder/deb/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/contrib/builder/deb/ppc64le/generate.sh b/contrib/builder/deb/ppc64le/generate.sh new file mode 100755 index 0000000..bf777ab --- /dev/null +++ b/contrib/builder/deb/ppc64le/generate.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+ # + + FROM $from + + EOF + + extraBuildTags= + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + ;; + *) + # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" + packages+=( libseccomp-dev ) + packages+=( libsystemd-dev ) + ;; + esac + + # buildtags + case "$suite" in + # trusty has no seccomp package + trusty) + runcBuildTags="apparmor selinux" + ;; + # ppc64le support was backported into libseccomp 2.2.3-2, + # so enable seccomp by default + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000..0841a99 --- /dev/null +++ b/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
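
The suite switch above is driven purely by library availability: trusty has no usable libseccomp for ppc64le, while ppc64le support was backported into libseccomp 2.2.3-2, so every later suite gets the seccomp tag. To see what a given builder image actually ships (a sketch using standard dpkg tooling):

    # inside a Debian/Ubuntu builder image:
    dpkg-query -W -f='${Package} ${Version}\n' libseccomp-dev
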
+# + +FROM ppc64le/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..f9dd250 --- /dev/null +++ b/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! +# + +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..539969d --- /dev/null +++ b/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+# + +FROM ppc64le/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/s390x/build.sh b/contrib/builder/deb/s390x/build.sh new file mode 100755 index 0000000..f8b2506 --- /dev/null +++ b/contrib/builder/deb/s390x/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/contrib/builder/deb/s390x/generate.sh b/contrib/builder/deb/s390x/generate.sh new file mode 100755 index 0000000..c04d24e --- /dev/null +++ b/contrib/builder/deb/s390x/generate.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="s390x/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! 
+ # + + FROM $from + + EOF + + extraBuildTags= + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + pkg-config # for detecting things like libsystemd-journal dynamically + libsystemd-dev + vim-common # tini dep + ) + + case "$suite" in + # s390x needs libseccomp 2.3.1 + xenial) + # Ubuntu Xenial has libseccomp 2.2.3 + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor selinux seccomp" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.s390x >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000..2ed0ad3 --- /dev/null +++ b/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! +# + +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000..af79ff5 --- /dev/null +++ b/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! 
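
Note the asymmetry the s390x generator encodes: libseccomp-dev is installed on every suite, but on xenial (libseccomp 2.2.3) the runc seccomp tag stays off because s390x support needs 2.3.1. Assuming the dev package ships its libseccomp.pc file, the installed version can be read directly (a sketch):

    pkg-config --modversion libseccomp   # e.g. 2.2.3 on xenial
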
+# + +FROM s390x/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/contrib/builder/rpm/amd64/README.md b/contrib/builder/rpm/amd64/README.md new file mode 100644 index 0000000..5f2e888 --- /dev/null +++ b/contrib/builder/rpm/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. + +To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile b/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile new file mode 100644 index 0000000..7632daf --- /dev/null +++ b/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM amazonlinux:latest + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/amd64/build.sh b/contrib/builder/rpm/amd64/build.sh new file mode 100755 index 0000000..1e3565a --- /dev/null +++ b/contrib/builder/rpm/amd64/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/contrib/builder/rpm/amd64/centos-7/Dockerfile b/contrib/builder/rpm/amd64/centos-7/Dockerfile new file mode 100644 index 0000000..82cb915 --- /dev/null +++ b/contrib/builder/rpm/amd64/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/contrib/builder/rpm/amd64/fedora-24/Dockerfile new file mode 100644 index 0000000..cdb922c --- /dev/null +++ b/contrib/builder/rpm/amd64/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/contrib/builder/rpm/amd64/fedora-25/Dockerfile new file mode 100644 index 0000000..73e9e9a --- /dev/null +++ b/contrib/builder/rpm/amd64/fedora-25/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:25 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/amd64/generate.sh b/contrib/builder/rpm/amd64/generate.sh new file mode 100755 index 0000000..15add3f --- /dev/null +++ b/contrib/builder/rpm/amd64/generate.sh @@ -0,0 +1,187 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + if [[ "$distro" == "photon" ]]; then + installer=tdnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
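
The rpm generator dispatches on distro to pick a package-manager front end: yum by default, dnf for Fedora, tdnf for Photon (openSUSE is special-cased onto zypper further down). The same dispatch could be phrased as a lookup table; a sketch of an equivalent formulation, not what the script actually does:

    declare -A installers=( [fedora]=dnf [photon]=tdnf )
    installer="${installers[$distro]:-yum}"
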
+	#
+
+	FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	extraBuildTags=
+	runcBuildTags=
+
+	case "$from" in
+		oraclelinux:6)
+			# We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version
+			# This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo
+			echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile"
+			echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile"
+			echo >> "$version/Dockerfile"
+			;;
+		fedora:*)
+			echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile"
+			;;
+		*) ;;
+	esac
+
+	case "$from" in
+		centos:*|amazonlinux:latest)
+			# get "Development Tools" packages dependencies
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+
+			if [[ "$version" == "centos-7" ]]; then
+				echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile"
+			fi
+			;;
+		oraclelinux:*)
+			# get "Development Tools" packages and dependencies
+			# we also need yum-utils for yum-config-manager to pull the latest repo file
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+			;;
+		opensuse:*)
+			# get rpm-build and curl packages and dependencies
+			echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile"
+			;;
+		photon:*)
+			echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils" >> "$version/Dockerfile"
+			;;
+		*)
+			echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile"
+			;;
+	esac
+
+	packages=(
+		btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
+		device-mapper-devel # for "libdevmapper.h"
+		glibc-static
+		libseccomp-devel # for "seccomp.h" & "libseccomp.so"
+		libselinux-devel # for "libselinux.so"
+		pkgconfig # for the pkg-config command
+		selinux-policy
+		selinux-policy-devel
+		systemd-devel # for "sd-journal.h" and libraries
+		tar # older versions of dev-tools do not have tar
+		git # required for containerd and runc clone
+		cmake # tini build
+		vim-common # tini build
+	)
+
+	case "$from" in
+		oraclelinux:7)
+			# Enable the optional repository
+			packages=( --enablerepo=ol7_optional_latest "${packages[*]}" )
+			;;
+	esac
+
+	case "$from" in
+		oraclelinux:6|amazonlinux:latest)
+			# doesn't use systemd, doesn't have a devel package for it
+			packages=( "${packages[@]/systemd-devel}" )
+			;;
+	esac
+
+	# opensuse & oraclelinux:6 do not have the right libseccomp libs
+	case "$from" in
+		opensuse:*|oraclelinux:6)
+			packages=( "${packages[@]/libseccomp-devel}" )
+			runcBuildTags="selinux"
+			;;
+		*)
+			extraBuildTags+=' seccomp'
+			runcBuildTags="seccomp selinux"
+			;;
+	esac
+
+	case "$from" in
+		opensuse:*)
+			packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" )
+			packages=( "${packages[@]/pkgconfig/pkg-config}" )
+			packages=( "${packages[@]/vim-common/vim}" )
+			if [[ "$from" == "opensuse:13."* ]]; then
+				packages+=( systemd-rpm-macros )
+			fi
+
+			# use zypper
+			echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile"
+			;;
+		photon:*)
+			packages=( "${packages[@]/pkgconfig/pkg-config}" )
+			echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile"
+			;;
+		*)
+			echo "RUN ${installer} install -y ${packages[*]}" >>
"$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. + echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + +done diff --git a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile new file mode 100644 index 0000000..af97fdc --- /dev/null +++ b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM opensuse:13.2 + +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile new file mode 100644 index 0000000..8e15365 --- /dev/null +++ b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile @@ -0,0 +1,28 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM oraclelinux:6 + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile new file mode 100644 index 0000000..7e5a832 --- /dev/null +++ b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/contrib/builder/rpm/amd64/photon-1.0/Dockerfile new file mode 100644 index 0000000..6b489d7 --- /dev/null +++ b/contrib/builder/rpm/amd64/photon-1.0/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
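
CGO_CPPFLAGS is read by cgo and passed to the C preprocessor, which is how this image substitutes the pinned UEKR4 kernel headers for the stock ones; per the generator comment, the -I order must be preserved. In effect the packaging build runs under something like the following (the include list is abbreviated here for illustration):

    # sketch: cgo-using packages now compile against the UEK headers
    export CGO_CPPFLAGS='-D__EXPORTED_HEADERS__ -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include'
    go build ./...
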
+# + +FROM photon:1.0 + +RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils +RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/armhf/build.sh b/contrib/builder/rpm/armhf/build.sh new file mode 100755 index 0000000..1e3565a --- /dev/null +++ b/contrib/builder/rpm/armhf/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/contrib/builder/rpm/armhf/centos-7/Dockerfile b/contrib/builder/rpm/armhf/centos-7/Dockerfile new file mode 100644 index 0000000..f482fa7 --- /dev/null +++ b/contrib/builder/rpm/armhf/centos-7/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/armhf/generate.sh"! +# + +FROM multiarch/centos:7.2.1511-armhfp-clean + +RUN yum install -y yum-plugin-ovl +RUN yum groupinstall --skip-broken -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/armhf/generate.sh b/contrib/builder/rpm/armhf/generate.sh new file mode 100755 index 0000000..b07231e --- /dev/null +++ b/contrib/builder/rpm/armhf/generate.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +# vim: set ts=4 sw=4 noet : + +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + + mkdir -p "$version" + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + image="multiarch/centos:7.2.1511-armhfp-clean" + ;; + *) + image="${from}" + ;; + esac + + echo "$version -> FROM $image" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/armhf/generate.sh"! 
+ # + + FROM $image + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags= + runcBuildTags= + + case "$from" in + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + + echo 'RUN yum install -y yum-plugin-ovl' >> "$version/Dockerfile" + + echo 'RUN yum groupinstall --skip-broken -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.armhf >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" +done diff --git a/contrib/builder/rpm/ppc64le/build.sh b/contrib/builder/rpm/ppc64le/build.sh new file mode 100755 index 0000000..1e3565a --- /dev/null +++ b/contrib/builder/rpm/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/contrib/builder/rpm/ppc64le/centos-7/Dockerfile b/contrib/builder/rpm/ppc64le/centos-7/Dockerfile new file mode 100644 index 0000000..95da278 --- /dev/null +++ b/contrib/builder/rpm/ppc64le/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! 
+# + +FROM ppc64le/centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile b/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile new file mode 100644 index 0000000..70a9f5d --- /dev/null +++ b/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! +# + +FROM ppc64le/fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/ppc64le/generate.sh b/contrib/builder/rpm/ppc64le/generate.sh new file mode 100755 index 0000000..c89f5a3 --- /dev/null +++ b/contrib/builder/rpm/ppc64le/generate.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! 
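+	# (Same pattern as the armhf generator: everything below this header is
+	# appended by the case statements in this script, e.g. the opensuse branch
+	# first adds the ppc64le ports repositories so zypper can install packages.)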
+ # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags= + runcBuildTags= + + case "$from" in + ppc64le/fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + ppc64le/centos:*) + # get "Development Tools" packages dependencies + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + ppc64le/opensuse:*) + # Add the ppc64le repo (hopefully the image is updated soon) + # get rpm-build and curl packages and dependencies + echo "RUN zypper addrepo -n ppc64le-oss -f https://download.opensuse.org/ports/ppc/distribution/leap/${suite}/repo/oss/ ppc64le-oss" >> "$version/Dockerfile" + echo "RUN zypper addrepo -n ppc64le-updates -f https://download.opensuse.org/ports/update/${suite}/ ppc64le-updates" >> "$version/Dockerfile" + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + # opensuse does not have the right libseccomp libs + case "$from" in + ppc64le/opensuse:*) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + ppc64le/opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" == "ppc64le/opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + +done diff --git a/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile b/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile new file mode 100644 index 0000000..d11caa4 --- /dev/null +++ 
b/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/ppc64le/generate.sh"! +# + +FROM ppc64le/opensuse:42.1 + +RUN zypper addrepo -n ppc64le-oss -f https://download.opensuse.org/ports/ppc/distribution/leap/42.1/repo/oss/ ppc64le-oss +RUN zypper addrepo -n ppc64le-updates -f https://download.opensuse.org/ports/update/42.1/ ppc64le-updates +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim + +ENV GO_VERSION 1.8.3 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/contrib/builder/rpm/s390x/build.sh b/contrib/builder/rpm/s390x/build.sh new file mode 100755 index 0000000..1e3565a --- /dev/null +++ b/contrib/builder/rpm/s390x/build.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile b/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile new file mode 100644 index 0000000..1310b21 --- /dev/null +++ b/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/s390x/generate.sh"! +# + +FROM sinenomine/clefos-base-s390x + + +RUN touch /var/lib/rpm/* && yum groupinstall -y "Development Tools" +RUN touch /var/lib/rpm/* && yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux +RUN ln -s /usr/bin/gcc /usr/bin/s390x-linux-gnu-gcc diff --git a/contrib/builder/rpm/s390x/generate.sh b/contrib/builder/rpm/s390x/generate.sh new file mode 100755 index 0000000..81246c5 --- /dev/null +++ b/contrib/builder/rpm/s390x/generate.sh @@ -0,0 +1,144 @@ +#!/usr/bin/env bash +set -e + +# This file is used to auto-generate Dockerfiles for making rpms via 'make rpm' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + case "$distro" in + *opensuse*) + from="opensuse/s390x:tumbleweed" + ;; + *clefos*) + from="sinenomine/${distro}" + ;; + *) + echo No appropriate or supported image available. 
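+			# (Unlike the armhf/ppc64le generators there is no generic fallback
+			# image: only clefos and opensuse tumbleweed bases exist for s390x,
+			# so anything else is rejected here and again in the install cases
+			# below.)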
+ exit 1 + ;; + esac + installer=yum + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/s390x/generate.sh"! + # + + FROM $from + + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='' + runcBuildTags= + + case "$from" in + *clefos*) + # Fix for RHBZ #1213602 + get "Development Tools" packages dependencies + echo 'RUN touch /var/lib/rpm/* && yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + ;; + *opensuse*) + echo "RUN zypper ar https://download.opensuse.org/ports/zsystems/tumbleweed/repo/oss/ tumbleweed" >> "$version/Dockerfile" + # get rpm-build and curl packages and dependencies + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + *) + echo No appropriate or supported image available. + exit 1 + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + *clefos*) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + *opensuse*) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + echo No appropriate or supported image available. + exit 1 + ;; + esac + + case "$from" in + *clefos*) + # Same RHBZ fix needed on all yum lines + echo "RUN touch /var/lib/rpm/* && ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *opensuse*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + + packages+=( systemd-rpm-macros ) # for use of >= opensuse:13.* + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo No appropriate or supported image available. 
+ exit 1 + ;; + esac + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.s390x >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + # TODO: Investigate why "s390x-linux-gnu-gcc" is required + echo "RUN ln -s /usr/bin/gcc /usr/bin/s390x-linux-gnu-gcc" >> "$version/Dockerfile" +done diff --git a/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile b/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile new file mode 100644 index 0000000..05cad42 --- /dev/null +++ b/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/s390x/generate.sh"! +# + +FROM opensuse/s390x:tumbleweed + + +RUN zypper ar https://download.opensuse.org/ports/zsystems/tumbleweed/repo/oss/ tumbleweed +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.8.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux +ENV RUNC_BUILDTAGS selinux +RUN ln -s /usr/bin/gcc /usr/bin/s390x-linux-gnu-gcc diff --git a/contrib/check-config.sh b/contrib/check-config.sh new file mode 100755 index 0000000..88eb8aa --- /dev/null +++ b/contrib/check-config.sh @@ -0,0 +1,360 @@ +#!/usr/bin/env bash +set -e + +EXITCODE=0 + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! 
command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +color() { + local codes=() + if [ "$1" = 'bold' ]; then + codes=( "${codes[@]}" '1' ) + shift + fi + if [ "$#" -gt 0 ]; then + local code= + case "$1" in + # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + black) code=30 ;; + red) code=31 ;; + green) code=32 ;; + yellow) code=33 ;; + blue) code=34 ;; + magenta) code=35 ;; + cyan) code=36 ;; + white) code=37 ;; + esac + if [ "$code" ]; then + codes=( "${codes[@]}" "$code" ) + fi + fi + local IFS=';' + echo -en '\033['"${codes[*]}"'m' +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + EXITCODE=1 + fi +} + +check_flags() { + for flag in "$@"; do + echo -n "- "; check_flag "$flag" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + EXITCODE=1 + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + EXITCODE=1 + fi +} + +check_distro_userns() { + source /etc/os-release 2>/dev/null || /bin/true + if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then + # this is a CentOS7 or RHEL7 system + grep -q "user_namespace.enable=1" /proc/cmdline || { + # no user namespace support enabled + wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" + EXITCODE=1 + } + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." 
white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + EXITCODE=1 + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + EXITCODE=1 + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + KEYS + VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS} + IP_NF_NAT NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -lt 8 ]; then + check_flags DEVPTS_MULTIPLE_INSTANCES +fi + +echo + +echo 'Optional Features:' +{ + check_flags USER_NS + check_distro_userns +} +{ + check_flags SECCOMP +} +{ + check_flags CGROUP_PIDS +} +{ + CODE=${EXITCODE} + check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED + if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then + echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)" + EXITCODE=${CODE} + elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} +{ + if is_set LEGACY_VSYSCALL_NATIVE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled' + echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)" + elif is_set LEGACY_VSYSCALL_EMULATE; then + echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled' + elif is_set LEGACY_VSYSCALL_NONE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled' + echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)" + echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)" + echo " $(wrap_color ' on kernel command line. 
Note that this will disable ASLR for the' bold black)"
+		echo "  $(wrap_color '  VDSO, which may assist in exploiting security vulnerabilities.)' bold black)"
+	# else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
+	# not have these LEGACY_VSYSCALL options and are effectively
+	# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
+	# effectively LEGACY_VSYSCALL_NATIVE.
+	fi
+}
+
+if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then
+	check_flags MEMCG_KMEM
+fi
+
+if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then
+	check_flags RESOURCE_COUNTERS
+fi
+
+if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then
+	netprio=NETPRIO_CGROUP
+else
+	netprio=CGROUP_NET_PRIO
+fi
+
+flags=(
+	BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED
+	CGROUP_PERF
+	CGROUP_HUGETLB
+	NET_CLS_CGROUP $netprio
+	CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
+	IP_VS
+	IP_VS_NFCT
+	IP_VS_RR
+)
+check_flags "${flags[@]}"
+
+if ! is_set EXT4_USE_FOR_EXT2; then
+	check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
+	if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
+		echo "    $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
+	fi
+fi
+
+check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
+if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
+	if is_set EXT4_USE_FOR_EXT2; then
+		echo "    $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
+	else
+		echo "    $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
+	fi
+fi
+
+echo '- Network Drivers:'
+echo '  - "'$(wrap_color 'overlay' blue)'":'
+check_flags VXLAN | sed 's/^/    /'
+echo '      Optional (for encrypted networks):'
+check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
+	XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/      /'
+echo '  - "'$(wrap_color 'ipvlan' blue)'":'
+check_flags IPVLAN | sed 's/^/    /'
+echo '  - "'$(wrap_color 'macvlan' blue)'":'
+check_flags MACVLAN DUMMY | sed 's/^/    /'
+echo '  - "'$(wrap_color 'ftp,tftp client in container' blue)'":'
+check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/    /'
+
+# only fail if no storage drivers available
+CODE=${EXITCODE}
+EXITCODE=0
+STORAGE=1
+
+echo '- Storage Drivers:'
+echo '  - "'$(wrap_color 'aufs' blue)'":'
+check_flags AUFS_FS | sed 's/^/    /'
+if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
+	echo "    $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
+fi
+[ "$EXITCODE" = 0 ] && STORAGE=0
+EXITCODE=0
+
+echo '  - "'$(wrap_color 'btrfs' blue)'":'
+check_flags BTRFS_FS | sed 's/^/    /'
+check_flags BTRFS_FS_POSIX_ACL | sed 's/^/    /'
+[ "$EXITCODE" = 0 ] && STORAGE=0
+EXITCODE=0
+
+echo '  - "'$(wrap_color 'devicemapper' blue)'":'
+check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/    /'
+[ "$EXITCODE" = 0 ] && STORAGE=0
+EXITCODE=0
+
+echo '  - "'$(wrap_color 'overlay' blue)'":'
+check_flags OVERLAY_FS | sed 's/^/    /'
+[ "$EXITCODE" = 0 ] && STORAGE=0
+EXITCODE=0
+
+echo '  - "'$(wrap_color 'zfs' blue)'":'
+echo -n "    - "; check_device /dev/zfs
+echo -n "    - "; check_command zfs
+echo -n "    - "; check_command zpool
+[ "$EXITCODE" = 0 ] && STORAGE=0
+EXITCODE=0
+
+EXITCODE=$CODE
+[ "$STORAGE" = 1 ] && EXITCODE=1
+
+echo
+
+check_limit_over()
+{
+	if [ $(cat "$1") -le "$2" ]; then
+		wrap_bad "- $1" "$(cat $1)"
+		wrap_color "  This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
+		EXITCODE=1
+	else
+		wrap_good "- $1" "$(cat $1)"
+	fi
+}
+
+echo 'Limits:'
+check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
+echo
+
+exit $EXITCODE
diff --git a/contrib/desktop-integration/README.md b/contrib/desktop-integration/README.md
new file mode 100644
index 0000000..85a01b9
--- /dev/null
+++ b/contrib/desktop-integration/README.md
@@ -0,0 +1,11 @@
+Desktop Integration
+===================
+
+The ./contrib/desktop-integration directory contains examples of typical
+dockerized desktop applications.
+
+Examples
+========
+
+* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application
+* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application with devices
diff --git a/contrib/desktop-integration/chromium/Dockerfile b/contrib/desktop-integration/chromium/Dockerfile
new file mode 100644
index 0000000..1872816
--- /dev/null
+++ b/contrib/desktop-integration/chromium/Dockerfile
@@ -0,0 +1,36 @@
+# VERSION: 0.1
+# DESCRIPTION: Create chromium container with its dependencies
+# AUTHOR: Jessica Frazelle
+# COMMENTS:
+#   This file describes how to build a Chromium container with all
+#   dependencies installed. It uses the native X11 unix socket.
+#   Tested on Debian Jessie
+# USAGE:
+#   # Download Chromium Dockerfile
+#   wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile
+#
+#   # Build chromium image
+#   docker build -t chromium .
+#
+#   # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data
+#   docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#     -e DISPLAY=unix$DISPLAY chromium
+
+#   # To run stateful dockerized data containers
+#   docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#     -e DISPLAY=unix$DISPLAY chromium
+
+# Base docker image
+FROM debian:jessie
+LABEL maintainer Jessica Frazelle
+
+# Install Chromium
+RUN apt-get update && apt-get install -y \
+	chromium \
+	chromium-l10n \
+	libcanberra-gtk-module \
+	libexif-dev \
+	--no-install-recommends
+
+# Autorun chromium
+CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"]
diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile
new file mode 100644
index 0000000..8a9b646
--- /dev/null
+++ b/contrib/desktop-integration/gparted/Dockerfile
@@ -0,0 +1,31 @@
+# VERSION: 0.1
+# DESCRIPTION: Create gparted container with its dependencies
+# AUTHOR: Jessica Frazelle
+# COMMENTS:
+#   This file describes how to build a gparted container with all
+#   dependencies installed. It uses the native X11 unix socket.
+#   Tested on Debian Jessie
+# USAGE:
+#   # Download gparted Dockerfile
+#   wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile
+#
+#   # Build gparted image
+#   docker build -t gparted .
+#
+#   docker run -v /tmp/.X11-unix:/tmp/.X11-unix \
+#     --device=/dev/sda:/dev/sda \
+#     -e DISPLAY=unix$DISPLAY gparted
+#
+
+# Base docker image
+FROM debian:jessie
+LABEL maintainer Jessica Frazelle
+
+# Install Gparted and its dependencies
+RUN apt-get update && apt-get install -y \
+	gparted \
+	libcanberra-gtk-module \
+	--no-install-recommends
+
+# Autorun gparted
+CMD ["/usr/sbin/gparted"]
diff --git a/contrib/docker-device-tool/README.md b/contrib/docker-device-tool/README.md
new file mode 100644
index 0000000..6c54d59
--- /dev/null
+++ b/contrib/docker-device-tool/README.md
@@ -0,0 +1,14 @@
+Docker device tool for devicemapper storage driver backend
+===================
+
+The ./contrib/docker-device-tool directory contains a tool to manipulate the devicemapper thin pool.
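+
+For example, once the tool is compiled (see below), the thin pool can be
+inspected and grown (subcommands and flags as defined in device_tool.go;
+`-r` defaults to /var/lib/docker):
+
+    $ ./device_tool status
+    $ ./device_tool -r /var/lib/docker resize 20GB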
+ +Compile +======== + + $ make shell + ## inside build container + $ go build contrib/docker-device-tool/device_tool.go + + # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag + $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go new file mode 100644 index 0000000..fc17166 --- /dev/null +++ b/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,176 @@ +// +build !windows,!solaris + +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} + +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + } + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceID) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionID) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) + break + case "resize": + if flag.NArg() < 2 { + usage() + } + + 
size, err := byteSizeFromString(args[1])
+		if err != nil {
+			fmt.Println("Invalid size: ", err)
+			os.Exit(1)
+		}
+
+		err = devices.ResizePool(size)
+		if err != nil {
+			fmt.Println("Error resizing pool: ", err)
+			os.Exit(1)
+		}
+
+		break
+	case "snap":
+		if flag.NArg() < 3 {
+			usage()
+		}
+
+		err := devices.AddDevice(args[1], args[2], nil)
+		if err != nil {
+			fmt.Println("Can't create snap device: ", err)
+			os.Exit(1)
+		}
+		break
+	case "remove":
+		if flag.NArg() < 2 {
+			usage()
+		}
+
+		err := devicemapper.RemoveDevice(args[1])
+		if err != nil {
+			fmt.Println("Can't remove device: ", err)
+			os.Exit(1)
+		}
+		break
+	case "mount":
+		if flag.NArg() < 3 {
+			usage()
+		}
+
+		err := devices.MountDevice(args[1], args[2], "")
+		if err != nil {
+			fmt.Println("Can't mount device: ", err)
+			os.Exit(1)
+		}
+		break
+	default:
+		fmt.Printf("Unknown command %s\n", args[0])
+		usage()
+
+		os.Exit(1)
+	}
+
+	return
+}
diff --git a/contrib/docker-device-tool/device_tool_windows.go b/contrib/docker-device-tool/device_tool_windows.go
new file mode 100644
index 0000000..da29a2c
--- /dev/null
+++ b/contrib/docker-device-tool/device_tool_windows.go
@@ -0,0 +1,4 @@
+package main
+
+func main() {
+}
diff --git a/contrib/docker-machine-install-bundle.sh b/contrib/docker-machine-install-bundle.sh
new file mode 100755
index 0000000..8605989
--- /dev/null
+++ b/contrib/docker-machine-install-bundle.sh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+#
+# This script installs the bundle to Docker Machine instances, for the purpose
+# of testing the latest Docker with Swarm mode enabled.
+# Do not use in production.
+#
+# Requirements (on host to run this script)
+# - bash is installed
+# - Docker Machine is installed
+# - GNU tar is installed
+#
+# Requirements (on Docker machine instances)
+# - Docker can be managed via one of `systemctl`, `service`, or `/etc/init.d/docker`
+#
+set -e
+set -o pipefail
+
+errexit() {
+	echo "$1"
+	exit 1
+}
+
+BUNDLE="bundles/$(cat VERSION)"
+
+bundle_files(){
+	# prefer dynbinary if exists
+	for f in dockerd docker-proxy; do
+		if [ -d $BUNDLE/dynbinary-daemon ]; then
+			echo $BUNDLE/dynbinary-daemon/$f
+		else
+			echo $BUNDLE/binary-daemon/$f
+		fi
+	done
+	for f in docker-containerd docker-containerd-ctr docker-containerd-shim docker-init docker-runc; do
+		echo $BUNDLE/binary-daemon/$f
+	done
+	if [ -d $BUNDLE/dynbinary-client ]; then
+		echo $BUNDLE/dynbinary-client/docker
+	else
+		echo $BUNDLE/binary-client/docker
+	fi
+}
+
+control_docker(){
+	m=$1; op=$2
+	# NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work
+	#       (but `docker-machine ssh $m sh -c "foo\ bar"` works)
+	#       Anyway we avoid using `sh -c` here for avoiding confusion
+	cat <<EOF | docker-machine ssh $m sudo sh
+if command -v systemctl > /dev/null; then
+	systemctl $op docker
+elif command -v service > /dev/null; then
+	service docker $op
+elif [ -x /etc/init.d/docker ]; then
+	/etc/init.d/docker $op
+else
+	echo "not sure how to control the docker daemon"
+	exit 1
+fi
+EOF
+}
+
+detect_prefix(){
+	m=$1
+	script='dirname $(dirname $(which dockerd))'
+	echo $script | docker-machine ssh $m sh
+}
+
+install_to(){
+	m=$1; shift; files=$@
+	echo "$m: detecting docker"
+	prefix=$(detect_prefix $m)
+	echo "$m: detected docker on $prefix"
+	echo "$m: stopping docker"
+	control_docker $m stop
+	echo "$m: installing docker"
+	# NOTE: GNU tar is required because we use --transform here
+	# TODO: compression (should not be default)
+	tar ch --transform 's/.*\///' $files | docker-machine ssh $m sudo tar Cx $prefix/bin
+	echo "$m: starting docker"
+	control_docker $m start
+	echo "$m: done"
+}
+
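+# Example (hypothetical machine names; "install" is the only verb accepted by
+# the case statement at the bottom of this script):
+#
+#   ./contrib/docker-machine-install-bundle.sh install machine0 machine1
+#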
+check_prereq(){ + command -v docker-machine > /dev/null || errexit "docker-machine not installed" + ( tar --version | grep GNU > /dev/null ) || errexit "GNU tar not installed" +} + +case "$1" in + "install") + shift; machines=$@ + check_prereq + files=$(bundle_files) + echo "Files to be installed:" + for f in $files; do echo $f; done + pids=() + for m in $machines; do + install_to $m $files & + pids+=($!) + done + status=0 + for pid in ${pids[@]}; do + wait $pid || { status=$?; echo "background process $pid failed with exit status $status"; } + done + exit $status + ;; + *) + errexit "Usage: $0 install MACHINES" + ;; +esac diff --git a/contrib/dockerize-disk.sh b/contrib/dockerize-disk.sh new file mode 100755 index 0000000..444e243 --- /dev/null +++ b/contrib/dockerize-disk.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +set -e + +if ! command -v qemu-nbd &> /dev/null; then + echo >&2 'error: "qemu-nbd" not found!' + exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker 
inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/contrib/download-frozen-image-v1.sh b/contrib/download-frozen-image-v1.sh new file mode 100755 index 0000000..77c91d1 --- /dev/null +++ b/contrib/download-frozen-image-v1.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." + for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." 
+ # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/contrib/download-frozen-image-v2.sh b/contrib/download-frozen-image-v2.sh new file mode 100755 index 0000000..2326790 --- /dev/null +++ b/contrib/download-frozen-image-v2.sh @@ -0,0 +1,307 @@ +#!/usr/bin/env bash +set -eo pipefail + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi +if ! command -v jq &> /dev/null; then + echo >&2 'error: "jq" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@digest] ..." + echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +manifestJsonEntries=() +doNotGenerateManifestJson= +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +# bash v4 on Windows CI requires CRLF separator +newlineIFS=$'\n' +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. 
-f1) + if [ "$major" -ge 4 ]; then + newlineIFS=$'\r\n' + fi +fi + +registryBase='https://registry-1.docker.io' +authBase='https://auth.docker.io' +authService='registry.docker.io' + +# https://github.com/moby/moby/issues/33700 +fetch_blob() { + local token="$1"; shift + local image="$1"; shift + local digest="$1"; shift + local targetFile="$1"; shift + local curlArgs=( "$@" ) + + local curlHeaders="$( + curl -S "${curlArgs[@]}" \ + -H "Authorization: Bearer $token" \ + "$registryBase/v2/$image/blobs/$digest" \ + -o "$targetFile" \ + -D- + )" + curlHeaders="$(echo "$curlHeaders" | tr -d '\r')" + if [ "$(echo "$curlHeaders" | awk 'NR == 1 { print $2; exit }')" != '200' ]; then + rm -f "$targetFile" + + local blobRedirect="$(echo "$curlHeaders" | awk -F ': ' 'tolower($1) == "location" { print $2; exit }')" + if [ -z "$blobRedirect" ]; then + echo >&2 "error: failed fetching '$image' blob '$digest'" + echo "$curlHeaders" | head -1 >&2 + return 1 + fi + + curl -fSL "${curlArgs[@]}" \ + "$blobRedirect" \ + -o "$targetFile" + fi +} + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + imageTag="${imageTag#*:}" + digest="${imageTag##*@}" + tag="${imageTag%%@*}" + + # add prefix library if passed official image + if [[ "$image" != *"/"* ]]; then + image="library/$image" + fi + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + + manifestJson="$( + curl -fsSL \ + -H "Authorization: Bearer $token" \ + -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.v1+json' \ + "$registryBase/v2/$image/manifests/$digest" + )" + if [ "${manifestJson:0:1}" != '{' ]; then + echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" + echo >&2 " $manifestJson" + exit 1 + fi + + imageIdentifier="$image:$tag@$digest" + + schemaVersion="$(echo "$manifestJson" | jq --raw-output '.schemaVersion')" + case "$schemaVersion" in + 2) + mediaType="$(echo "$manifestJson" | jq --raw-output '.mediaType')" + + case "$mediaType" in + application/vnd.docker.distribution.manifest.v2+json) + configDigest="$(echo "$manifestJson" | jq --raw-output '.config.digest')" + imageId="${configDigest#*:}" # strip off "sha256:" + + configFile="$imageId.json" + fetch_blob "$token" "$image" "$configDigest" "$dir/$configFile" -s + + layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.layers[]')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." + layerId= + layerFiles=() + for i in "${!layers[@]}"; do + layerMeta="${layers[$i]}" + + layerMediaType="$(echo "$layerMeta" | jq --raw-output '.mediaType')" + layerDigest="$(echo "$layerMeta" | jq --raw-output '.digest')" + + # save the previous layer's ID + parentId="$layerId" + # create a new fake layer ID based on this layer's digest and the previous layer's fake ID + layerId="$(echo "$parentId"$'\n'"$layerDigest" | sha256sum | cut -d' ' -f1)" + # this accounts for the possibility that an image contains the same layer twice (and thus has a duplicate digest value) + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + if [ ! 
-s "$dir/$layerId/json" ]; then + parentJson="$(printf ', parent: "%s"' "$parentId")" + addJson="$(printf '{ id: "%s"%s }' "$layerId" "${parentId:+$parentJson}")" + # this starter JSON is taken directly from Docker's own "docker save" output for unimportant layers + jq "$addJson + ." > "$dir/$layerId/json" <<-'EOJSON' + { + "created": "0001-01-01T00:00:00Z", + "container_config": { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": null + } + } + EOJSON + fi + + case "$layerMediaType" in + application/vnd.docker.image.rootfs.diff.tar.gzip) + layerTar="$layerId/layer.tar" + layerFiles=( "${layerFiles[@]}" "$layerTar" ) + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerTar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + fetch_blob "$token" "$image" "$layerDigest" "$dir/$layerTar" --progress + ;; + + *) + echo >&2 "error: unknown layer mediaType ($imageIdentifier, $layerDigest): '$layerMediaType'" + exit 1 + ;; + esac + done + + # change "$imageId" to be the ID of the last layer we added (needed for old-style "repositories" file which is created later -- specifically for older Docker daemons) + imageId="$layerId" + + # munge the top layer image manifest to have the appropriate image configuration for older daemons + imageOldConfig="$(jq --raw-output --compact-output '{ id: .id } + if .parent then { parent: .parent } else {} end' "$dir/$imageId/json")" + jq --raw-output "$imageOldConfig + del(.history, .rootfs)" "$dir/$configFile" > "$dir/$imageId/json" + + manifestJsonEntry="$( + echo '{}' | jq --raw-output '. + { + Config: "'"$configFile"'", + RepoTags: ["'"${image#library\/}:$tag"'"], + Layers: '"$(echo '[]' | jq --raw-output ".$(for layerFile in "${layerFiles[@]}"; do echo " + [ \"$layerFile\" ]"; done)")"' + }' + )" + manifestJsonEntries=( "${manifestJsonEntries[@]}" "$manifestJsonEntry" ) + ;; + + *) + echo >&2 "error: unknown manifest mediaType ($imageIdentifier): '$mediaType'" + exit 1 + ;; + esac + ;; + + 1) + if [ -z "$doNotGenerateManifestJson" ]; then + echo >&2 "warning: '$imageIdentifier' uses schemaVersion '$schemaVersion'" + echo >&2 " this script cannot (currently) recreate the 'image config' to put in a 'manifest.json' (thus any schemaVersion 2+ images will be imported in the old way, and their 'docker history' will suffer)" + echo >&2 + doNotGenerateManifestJson=1 + fi + + layersFs="$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + history="$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]')" + imageId="$(echo "$history" | jq --raw-output '.[0]' | jq --raw-output '.id')" + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." 
+ for i in "${!layers[@]}"; do + imageJson="$(echo "$history" | jq --raw-output ".[${i}]")" + layerId="$(echo "$imageJson" | jq --raw-output '.id')" + imageLayer="${layers[$i]}" + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + echo "$imageJson" > "$dir/$layerId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + fetch_blob "$token" "$image" "$imageLayer" "$dir/$layerId/layer.tar" --progress + done + ;; + + *) + echo >&2 "error: unknown manifest schemaVersion ($imageIdentifier): '$schemaVersion'" + exit 1 + ;; + esac + + echo + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + image="${image#library\/}" + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +if [ -z "$doNotGenerateManifestJson" ] && [ "${#manifestJsonEntries[@]}" -gt 0 ]; then + echo '[]' | jq --raw-output ".$(for entry in "${manifestJsonEntries[@]}"; do echo " + [ $entry ]"; done)" > "$dir/manifest.json" +else + rm -f "$dir/manifest.json" +fi + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . 
| docker load" diff --git a/contrib/editorconfig b/contrib/editorconfig new file mode 100644 index 0000000..97eda89 --- /dev/null +++ b/contrib/editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space diff --git a/contrib/gitdm/aliases b/contrib/gitdm/aliases new file mode 100644 index 0000000..dd5dd34 --- /dev/null +++ b/contrib/gitdm/aliases @@ -0,0 +1,148 @@ +Danny.Yates@mailonline.co.uk danny@codeaholics.org +KenCochrane@gmail.com kencochrane@gmail.com +LÉVEIL thomasleveil@gmail.com +Vincent.Bernat@exoscale.ch bernat@luffy.cx +acidburn@docker.com jess@docker.com +admin@jtlebi.fr jt@yadutaf.fr +ahmetalpbalkan@gmail.com ahmetb@microsoft.com +aj@gandi.net aj@gandi.net +albers@users.noreply.github.com github@albersweb.de +alexander.larsson@gmail.com alexl@redhat.com +amurdaca@redhat.com antonio.murdaca@gmail.com +amy@gandi.net aj@gandi.net +andrew.weiss@microsoft.com andrew.weiss@outlook.com +angt@users.noreply.github.com adrien@gallouet.fr +ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com +anonymouse2048@gmail.com lheckemann@twig-world.com +anusha@docker.com anusha.ragunathan@docker.com +asarai@suse.com asarai@suse.de +avi.miller@gmail.com avi.miller@oracle.com +bernat@luffy.cx Vincent.Bernat@exoscale.ch +bgoff@cpuguy83-mbp.home cpuguy83@gmail.com +brandon@ifup.co brandon@ifup.org +brent@docker.com brent.salisbury@docker.com +charmes.guillaume@gmail.com guillaume.charmes@docker.com +chenchun.feed@gmail.com ramichen@tencent.com +chooper@plumata.com charles.hooper@dotcloud.com +crosby.michael@gmail.com michael@docker.com +crosbymichael@gmail.com michael@docker.com +cyphar@cyphar.com asarai@suse.de +daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com +daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com +daniel.norberg@gmail.com dano@spotify.com +daniel@dotcloud.com daniel.mizyrycki@dotcloud.com +darren@rancher.com darren.s.shepherd@gmail.com +dave@dtucker.co.uk dt@docker.com +dev@vvieux.com victor.vieux@docker.com +dgasienica@zynga.com daniel@gasienica.ch +dnephin@gmail.com dnephin@docker.com +dominikh@fork-bomb.org dominik@honnef.co +dqminh89@gmail.com dqminh@cloudflare.com +dsxiao@dataman-inc.com dxiao@redhat.com +duglin@users.noreply.github.com dug@us.ibm.com +eric.hanchrow@gmail.com ehanchrow@ine.com +erik+github@hollensbe.org github@hollensbe.org +estesp@gmail.com estesp@linux.vnet.ibm.com +ewindisch@docker.com eric@windisch.us +f.joffrey@gmail.com joffrey@docker.com +fkautz@alumni.cmu.edu fkautz@redhat.com +frank.rosquin@gmail.com frank.rosquin+github@gmail.com +gh@mattyw.net mattyw@me.com +git@julienbordellier.com julienbordellier@gmail.com +github@metaliveblog.com github@developersupport.net +github@srid.name sridharr@activestate.com +guillaume.charmes@dotcloud.com guillaume.charmes@docker.com +guillaume@charmes.net guillaume.charmes@docker.com +guillaume@docker.com guillaume.charmes@docker.com +guillaume@dotcloud.com guillaume.charmes@docker.com +haoshuwei24@gmail.com haosw@cn.ibm.com +hollie.teal@docker.com hollie@docker.com +hollietealok@users.noreply.github.com hollie@docker.com +hsinko@users.noreply.github.com 21551195@zju.edu.cn +iamironbob@gmail.com altsysrq@gmail.com +icecrime@gmail.com arnaud.porterie@docker.com +jatzen@gmail.com jacob@jacobatzen.dk +jeff@allingeek.com jeff.nickoloff@gmail.com +jefferya@programmerq.net jeff@docker.com +jerome.petazzoni@dotcloud.com 
jerome.petazzoni@dotcloud.com +jfrazelle@users.noreply.github.com jess@docker.com +jhoward@microsoft.com John.Howard@microsoft.com +jlhawn@berkeley.edu josh.hawn@docker.com +joffrey@dotcloud.com joffrey@docker.com +john.howard@microsoft.com John.Howard@microsoft.com +jp@enix.org jerome.petazzoni@dotcloud.com +justin.cormack@unikernel.com justin.cormack@docker.com +justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com +justin@specialbusservice.com justin.cormack@docker.com +katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com +kuehnle@online.de git.nivoc@neverbox.com +kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com +leijitang@gmail.com leijitang@huawei.com +liubin0329@gmail.com liubin0329@users.noreply.github.com +lk4d4math@gmail.com lk4d4@docker.com +louis@dotcloud.com kalessin@kalessin.fr +lsm5@redhat.com lsm5@fedoraproject.org +lyndaoleary@hotmail.com lyndaoleary29@gmail.com +madhu@socketplane.io madhu@docker.com +martins@noironetworks.com aanm90@gmail.com +mary@docker.com mary.anthony@docker.com +mastahyeti@users.noreply.github.com mastahyeti@gmail.com +maztaim@users.noreply.github.com taim@bosboot.org +me@runcom.ninja antonio.murdaca@gmail.com +mheon@mheonlaptop.redhat.com mheon@redhat.com +michael@crosbymichael.com michael@docker.com +mohitsoni1989@gmail.com mosoni@ebay.com +moxieandmore@gmail.com mary.anthony@docker.com +moyses.furtado@wplex.com.br moysesb@gmail.com +msabramo@gmail.com marc@marc-abramowitz.com +mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com +nathan.leclaire@gmail.com nathan.leclaire@docker.com +nathanleclaire@gmail.com nathan.leclaire@docker.com +ostezer@users.noreply.github.com ostezer@gmail.com +peter@scraperwiki.com p@pwaller.net +princess@docker.com jess@docker.com +proppy@aminche.com proppy@google.com +qhuang@10.0.2.15 h.huangqiang@huawei.com +resouer@gmail.com resouer@163.com +roberto_hashioka@hotmail.com roberto.hashioka@docker.com +root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com +runcom@linux.com antonio.murdaca@gmail.com +runcom@redhat.com antonio.murdaca@gmail.com +runcom@users.noreply.github.com antonio.murdaca@gmail.com +s@docker.com solomon@docker.com +shawnlandden@gmail.com shawn@churchofgit.com +singh.gurjeet@gmail.com gurjeet@singh.im +sjoerd@byte.nl sjoerd-github@linuxonly.nl +smahajan@redhat.com shishir.mahajan@redhat.com +solomon.hykes@dotcloud.com solomon@docker.com +solomon@dotcloud.com solomon@docker.com +stefanb@us.ibm.com stefanb@linux.vnet.ibm.com +stevvooe@users.noreply.github.com stephen.day@docker.com +superbaloo+registrations.github@superbaloo.net baloo@gandi.net +tangicolin@gmail.com tangicolin@gmail.com +thaJeztah@users.noreply.github.com github@gone.nl +thatcher@dotcloud.com thatcher@docker.com +thatcher@gmx.net thatcher@docker.com +tibor@docker.com teabee89@gmail.com +tiborvass@users.noreply.github.com teabee89@gmail.com +timruffles@googlemail.com oi@truffles.me.uk +tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com +tj@init.me tejesh.mehta@gmail.com +tristan.carel@gmail.com tristan@cogniteev.com +unclejack@users.noreply.github.com cristian.staretu@gmail.com +unclejacksons@gmail.com cristian.staretu@gmail.com +vbatts@hashbangbash.com vbatts@redhat.com +victor.vieux@dotcloud.com victor.vieux@docker.com +victor@docker.com victor.vieux@docker.com +victor@dotcloud.com victor.vieux@docker.com +victorvieux@gmail.com victor.vieux@docker.com +vieux@docker.com victor.vieux@docker.com +vincent+github@demeester.fr vincent@sbr.pm +vincent@bernat.im bernat@luffy.cx 
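+# (One "alias canonical" pair per line; this file is regenerated from a git
+# .mailmap by generate_aliases.sh and wired up via the EmailAliases line in
+# gitdm.config.)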
+vojnovski@gmail.com viktor.vojnovski@amadeus.com +whoshuu@gmail.com huu@prismskylabs.com +xiaods@gmail.com dxiao@redhat.com +xlgao@zju.edu.cn xlgao@zju.edu.cn +yestin.sun@polyera.com sunyi0804@gmail.com +yuchangchun1@huawei.com yuchangchun1@huawei.com +zjaffee@us.ibm.com zij@case.edu diff --git a/contrib/gitdm/domain-map b/contrib/gitdm/domain-map new file mode 100644 index 0000000..17a287e --- /dev/null +++ b/contrib/gitdm/domain-map @@ -0,0 +1,47 @@ +# +# Docker +# + +docker.com Docker +dotcloud.com Docker + +aluzzardi@gmail.com Docker +cpuguy83@gmail.com Docker +derek@mcgstyle.net Docker +github@gone.nl Docker +kencochrane@gmail.com Docker +mickael.laventure@gmail.com Docker +sam.alba@gmail.com Docker +svendowideit@fosiki.com Docker +svendowideit@home.org.au Docker +tonistiigi@gmail.com Docker + +cristian.staretu@gmail.com Docker < 2015-01-01 +cristian.staretu@gmail.com Cisco + +github@hollensbe.org Docker < 2015-01-01 +github@hollensbe.org Cisco + +david.calavera@gmail.com Docker < 2016-04-01 +david.calavera@gmail.com (Unknown) + +madhu@socketplane.io Docker +ejhazlett@gmail.com Docker +ben@firshman.co.uk Docker + +vincent@sbr.pm (Unknown) < 2016-10-24 +vincent@sbr.pm Docker + +# +# Others +# + +cisco.com Cisco +google.com Google +ibm.com IBM +huawei.com Huawei +microsoft.com Microsoft + +redhat.com Red Hat +mrunalp@gmail.com Red Hat +antonio.murdaca@gmail.com Red Hat diff --git a/contrib/gitdm/generate_aliases.sh b/contrib/gitdm/generate_aliases.sh new file mode 100755 index 0000000..dfff5ff --- /dev/null +++ b/contrib/gitdm/generate_aliases.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# +# This script generates a gitdm compatible email aliases file from a git +# formatted .mailmap file. +# +# Usage: +# $> ./generate_aliases > aliases +# + +cat $1 | \ + grep -v '^#' | \ + sed 's/^[^<]*<\([^>]*\)>/\1/' | \ + grep '<.*>' | sed -e 's/[<>]/ /g' | \ + awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \ + sort | uniq diff --git a/contrib/gitdm/gitdm.config b/contrib/gitdm/gitdm.config new file mode 100644 index 0000000..d9b62b0 --- /dev/null +++ b/contrib/gitdm/gitdm.config @@ -0,0 +1,17 @@ +# +# EmailAliases lets us cope with developers who use more +# than one address. +# +EmailAliases aliases + +# +# EmailMap does the main work of mapping addresses onto +# employers. +# +EmailMap domain-map + +# +# Use GroupMap to map a file full of addresses to the +# same employer +# +# GroupMap company-Docker Docker diff --git a/contrib/httpserver/Dockerfile b/contrib/httpserver/Dockerfile new file mode 100644 index 0000000..747dc91 --- /dev/null +++ b/contrib/httpserver/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/contrib/httpserver/Dockerfile.solaris b/contrib/httpserver/Dockerfile.solaris new file mode 100644 index 0000000..3d0d691 --- /dev/null +++ b/contrib/httpserver/Dockerfile.solaris @@ -0,0 +1,4 @@ +FROM solaris +EXPOSE 80/tcp +COPY httpserver . 
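+# Note: these httpserver Dockerfiles copy a prebuilt binary; it is assumed to +# be a statically linked build of server.go (added below), e.g.: +# CGO_ENABLED=0 go build -o httpserver server.go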
+CMD ["./httpserver"] diff --git a/contrib/httpserver/server.go b/contrib/httpserver/server.go new file mode 100644 index 0000000..a75d5ab --- /dev/null +++ b/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/contrib/init/openrc/docker.confd b/contrib/init/openrc/docker.confd new file mode 100644 index 0000000..f625261 --- /dev/null +++ b/contrib/init/openrc/docker.confd @@ -0,0 +1,23 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +# this contains both stdout and stderr. If you need to separate them, +# see the settings below +#DOCKER_LOGFILE="/var/log/docker.log" + +# where the docker daemon stdout gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_OUTFILE="/var/log/docker-out.log" + +# where the docker daemon stderr gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_ERRFILE="/var/log/docker-err.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/docker" + +# any other random options you want to pass to docker +DOCKER_OPTS="" diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd new file mode 100644 index 0000000..6c968f6 --- /dev/null +++ b/contrib/init/openrc/docker.initd @@ -0,0 +1,24 @@ +#!/sbin/openrc-run +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="${DOCKERD_BINARY:-/usr/bin/dockerd}" +pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" +command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" +DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +DOCKER_ERRFILE="${DOCKER_ERRFILE:-${DOCKER_LOGFILE}}" +DOCKER_OUTFILE="${DOCKER_OUTFILE:-${DOCKER_LOGFILE}}" +start_stop_daemon_args="--background \ + --stderr \"${DOCKER_ERRFILE}\" --stdout \"${DOCKER_OUTFILE}\"" + +start_pre() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + ulimit -u unlimited + + return 0 +} diff --git a/contrib/init/systemd/REVIEWERS b/contrib/init/systemd/REVIEWERS new file mode 100644 index 0000000..b9ba55b --- /dev/null +++ b/contrib/init/systemd/REVIEWERS @@ -0,0 +1,3 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service new file mode 100644 index 0000000..5174631 --- /dev/null +++ b/contrib/init/systemd/docker.service @@ -0,0 +1,34 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network-online.target docker.socket firewalld.service +Wants=network-online.target +Requires=docker.socket + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd -H fd:// +ExecReload=/bin/kill -s HUP $MAINPID +LimitNOFILE=1048576 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. 
+LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this option. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/contrib/init/systemd/docker.service.rpm b/contrib/init/systemd/docker.service.rpm new file mode 100644 index 0000000..6c60646 --- /dev/null +++ b/contrib/init/systemd/docker.service.rpm @@ -0,0 +1,33 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network-online.target firewalld.service +Wants=network-online.target + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exist and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this option. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/contrib/init/systemd/docker.socket b/contrib/init/systemd/docker.socket new file mode 100644 index 0000000..7dd9509 --- /dev/null +++ b/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker new file mode 100755 index 0000000..d99b4c2 --- /dev/null +++ b/contrib/init/sysvinit-debian/docker @@ -0,0 +1,156 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more.
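+# Once installed as /etc/init.d/docker, the script is typically enabled at +# boot with the distribution's own tooling, e.g. update-rc.d docker defaults.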
+### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=docker + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKERD=/usr/bin/dockerd +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKERD ]; then + log_failure_msg "$DOCKERD not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + if [ "$BASH" ]; then + ulimit -u unlimited + else + ulimit -p unlimited + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKERD" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + if [ -f "$DOCKER_SSD_PIDFILE" ]; then + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + else + log_warning_msg "Docker already stopped - file $DOCKER_SSD_PIDFILE not found." 
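+ # (the start-stop-daemon pidfile is only written by this script's own + # "start" action, so its absence means there is nothing to stop)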
+ fi + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 0000000..c4e9319 --- /dev/null +++ b/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,20 @@ +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker new file mode 100755 index 0000000..df9b02a --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,153 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/dockerd" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + if [ ! -x $exec ]; then + if [ ! -e $exec ]; then + echo "Docker executable $exec not found" + else + echo "You do not have permission to execute the Docker executable $exec" + fi + exit 5 + fi + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + echo -n '.' + done + if [ ! -f $pidfile ]; then + failure + echo + exit 1 + fi + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? 
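+ # killproc -d 300 gives dockerd up to 300 seconds to shut down cleanly + # (stopping many containers can be slow) before it is forcibly killed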
+ echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/contrib/init/sysvinit-redhat/docker.sysconfig b/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 0000000..0864b3d --- /dev/null +++ b/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/contrib/init/upstart/REVIEWERS b/contrib/init/upstart/REVIEWERS new file mode 100644 index 0000000..03ee2dd --- /dev/null +++ b/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf new file mode 100644 index 0000000..d58f7d6 --- /dev/null +++ b/contrib/init/upstart/docker.conf @@ -0,0 +1,72 @@ +description "Docker daemon" + +start on (filesystem and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +limit nofile 524288 1048576 + +# Having non-zero limits causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +limit nproc unlimited unlimited + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKERD=/usr/bin/dockerd + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKERD" $DOCKER_OPTS --raw-logs +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + DOCKER_SOCKET= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + DOCKER_SOCKET=/var/run/docker.sock + else + DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)' | sed 1q) + fi + + if [ -n "$DOCKER_SOCKET" ]; then + while ! 
[ -e "$DOCKER_SOCKET" ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for $DOCKER_SOCKET" + sleep 0.1 + done + echo "$DOCKER_SOCKET is up" + fi +end script diff --git a/contrib/install.sh b/contrib/install.sh new file mode 100755 index 0000000..dd5e85e --- /dev/null +++ b/contrib/install.sh @@ -0,0 +1,53 @@ +#!/bin/sh + +tag="17.06-rev1" +tag=$(echo "$tag" | sed 's|+|.|g') + +machine=$(uname -m) + +case "$machine" in + "armv5"*) + arch="armv5" + ;; + "armv6"*) + arch="armv6" + ;; + "armv7"*) + arch="armv7" + ;; + "armv8"*) + arch="aarch64" + ;; + "aarch64"*) + arch="aarch64" + ;; + "i386") + arch="i386" + ;; + "i686") + arch="i386" + ;; + "x86_64") + arch="x86_64" + ;; + *) + echo "Unknown machine type: $machine" + exit 1 +esac + +url="https://github.com/resin-os/balena/releases/download/${tag}/balena-${tag}-${arch}.tar.gz" + +curl -sL "$url" | sudo tar xzv -C /usr/local/bin --strip-components=1 + +cat </dev/null + echo "$BUNDLE_PATH" > "$DATABASE_KEY" + git -C "$DATABASE" add "$DATABASE_KEY" + git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH" + rm -f /usr/local/bin/docker + cp "$CLIENT_PATH" /usr/local/bin + echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults." + ;; +"undo") + git -C "$DATABASE" reset --hard >/dev/null + [ -f "$DATABASE_KEY" ] || errexit "bundle not set" + git -C "$DATABASE" rm "$DATABASE_KEY" + git -C "$DATABASE" commit -m "remove bundle" + rm -f /usr/local/bin/docker + ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin + echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended." + ;; +esac diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh new file mode 100755 index 0000000..a271eff --- /dev/null +++ b/contrib/mkimage-alpine.sh @@ -0,0 +1,90 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository] [-a arch]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories + printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . 
| xz > rootfs.tar.xz +} + +while getopts "hr:m:sc:a:" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + c) + ADDITIONALREPO=$OPTARG + ;; + a) + ARCH=$OPTARG + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +MAINREPO=$MIRROR/$REL/main +ADDITIONALREPO=$MIRROR/$REL/${ADDITIONALREPO:-community} +ARCH=${ARCH:-$(uname -m)} + +tmp +getapk +mkbase +conf +pack +save diff --git a/contrib/mkimage-arch-pacman.conf b/contrib/mkimage-arch-pacman.conf new file mode 100644 index 0000000..45fe03d --- /dev/null +++ b/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. 
+#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh new file mode 100755 index 0000000..f941177 --- /dev/null +++ b/contrib/mkimage-arch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +arch="$(uname -m)" +case "$arch" in + armv*) + if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then + pacman-key --init + pacman-key --populate archlinuxarm + else + echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" + exit 1 + fi + PACMAN_CONF=$(mktemp ${TMPDIR:-/var/tmp}/pacman-conf-archlinux-XXXXXXXXX) + version="$(echo $arch | cut -c 5)" + sed "s/Architecture = armv/Architecture = armv${version}h/g" './mkimage-archarm-pacman.conf' > "${PACMAN_CONF}" + PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' + PACMAN_EXTRA_PKGS='archlinuxarm-keyring' + EXPECT_TIMEOUT=1800 # Most armv* based devices can be very slow (e.g. RPiv1) + ARCH_KEYRING=archlinuxarm + DOCKER_IMAGE_NAME="armv${version}h/archlinux" + ;; + *) + PACMAN_CONF='./mkimage-arch-pacman.conf' + PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' + PACMAN_EXTRA_PKGS='' + EXPECT_TIMEOUT=60 + ARCH_KEYRING=archlinux + DOCKER_IMAGE_NAME=archlinux + ;; +esac + +export PACMAN_MIRRORLIST + +expect < $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME +docker run --rm -t $DOCKER_IMAGE_NAME echo Success. +rm -rf $ROOTFS diff --git a/contrib/mkimage-archarm-pacman.conf b/contrib/mkimage-archarm-pacman.conf new file mode 100644 index 0000000..f4b45f5 --- /dev/null +++ b/contrib/mkimage-archarm-pacman.conf @@ -0,0 +1,98 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. 
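+# NOTE: the "Architecture = armv" value later in this file is intentionally +# incomplete; mkimage-arch.sh rewrites it with sed to the concrete variant +# (e.g. armv7h) before the file is used.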
+#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = armv + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +[alarm] +Include = /etc/pacman.d/mirrorlist + +[aur] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh new file mode 100755 index 0000000..3f0bdca --- /dev/null +++ b/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. 
Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." + chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/contrib/mkimage-pld.sh b/contrib/mkimage-pld.sh new file mode 100755 index 0000000..615c203 --- /dev/null +++ b/contrib/mkimage-pld.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# +# Generate a minimal filesystem for PLD Linux and load it into the local docker as "pld". 
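+# A typical invocation (the script verifies root itself and assumes poldek(1) +# and rpm(1) are available on the host): +# sudo ./contrib/mkimage-pld.sh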
+# https://www.pld-linux.org/packages/docker +# +set -e + +if [ "$(id -u)" != "0" ]; then + echo >&2 "$0: requires root" + exit 1 +fi + +image_name=pld + +tmpdir=$(mktemp -d ${TMPDIR:-/var/tmp}/pld-docker-XXXXXX) +root=$tmpdir/rootfs +install -d -m 755 $root + +# to clean up: +docker rmi $image_name || : + +# build +rpm -r $root --initdb + +set +e +install -d $root/dev/pts +mknod $root/dev/random c 1 8 -m 644 +mknod $root/dev/urandom c 1 9 -m 644 +mknod $root/dev/full c 1 7 -m 666 +mknod $root/dev/null c 1 3 -m 666 +mknod $root/dev/zero c 1 5 -m 666 +mknod $root/dev/console c 5 1 -m 660 +set -e + +poldek -r $root --up --noask -u \ + --noignore \ + -O 'rpmdef=_install_langs C' \ + -O 'rpmdef=_excludedocs 1' \ + vserver-packages \ + bash iproute2 coreutils grep poldek + +# fix netsharedpath, so containers would be able to install when some paths are mounted +sed -i -e 's;^#%_netsharedpath.*;%_netsharedpath /dev/shm:/sys:/proc:/dev:/etc/hostname;' $root/etc/rpm/macros + +# no need for alternatives +poldek-config -c $root/etc/poldek/poldek.conf ignore systemd-init + +# this makes initscripts to believe network is up +touch $root/var/lock/subsys/network + +# cleanup large optional packages +remove_packages="ca-certificates" +for pkg in $remove_packages; do + rpm -r $root -q $pkg && rpm -r $root -e $pkg --nodeps +done + +# cleanup more +rm -v $root/etc/ld.so.cache +rm -rfv $root/var/cache/hrmib/* +rm -rfv $root/usr/share/man/man?/* +rm -rfv $root/usr/share/locale/*/ +rm -rfv $root/usr/share/help/*/ +rm -rfv $root/usr/share/doc/* +rm -rfv $root/usr/src/examples/* +rm -rfv $root/usr/share/pixmaps/* + +# and import +tar --numeric-owner --xattrs --acls -C $root -c . | docker import - $image_name + +# and test +docker run -i -u root $image_name /bin/echo Success. + +rm -r $tmpdir diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh new file mode 100755 index 0000000..29da170 --- /dev/null +++ b/contrib/mkimage-yum.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +set -e + +usage() { + cat <<EOOPTS +$(basename $0) [OPTIONS] <name> +OPTIONS: + -p "<packages>" The list of packages to install in the container. + The default is blank. + -g "<groups>" The groups of packages to install in the container. + The default is "Core". + -y <yumconf> The path to the yum config to install packages from. The + default is /etc/yum.conf for CentOS/RHEL and /etc/dnf/dnf.conf for Fedora +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then + yum_config=/etc/dnf/dnf.conf + alias yum=dnf +fi +install_groups="Core" +while getopts ":y:p:g:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + p) + install_packages="$OPTARG" + ;; + g) + install_groups="$OPTARG" + ;; + \?) 
+ echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +if [[ -n "$install_groups" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall $install_groups +fi + +if [[ -n "$install_packages" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y install $install_packages +fi + +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . | docker import - $name:$version + +docker run -i -t --rm $name:$version /bin/bash -c 'echo success' + +rm -rf "$target" diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh new file mode 100755 index 0000000..13298c8 --- /dev/null +++ b/contrib/mkimage.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + echo >&2 " $mkimg -t someuser/solaris solaris" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +os= +os=$(uname -o) + +# set up path to gnu tools if solaris +[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH +# TODO check for gnu-tar, gnu-getopt + +# TODO requires root/sudo due to some pkg operations. sigh. +[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! 
-x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . +) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000..7749e63 --- /dev/null +++ b/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/contrib/mkimage/busybox-static b/contrib/mkimage/busybox-static new file mode 100755 index 0000000..e15322b --- /dev/null +++ b/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap new file mode 100755 index 0000000..9f7d898 --- /dev/null +++ b/contrib/mkimage/debootstrap @@ -0,0 +1,251 @@ +#!/usr/bin/env bash +set -e + +mkimgdeb="$(basename "$0")" +mkimg="$(dirname "$0").sh" + +usage() { + echo >&2 "usage: $mkimgdeb rootfsDir suite [debootstrap-args]" + echo >&2 " note: $mkimgdeb meant to be used from $mkimg" + exit 1 +} + +rootfsDir="$1" +if [ -z "$rootfsDir" ]; then + echo >&2 "error: rootfsDir is missing" + echo >&2 + usage +fi +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +if [ -z "$suite" ]; then + echo >&2 "error: suite is missing" + echo >&2 + usage +fi +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot || :)" +if [ -z "$chrootPath" ]; then + echo >&2 "error: chroot not found. Are you root?" + echo >&2 + usage +fi + +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... +: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... 
:) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). + + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". :) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". 
+ + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF + + # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed + echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' + # Since Docker users are looking for the smallest possible final images, the + # following emerges as a very common pattern: + + # RUN apt-get update \ + # && apt-get install -y <packages> \ + # && <do some compilation work> \ + # && apt-get purge -y --auto-remove <packages> + + # By default, APT will actually _keep_ packages installed via Recommends or + # Depends if another package Suggests them, even and including if the package + # that originally caused them to be installed is removed. Setting this to + # "false" ensures that APT is appropriately aggressive about removing the + # packages it added. + + # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant + Apt::AutoRemove::SuggestsImportant "false"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if curl -o /dev/null -s --head --fail "http://security.debian.org/dists/$suite/updates/main/binary-$(rootfs_chroot dpkg --print-architecture)/Packages.gz"; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." +) diff --git a/contrib/mkimage/mageia-urpmi b/contrib/mkimage/mageia-urpmi new file mode 100755 index 0000000..93fb289 --- /dev/null +++ b/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. 
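+# Like the other helpers in contrib/mkimage/, this script is normally invoked +# through contrib/mkimage.sh, e.g.: +# ./contrib/mkimage.sh -t someuser/mageia:4 mageia-urpmi --version=4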
+# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). +# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff --git a/contrib/mkimage/rinse b/contrib/mkimage/rinse new file mode 100755 index 0000000..75eb4f0 --- /dev/null +++ b/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/contrib/mkimage/solaris b/contrib/mkimage/solaris new file mode 100755 index 0000000..158970e --- /dev/null +++ b/contrib/mkimage/solaris @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Solaris 12 base image build script. +# +set -e + +# TODO add optional package publisher origin + +rootfsDir="$1" +shift + +# base install +( + set -x + + pkg image-create --full --zone \ + --facet facet.locale.*=false \ + --facet facet.locale.POSIX=true \ + --facet facet.doc=false \ + --facet facet.doc.*=false \ + "$rootfsDir" + + pkg -R "$rootfsDir" set-property use-system-repo true + + pkg -R "$rootfsDir" set-property flush-content-cache-on-success true + + pkg -R "$rootfsDir" install core-os +) + +# Lay in stock configuration, set up milestone +# XXX This all may become optional in a base image +( + # faster to build repository database on tmpfs + REPO_DB=/system/volatile/repository.$$ + export SVCCFG_REPOSITORY=${REPO_DB} + export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door + + # Import base manifests. NOTE These are a combination of basic requirement + # and gleaned from container milestone manifest. They may change. 
+ for m in $rootfsDir/lib/svc/manifest/system/environment.xml \ + $rootfsDir/lib/svc/manifest/system/svc/global.xml \ + $rootfsDir/lib/svc/manifest/system/svc/restarter.xml \ + $rootfsDir/lib/svc/manifest/network/dns/client.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/switch.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/cache.xml \ + $rootfsDir/lib/svc/manifest/milestone/container.xml ; do + svccfg import $m + done + + # Apply system layer profile, deleting unnecessary dependencies + svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml + + # XXX Even if we keep a repo in the base image, this is definitely optional + svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml + + for s in svc:/system/svc/restarter \ + svc:/system/environment \ + svc:/network/dns/client \ + svc:/system/name-service/switch \ + svc:/system/name-service/cache \ + svc:/system/svc/global \ + svc:/milestone/container ;do + svccfg -s $s refresh + done + + # now copy the built up repository into the base rootfs + mv $REPO_DB $rootfsDir/etc/svc/repository.db +) + +# pkg(1) needs the zoneproxy-client running in the container. +# use a simple wrapper to run it as needed. +# XXX maybe we go back to running this in SMF? +mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg" +cat > "$rootfsDir/usr/bin/pkg" <<-'EOF' +#!/bin/sh +# +# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION +# +# The Solaris base image uses the sysrepo proxy mechanism. The +# IPS client pkg(1) requires the zoneproxy-client to reach the +# remote publisher origins through the host. This wrapper script +# enables and disables the proxy client as needed. This is a +# temporary solution. + +/usr/lib/zones/zoneproxy-client -s localhost:1008 +PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@" +pkill -9 zoneproxy-client +EOF +chmod +x "$rootfsDir/usr/bin/pkg" diff --git a/contrib/nnp-test/Dockerfile b/contrib/nnp-test/Dockerfile new file mode 100644 index 0000000..026d869 --- /dev/null +++ b/contrib/nnp-test/Dockerfile @@ -0,0 +1,9 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test + +RUN chmod +s /usr/bin/nnp-test diff --git a/contrib/nnp-test/nnp-test.c b/contrib/nnp-test/nnp-test.c new file mode 100644 index 0000000..b767da7 --- /dev/null +++ b/contrib/nnp-test/nnp-test.c @@ -0,0 +1,10 @@ +#include <stdio.h> +#include <sys/types.h> +#include <unistd.h> + +int main(int argc, char *argv[]) +{ + printf("EUID=%d\n", geteuid()); + return 0; +} + diff --git a/contrib/nuke-graph-directory.sh b/contrib/nuke-graph-directory.sh new file mode 100755 index 0000000..3d2f49e --- /dev/null +++ b/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... 
+# let's start by unmounting any submounts in $dir
+# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!)
+for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
+	mount="$(readlink -f "$mount" || true)"
+	if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then
+		( set -x; umount -f "$mount" )
+	fi
+done
+
+# now, let's go destroy individual btrfs subvolumes, if any exist
+if command -v btrfs > /dev/null 2>&1; then
+	# Find btrfs subvolumes under $dir by checking for inode 256
+	# Source: http://stackoverflow.com/a/32865333
+	for subvol in $(find "$dir" -type d -inum 256 | sort -r); do
+		if [ "$dir" != "$subvol" ]; then
+			( set -x; btrfs subvolume delete "$subvol" )
+		fi
+	done
+fi
+
+# finally, DESTROY ALL THINGS
+( shopt -s dotglob; set -x; rm -rf "$dir"/* )
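The subvolume scan above works because a btrfs subvolume root is always a directory with inode number 256, so any language that can stat a file can find subvolumes. For reference, the same check as a small Go program (our illustration, not part of the patch; Linux-only, helper names are ours):

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// isBtrfsSubvolume reports whether fi looks like a btrfs subvolume root:
// subvolume roots are directories with inode number 256.
func isBtrfsSubvolume(fi os.FileInfo) bool {
	st, ok := fi.Sys().(*syscall.Stat_t)
	return ok && fi.IsDir() && st.Ino == 256
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: subvols <dir>")
		os.Exit(1)
	}
	err := filepath.Walk(os.Args[1], func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if isBtrfsSubvolume(fi) {
			fmt.Println("subvolume:", p)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk:", err)
		os.Exit(1)
	}
}
```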
diff --git a/contrib/project-stats.sh b/contrib/project-stats.sh
new file mode 100755
index 0000000..2691c72
--- /dev/null
+++ b/contrib/project-stats.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+## Run this script from the root of the docker repository
+## to query project stats useful to the maintainers.
+## You will need to install `pulls` and `issues` from
+## https://github.com/crosbymichael/pulls
+
+set -e
+
+echo -n "Open pulls: "
+PULLS=$(pulls | wc -l); let PULLS=$PULLS-1
+echo $PULLS
+
+echo -n "Pulls alru: "
+pulls alru
+
+echo -n "Open issues: "
+ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1
+echo $ISSUES
+
+echo -n "Issues alru: "
+issues alru
diff --git a/contrib/report-issue.sh b/contrib/report-issue.sh
new file mode 100755
index 0000000..cb54f1a
--- /dev/null
+++ b/contrib/report-issue.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+
+# This is a convenience script for reporting issues that include a base
+# template of information. See https://github.com/docker/docker/pull/8845
+
+set -e
+
+DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"}
+DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "}
+DOCKER=${DOCKER:-"docker"}
+DOCKER_COMMAND="${DOCKER}"
+export DOCKER_COMMAND
+
+# pulled from https://gist.github.com/cdown/1163649
+function urlencode() {
+	# urlencode <string>
+
+	local length="${#1}"
+	for (( i = 0; i < length; i++ )); do
+		local c="${1:i:1}"
+		case $c in
+			[a-zA-Z0-9.~_-]) printf "$c" ;;
+			*) printf '%%%02X' "'$c"
+		esac
+	done
+}
+
+function template() {
+# this should always match the template from CONTRIBUTING.md
+	cat <<- EOM
+		Description of problem:
+
+
+		\`docker version\`:
+		`${DOCKER_COMMAND} -D version`
+
+
+		\`docker info\`:
+		`${DOCKER_COMMAND} -D info`
+
+
+		\`uname -a\`:
+		`uname -a`
+
+
+		Environment details (AWS, VirtualBox, physical, etc.):
+
+
+		How reproducible:
+
+
+		Steps to Reproduce:
+		1.
+		2.
+		3.
+
+
+		Actual Results:
+
+
+		Expected Results:
+
+
+		Additional info:
+
+
+	EOM
+}
+
+function format_issue_url() {
+	if [ ${#@} -ne 2 ] ; then
+		return 1
+	fi
+	local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}")
+	local issue_body=$(urlencode "${2}")
+	echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}"
+}
+
+
+echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
+read -r -n 1 use_sudo
+echo ""
+
+if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
+	export DOCKER_COMMAND="sudo ${DOCKER}"
+fi
+
+echo -ne "Title of new issue?: "
+read -r issue_title
+echo ""
+
+issue_url=$(format_issue_url "${issue_title}" "$(template)")
+
+if which xdg-open 2>/dev/null >/dev/null ; then
+	echo -ne "Would you like to launch this report in your browser? [Y|n]: "
+	read -r -n 1 launch_now
+	echo ""
+
+	if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then
+		xdg-open "${issue_url}"
+	fi
+fi
+
+echo "If you would like to manually open the url, you can open this link in your browser: ${issue_url}"
+
diff --git a/contrib/reprepro/suites.sh b/contrib/reprepro/suites.sh
new file mode 100755
index 0000000..badb34a
--- /dev/null
+++ b/contrib/reprepro/suites.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$BASH_SOURCE")/../.."
+
+targets_from() {
+	git fetch -q https://github.com/docker/docker.git "$1"
+	git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v /
+}
+
+release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1)
+{ targets_from master; targets_from "$release_branch"; } | sort -u
diff --git a/contrib/syntax/nano/Dockerfile.nanorc b/contrib/syntax/nano/Dockerfile.nanorc
new file mode 100644
index 0000000..8b63dae
--- /dev/null
+++ b/contrib/syntax/nano/Dockerfile.nanorc
@@ -0,0 +1,26 @@
+## Syntax highlighting for Dockerfiles
+syntax "Dockerfile" "Dockerfile[^/]*$"
+
+## Keywords
+icolor red "^(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)[[:space:]]"
+
+## Brackets & parenthesis
+color brightgreen "(\(|\)|\[|\])"
+
+## Double ampersand
+color brightmagenta "&&"
+
+## Comments
+icolor cyan "^[[:space:]]*#.*$"
+
+## Blank space at EOL
+color ,green "[[:space:]]+$"
+
+## Strings, single-quoted
+color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!"
+
+## Strings, double-quoted
+color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!"
+
+## Single and double quotes
+color brightyellow "('|\")"
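The nano, TextMate, and Vim highlighters in this patch all share essentially one instruction pattern. As a sanity check, the same pattern can be exercised from Go's RE2 engine — a quick sketch, not part of the patch:

```
package main

import (
	"fmt"
	"regexp"
)

// The instruction pattern shared by the nano/TextMate/Vim highlighters,
// written in RE2 syntax with a case-insensitive flag.
var keyword = regexp.MustCompile(`(?i)^\s*(?:ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s`)

func main() {
	for _, line := range []string{
		"FROM buildpack-deps:jessie",
		"ONBUILD RUN make test",
		"# RUN inside a comment",
	} {
		fmt.Printf("%-30q matches=%v\n", line, keyword.MatchString(line))
	}
}
```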
diff --git a/contrib/syntax/nano/README.md b/contrib/syntax/nano/README.md
new file mode 100644
index 0000000..5985208
--- /dev/null
+++ b/contrib/syntax/nano/README.md
@@ -0,0 +1,32 @@
+Dockerfile.nanorc
+=================
+
+Dockerfile syntax highlighting for nano
+
+Single User Installation
+------------------------
+1. Create a nano syntax directory in your home directory:
+ * `mkdir -p ~/.nano/syntax`
+
+2. Copy `Dockerfile.nanorc` to `~/.nano/syntax/`
+ * `cp Dockerfile.nanorc ~/.nano/syntax/`
+
+3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file
+ ```
+## Dockerfile files
+include "~/.nano/syntax/Dockerfile.nanorc"
+ ```
+
+System Wide Installation
+------------------------
+1. Create a nano syntax directory:
+ * `mkdir /usr/local/share/nano`
+
+2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano`
+ * `cp Dockerfile.nanorc /usr/local/share/nano/`
+
+3. Add the following to your `/etc/nanorc`:
+ ```
+## Dockerfile files
+include "/usr/local/share/nano/Dockerfile.nanorc"
+ ```
diff --git a/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
new file mode 100644
index 0000000..20f0d04
--- /dev/null
+++ b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>name</key>
+	<string>Comments</string>
+	<key>scope</key>
+	<string>source.dockerfile</string>
+	<key>settings</key>
+	<dict>
+		<key>shellVariables</key>
+		<array>
+			<dict>
+				<key>name</key>
+				<string>TM_COMMENT_START</string>
+				<key>value</key>
+				<string># </string>
+			</dict>
+		</array>
+	</dict>
+	<key>uuid</key>
+	<string>2B215AC0-A7F3-4090-9FF6-F4842BD56CA7</string>
+</dict>
+</plist>
diff --git a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
new file mode 100644
index 0000000..5a27333
--- /dev/null
+++ b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
@@ -0,0 +1,160 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>fileTypes</key>
+	<array>
+		<string>Dockerfile</string>
+	</array>
+	<key>name</key>
+	<string>Dockerfile</string>
+	<key>patterns</key>
+	<array>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+			<key>match</key>
+			<string>^\s*\b(FROM)\b.*?\b(AS)\b</string>
+		</dict>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.control.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+			<key>match</key>
+			<string>^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s</string>
+		</dict>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.operator.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>keyword.other.special-method.dockerfile</string>
+				</dict>
+			</dict>
+			<key>match</key>
+			<string>^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s</string>
+		</dict>
+		<dict>
+			<key>begin</key>
+			<string>"</string>
+			<key>beginCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.begin.dockerfile</string>
+				</dict>
+			</dict>
+			<key>end</key>
+			<string>"</string>
+			<key>endCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.end.dockerfile</string>
+				</dict>
+			</dict>
+			<key>name</key>
+			<string>string.quoted.double.dockerfile</string>
+			<key>patterns</key>
+			<array>
+				<dict>
+					<key>match</key>
+					<string>\\.</string>
+					<key>name</key>
+					<string>constant.character.escaped.dockerfile</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>begin</key>
+			<string>'</string>
+			<key>beginCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.begin.dockerfile</string>
+				</dict>
+			</dict>
+			<key>end</key>
+			<string>'</string>
+			<key>endCaptures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.string.end.dockerfile</string>
+				</dict>
+			</dict>
+			<key>name</key>
+			<string>string.quoted.single.dockerfile</string>
+			<key>patterns</key>
+			<array>
+				<dict>
+					<key>match</key>
+					<string>\\.</string>
+					<key>name</key>
+					<string>constant.character.escaped.dockerfile</string>
+				</dict>
+			</array>
+		</dict>
+		<dict>
+			<key>captures</key>
+			<dict>
+				<key>1</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.whitespace.comment.leading.dockerfile</string>
+				</dict>
+				<key>2</key>
+				<dict>
+					<key>name</key>
+					<string>comment.line.number-sign.dockerfile</string>
+				</dict>
+				<key>3</key>
+				<dict>
+					<key>name</key>
+					<string>punctuation.definition.comment.dockerfile</string>
+				</dict>
+			</dict>
+			<key>comment</key>
+			<string>comment.line</string>
+			<key>match</key>
+			<string>^(\s*)((#).*$\n?)</string>
+		</dict>
+	</array>
+	<key>scopeName</key>
+	<string>source.dockerfile</string>
+	<key>uuid</key>
+	<string>a39d8795-59d2-49af-aa00-fe74ee29576e</string>
+</dict>
+</plist>
diff --git a/contrib/syntax/textmate/Docker.tmbundle/info.plist b/contrib/syntax/textmate/Docker.tmbundle/info.plist
new file mode 100644
index 0000000..239f4b0
--- /dev/null
+++ b/contrib/syntax/textmate/Docker.tmbundle/info.plist
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>contactEmailRot13</key>
+	<string>germ@andz.com.ar</string>
+	<key>contactName</key>
+	<string>GermanDZ</string>
+	<key>description</key>
+	<string>Helpers for Docker.</string>
+	<key>name</key>
+	<string>Docker</string>
+	<key>uuid</key>
+	<string>8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1</string>
+</dict>
+</plist>
diff --git a/contrib/syntax/textmate/README.md b/contrib/syntax/textmate/README.md
new file mode 100644
index 0000000..ce61101
--- /dev/null
+++ b/contrib/syntax/textmate/README.md
@@ -0,0 +1,17 @@
+# Docker.tmbundle
+
+Dockerfile syntax highlighting for TextMate and Sublime Text.
+
+## Install
+
+### Sublime Text
+
+Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting).
+Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. + +enjoy. + diff --git a/contrib/syntax/textmate/REVIEWERS b/contrib/syntax/textmate/REVIEWERS new file mode 100644 index 0000000..965743d --- /dev/null +++ b/contrib/syntax/textmate/REVIEWERS @@ -0,0 +1 @@ +Asbjorn Enge (@asbjornenge) diff --git a/contrib/syntax/vim/LICENSE b/contrib/syntax/vim/LICENSE new file mode 100644 index 0000000..e67cdab --- /dev/null +++ b/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/syntax/vim/README.md b/contrib/syntax/vim/README.md new file mode 100644 index 0000000..5aa9bd8 --- /dev/null +++ b/contrib/syntax/vim/README.md @@ -0,0 +1,26 @@ +dockerfile.vim +============== + +Syntax highlighting for Dockerfiles + +Installation +------------ +With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... + +With [Vundle](https://github.com/gmarik/Vundle.vim) + + Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} + +Features +-------- + +The syntax highlighting includes: + +* The directives (e.g. `FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/contrib/syntax/vim/doc/dockerfile.txt b/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 0000000..e69e2b7 --- /dev/null +++ b/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. 
FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/contrib/syntax/vim/ftdetect/dockerfile.vim b/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 0000000..ee10e5d --- /dev/null +++ b/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 0000000..a067e6a --- /dev/null +++ b/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO) diff --git a/contrib/syscall-test/Dockerfile b/contrib/syscall-test/Dockerfile new file mode 100644 index 0000000..f95f175 --- /dev/null +++ b/contrib/syscall-test/Dockerfile @@ -0,0 +1,15 @@ +FROM buildpack-deps:jessie + +COPY . 
/usr/src/
+
+WORKDIR /usr/src/
+
+RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \
+	&& gcc -g -Wall -static ns.c -o /usr/bin/ns-test \
+	&& gcc -g -Wall -static acct.c -o /usr/bin/acct-test \
+	&& gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \
+	&& gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \
+	&& gcc -g -Wall -static socket.c -o /usr/bin/socket-test \
+	&& gcc -g -Wall -static raw.c -o /usr/bin/raw-test
+
+RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true
diff --git a/contrib/syscall-test/acct.c b/contrib/syscall-test/acct.c
new file mode 100644
index 0000000..88ac287
--- /dev/null
+++ b/contrib/syscall-test/acct.c
@@ -0,0 +1,16 @@
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+int main(int argc, char **argv)
+{
+	int err = acct("/tmp/t");
+	if (err == -1) {
+		fprintf(stderr, "acct failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/contrib/syscall-test/exit32.s b/contrib/syscall-test/exit32.s
new file mode 100644
index 0000000..8bbb5c5
--- /dev/null
+++ b/contrib/syscall-test/exit32.s
@@ -0,0 +1,7 @@
+.globl _start
+.text
+_start:
+	xorl %eax, %eax
+	incl %eax
+	movb $0, %bl
+	int $0x80
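ns.c (next) runs a command in fresh mount and PID namespaces via a raw clone(2) call. For comparison, the same experiment can be driven from Go through os/exec — a minimal sketch of ours, not part of the patch (Linux-only, needs root):

```
package main

import (
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// Run a shell in fresh mount and PID namespaces, mirroring
	// ns.c's CLONE_NEWNS|CLONE_NEWPID clone flags.
	cmd := exec.Command("/bin/sh")
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWPID,
	}
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```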
diff --git a/contrib/syscall-test/ns.c b/contrib/syscall-test/ns.c
new file mode 100644
index 0000000..6243886
--- /dev/null
+++ b/contrib/syscall-test/ns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0], args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process, returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// let's wait on our child process here before we, the parent, exit
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/contrib/syscall-test/raw.c b/contrib/syscall-test/raw.c
new file mode 100644
index 0000000..7995a0d
--- /dev/null
+++ b/contrib/syscall-test/raw.c
@@ -0,0 +1,14 @@
+#include <netinet/in.h>
+#include <stdio.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/contrib/syscall-test/setgid.c b/contrib/syscall-test/setgid.c
new file mode 100644
index 0000000..df9680c
--- /dev/null
+++ b/contrib/syscall-test/setgid.c
@@ -0,0 +1,11 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (setgid(1) == -1) {
+		perror("setgid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/contrib/syscall-test/setuid.c b/contrib/syscall-test/setuid.c
new file mode 100644
index 0000000..5b93967
--- /dev/null
+++ b/contrib/syscall-test/setuid.c
@@ -0,0 +1,11 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	if (setuid(1) == -1) {
+		perror("setuid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/contrib/syscall-test/socket.c b/contrib/syscall-test/socket.c
new file mode 100644
index 0000000..d26c82f
--- /dev/null
+++ b/contrib/syscall-test/socket.c
@@ -0,0 +1,30 @@
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main() {
+	int s;
+	struct sockaddr_in sin;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (s == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = INADDR_ANY;
+	sin.sin_port = htons(80);
+
+	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+		perror("bind");
+		return 1;
+	}
+
+	close(s);
+
+	return 0;
+}
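userns.c (next) clones with CLONE_NEWUSER alone. In Go, the same flag can be combined with a UID/GID mapping so the test also works unprivileged — a sketch under those assumptions, not part of the patch (Linux-only):

```
package main

import (
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// `id` should report uid=0 inside the new user namespace, while the
	// process still runs as the invoking user on the host.
	cmd := exec.Command("id")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags:  syscall.CLONE_NEWUSER,
		UidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: os.Getuid(), Size: 1}},
		GidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: os.Getgid(), Size: 1}},
	}
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```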
diff --git a/contrib/syscall-test/userns.c b/contrib/syscall-test/userns.c
new file mode 100644
index 0000000..4c5c8d3
--- /dev/null
+++ b/contrib/syscall-test/userns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0], args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWUSER | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process, returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// let's wait on our child process here before we, the parent, exit
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/contrib/udev/80-docker.rules b/contrib/udev/80-docker.rules
new file mode 100644
index 0000000..f934c01
--- /dev/null
+++ b/contrib/udev/80-docker.rules
@@ -0,0 +1,3 @@
+# hide docker's loopback devices from udisks, and thus from user desktops
+SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
diff --git a/contrib/vagrant-docker/README.md b/contrib/vagrant-docker/README.md
new file mode 100644
index 0000000..736c789
--- /dev/null
+++ b/contrib/vagrant-docker/README.md
@@ -0,0 +1,50 @@
+# Vagrant integration
+
+Currently there are at least 4 different projects that we are aware of that deal
+with integration with [Vagrant](http://vagrantup.com/) at different levels. One
+approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
+which means you can create containers and pull base images on VMs using Docker's
+CLI, and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
+meaning you can use Vagrant to control Docker containers.
+
+
+### Provisioners
+
+* [Vocker](https://github.com/fgrehm/vocker)
+* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
+
+### Providers
+
+* [docker-provider](https://github.com/fgrehm/docker-provider)
+* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
+
+## Setting up Vagrant-docker with the Engine API
+
+The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
+
+```
+description "Docker daemon"
+
+start on filesystem
+stop on runlevel [!2345]
+
+respawn
+
+script
+	/usr/bin/dockerd -H=tcp://0.0.0.0:2375
+end script
+```
+
+Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
+
+```
+ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost
+```
+
+(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH.
If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go new file mode 100644 index 0000000..2a418b2 --- /dev/null +++ b/daemon/apparmor_default.go @@ -0,0 +1,36 @@ +// +build linux + +package daemon + +import ( + "fmt" + + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func ensureDefaultAppArmorProfile() error { + if apparmor.IsEnabled() { + loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) + if err != nil { + return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) + } + + // Nothing to do. + if loaded { + return nil + } + + // Load the profile. + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err) + } + } + + return nil +} diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go new file mode 100644 index 0000000..cd2dd97 --- /dev/null +++ b/daemon/apparmor_default_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func ensureDefaultAppArmorProfile() error { + return nil +} diff --git a/daemon/archive.go b/daemon/archive.go new file mode 100644 index 0000000..bd00dac --- /dev/null +++ b/daemon/archive.go @@ -0,0 +1,362 @@ +package daemon + +import ( + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, err + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + // Make sure an online file-system operation is permitted. 
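+	// (Windows refuses this for running Hyper-V containers, whose filesystem
+	// is only visible inside the utility VM; see archive_windows.go.)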
+ if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. +func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return err + } + + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. 
If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir) + + if copyUIDGID { + var err error + // tarCopyOptions will appropriately pull in the right uid/gid for the + // user/group and will set the options. + options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir) + if err != nil { + return err + } + } + + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} diff --git a/daemon/archive_tarcopyoptions.go b/daemon/archive_tarcopyoptions.go new file mode 100644 index 0000000..fe7722f --- /dev/null +++ b/daemon/archive_tarcopyoptions.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "github.com/docker/docker/pkg/archive" +) + +// defaultTarCopyOptions is the setting that is used when unpacking an archive +// for a copy API event. +func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions { + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), + } +} diff --git a/daemon/archive_tarcopyoptions_unix.go b/daemon/archive_tarcopyoptions_unix.go new file mode 100644 index 0000000..83e6fd9 --- /dev/null +++ b/daemon/archive_tarcopyoptions_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + if container.Config.User == "" { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil + } + + user, err := idtools.LookupUser(container.Config.User) + if err != nil { + return nil, err + } + + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid}, + }, nil +} diff --git a/daemon/archive_tarcopyoptions_windows.go b/daemon/archive_tarcopyoptions_windows.go new file mode 100644 index 0000000..535efd2 --- /dev/null +++ b/daemon/archive_tarcopyoptions_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil +} diff --git a/daemon/archive_unix.go b/daemon/archive_unix.go new file mode 100644 index 0000000..d5dfad7 --- /dev/null +++ b/daemon/archive_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if 
the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + return nil +} diff --git a/daemon/archive_windows.go b/daemon/archive_windows.go new file mode 100644 index 0000000..ab10560 --- /dev/null +++ b/daemon/archive_windows.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "errors" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +// +// This is a no-op on Windows which does not support read-only volumes, or +// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + return false, nil +} + +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted (such as stat or for copying). Running Hyper-V containers +// cannot have their file-system interrogated from the host as the filter is +// loaded inside the utility VM, not the host. +// IMPORTANT: The container lock must NOT be held when calling this function. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + if !container.IsRunning() { + return nil + } + + // Determine isolation. If not specified in the hostconfig, use daemon default. + actualIsolation := container.HostConfig.Isolation + if containertypes.Isolation.IsDefault(containertypes.Isolation(actualIsolation)) { + actualIsolation = daemon.defaultIsolation + } + if containertypes.Isolation.IsHyperV(actualIsolation) { + return errors.New("filesystem operations against a running Hyper-V container are not supported") + } + return nil +} diff --git a/daemon/attach.go b/daemon/attach.go new file mode 100644 index 0000000..3241039 --- /dev/null +++ b/daemon/attach.go @@ -0,0 +1,186 @@ +package daemon + +import ( + "context" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. 
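+// A non-empty DetachKeys in the config (e.g. "ctrl-p,ctrl-q") is converted to
+// a byte sequence via term.ToBytes and used to detect detach requests on the
+// attached stdin.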
+func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + keys := []byte{} + var err error + if c.DetachKeys != "" { + keys, err = term.ToBytes(c.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid detach keys (%s) provided", c.DetachKeys) + } + } + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused, unpause the container before attach.", prefixOrName) + return errors.NewRequestConflictError(err) + } + if container.IsRestarting() { + err := fmt.Errorf("Container %s is restarting, wait until the container is running.", prefixOrName) + return errors.NewRequestConflictError(err) + } + + cfg := stream.AttachConfig{ + UseStdin: c.UseStdin, + UseStdout: c.UseStdout, + UseStderr: c.UseStderr, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + DetachKeys: keys, + } + container.StreamConfig.AttachStreams(&cfg) + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + if cfg.UseStdin { + cfg.Stdin = inStream + } + if cfg.UseStdout { + cfg.Stdout = outStream + } + if cfg.UseStderr { + cfg.Stderr = errStream + } + + if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + cfg := stream.AttachConfig{ + UseStdin: stdin != nil, + UseStdout: stdout != nil, + UseStderr: stderr != nil, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + } + container.StreamConfig.AttachStreams(&cfg) + close(attached) + if cfg.UseStdin { + cfg.Stdin = stdin + } + if cfg.UseStdout { + cfg.Stdout = stdout + } + if cfg.UseStderr { + cfg.Stderr = stderr + } + + return daemon.containerAttach(container, &cfg, false, doStream) +} + +func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error { + if logs { + logDriver, logCreated, err := daemon.getLogger(c) + if err != nil { + return err + } + if logCreated { + defer func() { + if err = logDriver.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + defer logs.Close() + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && cfg.Stdout != nil { + cfg.Stdout.Write(msg.Line) + } + if msg.Source == "stderr" && cfg.Stderr != nil { + cfg.Stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(c, "attach") + + if !doStream { + return nil + } + + if cfg.Stdin != nil { + r, w := io.Pipe() + go func(stdin io.ReadCloser) { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }(cfg.Stdin) + 
cfg.Stdin = r + } + + if !c.Config.OpenStdin { + cfg.Stdin = nil + } + + if c.Config.StdinOnce && !c.Config.Tty { + // Wait for the container to stop before returning. + waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) + defer func() { + _ = <-waitChan // Ignore returned exit code. + }() + } + + ctx := c.InitAttachContext() + err := <-c.StreamConfig.CopyStreams(ctx, cfg) + if err != nil { + if _, ok := err.(term.EscapeError); ok { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) + } + } + + return nil +} diff --git a/daemon/auth.go b/daemon/auth.go new file mode 100644 index 0000000..f5f4d7b --- /dev/null +++ b/daemon/auth.go @@ -0,0 +1,13 @@ +package daemon + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" +) + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) +} diff --git a/daemon/bindmount_solaris.go b/daemon/bindmount_solaris.go new file mode 100644 index 0000000..87bf3ef --- /dev/null +++ b/daemon/bindmount_solaris.go @@ -0,0 +1,5 @@ +// +build solaris + +package daemon + +const bindMountType = "lofs" diff --git a/daemon/bindmount_unix.go b/daemon/bindmount_unix.go new file mode 100644 index 0000000..3966bab --- /dev/null +++ b/daemon/bindmount_unix.go @@ -0,0 +1,5 @@ +// +build linux freebsd + +package daemon + +const bindMountType = "bind" diff --git a/daemon/build.go b/daemon/build.go new file mode 100644 index 0000000..9b518d6 --- /dev/null +++ b/daemon/build.go @@ -0,0 +1,196 @@ +package daemon + +import ( + "io" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type releaseableLayer struct { + released bool + layerStore layer.Store + roLayer layer.Layer + rwLayer layer.RWLayer +} + +func (rl *releaseableLayer) Mount() (string, error) { + var err error + var chainID layer.ChainID + if rl.roLayer != nil { + chainID = rl.roLayer.ChainID() + } + + mountID := stringid.GenerateRandomID() + rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil) + if err != nil { + return "", errors.Wrap(err, "failed to create rwlayer") + } + + return rl.rwLayer.Mount("") +} + +func (rl *releaseableLayer) Commit(platform string) (builder.ReleaseableLayer, error) { + var chainID layer.ChainID + if rl.roLayer != nil { + chainID = rl.roLayer.ChainID() + } + + stream, err := rl.rwLayer.TarStream() + if err != nil { + return nil, err + } + + newLayer, err := rl.layerStore.Register(stream, chainID, layer.Platform(platform)) + if err != nil { + return nil, err + } + + if layer.IsEmpty(newLayer.DiffID()) { + _, err := rl.layerStore.Release(newLayer) + return &releaseableLayer{layerStore: rl.layerStore}, err + } + return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer}, nil +} + +func (rl *releaseableLayer) DiffID() layer.DiffID { + if rl.roLayer == nil { + return 
layer.DigestSHA256EmptyTar + } + return rl.roLayer.DiffID() +} + +func (rl *releaseableLayer) Release() error { + if rl.released { + return nil + } + rl.released = true + rl.releaseRWLayer() + return rl.releaseROLayer() +} + +func (rl *releaseableLayer) releaseRWLayer() error { + if rl.rwLayer == nil { + return nil + } + metadata, err := rl.layerStore.ReleaseRWLayer(rl.rwLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + logrus.Errorf("Failed to release RWLayer: %s", err) + } + return err +} + +func (rl *releaseableLayer) releaseROLayer() error { + if rl.roLayer == nil { + return nil + } + metadata, err := rl.layerStore.Release(rl.roLayer) + layer.LogReleaseMetadata(metadata) + return err +} + +func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (builder.ReleaseableLayer, error) { + if img == nil || img.RootFS.ChainID() == "" { + return &releaseableLayer{layerStore: layerStore}, nil + } + // Hold a reference to the image layer so that it can't be removed before + // it is released + roLayer, err := layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) + } + return &releaseableLayer{layerStore: layerStore, roLayer: roLayer}, nil +} + +// TODO: could this use the regular daemon PullImage ? +func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform string) (*image.Image, error) { + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config, use it + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. +// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent +// leaking of layers. +func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) { + if refOrID == "" { + layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.Platform].layerStore) + return nil, layer, err + } + + if opts.PullOption != backend.PullOptionForcePull { + image, err := daemon.GetImage(refOrID) + if err != nil && opts.PullOption == backend.PullOptionNoPull { + return nil, nil, err + } + // TODO: shouldn't we error out if error is different from "not found" ? + if image != nil { + layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore) + return image, layer, err + } + } + + image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform) + if err != nil { + return nil, nil, err + } + layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore) + return image, layer, err +} + +// CreateImage creates a new image by adding a config and ID to the image store. +// This is similar to LoadImage() except that it receives JSON encoded bytes of +// an image instead of a tar archive. 
+func (daemon *Daemon) CreateImage(config []byte, parent string, platform string) (builder.Image, error) {
+	if platform == "" {
+		platform = runtime.GOOS
+	}
+	id, err := daemon.stores[platform].imageStore.Create(config)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to create image")
+	}
+
+	if parent != "" {
+		if err := daemon.stores[platform].imageStore.SetParent(id, image.ID(parent)); err != nil {
+			return nil, errors.Wrapf(err, "failed to set parent %s", parent)
+		}
+	}
+
+	return daemon.stores[platform].imageStore.Get(id)
+}
+
+// IDMappings returns uid/gid mappings for the builder
+func (daemon *Daemon) IDMappings() *idtools.IDMappings {
+	return daemon.idMappings
+}
diff --git a/daemon/cache.go b/daemon/cache.go
new file mode 100644
index 0000000..219b0b3
--- /dev/null
+++ b/daemon/cache.go
@@ -0,0 +1,27 @@
+package daemon
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/image/cache"
+)
+
+// MakeImageCache creates a stateful image cache.
+func (daemon *Daemon) MakeImageCache(sourceRefs []string, platform string) builder.ImageCache {
+	if len(sourceRefs) == 0 {
+		return cache.NewLocal(daemon.stores[platform].imageStore)
+	}
+
+	cache := cache.New(daemon.stores[platform].imageStore)
+
+	for _, ref := range sourceRefs {
+		img, err := daemon.GetImage(ref)
+		if err != nil {
+			logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err)
+			continue
+		}
+		cache.Populate(img)
+	}
+
+	return cache
+}
diff --git a/daemon/caps/utils_unix.go b/daemon/caps/utils_unix.go
new file mode 100644
index 0000000..c99485f
--- /dev/null
+++ b/daemon/caps/utils_unix.go
@@ -0,0 +1,131 @@
+// +build !windows
+
+package caps
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/syndtr/gocapability/capability"
+)
+
+var capabilityList Capabilities
+
+func init() {
+	last := capability.CAP_LAST_CAP
+	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+	if last == capability.Cap(63) {
+		last = capability.CAP_BLOCK_SUSPEND
+	}
+	for _, cap := range capability.List() {
+		if cap > last {
+			continue
+		}
+		capabilityList = append(capabilityList,
+			&CapabilityMapping{
+				Key:   "CAP_" + strings.ToUpper(cap.String()),
+				Value: cap,
+			},
+		)
+	}
+}
+
+type (
+	// CapabilityMapping maps linux capability name to its value of capability.Cap type
+	// Capabilities is one of the security systems in Linux Security Module (LSM)
+	// framework provided by the kernel.
+	// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
+	CapabilityMapping struct {
+		Key   string         `json:"key,omitempty"`
+		Value capability.Cap `json:"value,omitempty"`
+	}
+	// Capabilities contains all CapabilityMapping
+	Capabilities []*CapabilityMapping
+)
+
+// String returns <key> of CapabilityMapping
+func (c *CapabilityMapping) String() string {
+	return c.Key
+}
+
+// GetCapability returns CapabilityMapping which contains specific key
+func GetCapability(key string) *CapabilityMapping {
+	for _, capp := range capabilityList {
+		if capp.Key == key {
+			cpy := *capp
+			return &cpy
+		}
+	}
+	return nil
+}
+
+// GetAllCapabilities returns all of the capabilities
+func GetAllCapabilities() []string {
+	output := make([]string, len(capabilityList))
+	for i, capability := range capabilityList {
+		output[i] = capability.String()
+	}
+	return output
+}
+
+// TweakCapabilities can tweak capabilities by adding or dropping capabilities
+// based on the basics capabilities.
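+// For example, basics={"CAP_CHOWN","CAP_KILL"} with adds={"SYS_ADMIN"} and
+// drops={"KILL"} yields {"CAP_CHOWN","CAP_SYS_ADMIN"}; "all" in adds or drops
+// expands to the full capability list.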
+func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
+	var (
+		newCaps []string
+		allCaps = GetAllCapabilities()
+	)
+
+	// FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix
+	// Currently they are mixed in here. We should do conversion in one place.
+
+	// look for invalid cap in the drop list
+	for _, cap := range drops {
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		if !stringutils.InSlice(allCaps, "CAP_"+cap) {
+			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+		}
+	}
+
+	// handle --cap-add=all
+	if stringutils.InSlice(adds, "all") {
+		basics = allCaps
+	}
+
+	if !stringutils.InSlice(drops, "all") {
+		for _, cap := range basics {
+			// skip `all` already handled above
+			if strings.ToLower(cap) == "all" {
+				continue
+			}
+
+			// if we don't drop `all`, add back all the non-dropped caps
+			if !stringutils.InSlice(drops, cap[4:]) {
+				newCaps = append(newCaps, strings.ToUpper(cap))
+			}
+		}
+	}
+
+	for _, cap := range adds {
+		// skip `all` already handled above
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		cap = "CAP_" + cap
+
+		if !stringutils.InSlice(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+		}
+
+		// add cap if not already in the list
+		if !stringutils.InSlice(newCaps, cap) {
+			newCaps = append(newCaps, strings.ToUpper(cap))
+		}
+	}
+	return newCaps, nil
+}
diff --git a/daemon/changes.go b/daemon/changes.go
new file mode 100644
index 0000000..fc8cd27
--- /dev/null
+++ b/daemon/changes.go
@@ -0,0 +1,31 @@
+package daemon
+
+import (
+	"errors"
+	"runtime"
+	"time"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+// ContainerChanges returns a list of container fs changes
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+	start := time.Now()
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if runtime.GOOS == "windows" && container.IsRunning() {
+		return nil, errors.New("Windows does not support diff of a running container")
+	}
+
+	container.Lock()
+	defer container.Unlock()
+	c, err := container.RWLayer.Changes()
+	if err != nil {
+		return nil, err
+	}
+	containerActions.WithValues("changes").UpdateSince(start)
+	return c, nil
+}
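ContainerChanges above returns []archive.Change, the structure behind `docker diff`. A small illustration of how its Kind/Path pairs render (a standalone sketch assuming this source tree on the GOPATH; the sample paths are made up):

```
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Kind is one of ChangeModify, ChangeAdd, ChangeDelete; String()
	// renders the familiar "C /path", "A /path", "D /path" lines.
	changes := []archive.Change{
		{Path: "/etc/hosts", Kind: archive.ChangeModify},
		{Path: "/tmp/new", Kind: archive.ChangeAdd},
		{Path: "/var/log/old.log", Kind: archive.ChangeDelete},
	}
	for _, c := range changes {
		fmt.Println(c.String())
	}
}
```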
and is not a directory", checkpointAbsDir) + } + } else { + switch { + case err != nil: + err2 = fmt.Errorf("checkpoint %s does not exists for container %s", checkpointID, ctrName) + case err == nil && stat.IsDir(): + err2 = nil + case err == nil: + err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir) + } + } + return checkpointDir, err2 +} + +// CheckpointCreate checkpoints the process running in a container with CRIU +func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return fmt.Errorf("Container %s not running", name) + } + + if !validCheckpointNamePattern.MatchString(config.CheckpointID) { + return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) + } + + checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true) + if err != nil { + return fmt.Errorf("cannot checkpoint container %s: %s", name, err) + } + + err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit) + if err != nil { + return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) + } + + daemon.LogContainerEvent(container, "checkpoint") + + return nil +} + +// CheckpointDelete deletes the specified checkpoint +func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), false) + if err == nil { + return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) + } + return err +} + +// CheckpointList lists all checkpoints of the specified container +func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { + var out []types.Checkpoint + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false) + if err != nil { + return nil, err + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return nil, err + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt types.Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + + return out, nil +} diff --git a/daemon/cluster.go b/daemon/cluster.go new file mode 100644 index 0000000..d22970b --- /dev/null +++ b/daemon/cluster.go @@ -0,0 +1,26 @@ +package daemon + +import ( + apitypes "github.com/docker/docker/api/types" + lncluster "github.com/docker/libnetwork/cluster" +) + +// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster). 
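+//
+// A minimal sketch of how a caller might branch on the embedded
+// ClusterStatus (swarmRole is a hypothetical helper, not part of this patch):
+//
+//	func swarmRole(c Cluster) string {
+//		switch {
+//		case c.IsManager():
+//			return "manager"
+//		case c.IsAgent():
+//			return "worker"
+//		default:
+//			return "inactive"
+//		}
+//	}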
+type Cluster interface { + ClusterStatus + NetworkManager + SendClusterEvent(event lncluster.ConfigEventType) +} + +// ClusterStatus interface provides information about the Swarm status of the Cluster +type ClusterStatus interface { + IsAgent() bool + IsManager() bool +} + +// NetworkManager provides methods to manage networks +type NetworkManager interface { + GetNetwork(input string) (apitypes.NetworkResource, error) + GetNetworks() ([]apitypes.NetworkResource, error) + RemoveNetwork(input string) error +} diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go new file mode 100644 index 0000000..6874dbf --- /dev/null +++ b/daemon/cluster/cluster.go @@ -0,0 +1,439 @@ +package cluster + +// +// ## Swarmkit integration +// +// Cluster - static configurable object for accessing everything swarm related. +// Contains methods for connecting and controlling the cluster. Exists always, +// even if swarm mode is not enabled. +// +// NodeRunner - Manager for starting the swarmkit node. Is present only and +// always if swarm mode is enabled. Implements backoff restart loop in case of +// errors. +// +// NodeState - Information about the current node status including access to +// gRPC clients if a manager is active. +// +// ### Locking +// +// `cluster.controlMutex` - taken for the whole lifecycle of the processes that +// can reconfigure cluster(init/join/leave etc). Protects that one +// reconfiguration action has fully completed before another can start. +// +// `cluster.mu` - taken when the actual changes in cluster configurations +// happen. Different from `controlMutex` because in some cases we need to +// access current cluster state even if the long-running reconfiguration is +// going on. For example network stack may ask for the current cluster state in +// the middle of the shutdown. Any time current cluster state is asked you +// should take the read lock of `cluster.mu`. If you are writing an API +// responder that returns synchronously, hold `cluster.mu.RLock()` for the +// duration of the whole handler function. That ensures that node will not be +// shut down until the handler has finished. +// +// NodeRunner implements its internal locks that should not be used outside of +// the struct. Instead, you should just call `nodeRunner.State()` method to get +// the current state of the cluster(still need `cluster.mu.RLock()` to access +// `cluster.nr` reference itself). Most of the changes in NodeRunner happen +// because of an external event(network problem, unexpected swarmkit error) and +// Docker shouldn't take any locks that delay these changes from happening. 
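+//
+// A sketch of that read-lock pattern for a synchronous responder, using only
+// names defined in this file (exampleInspect itself is hypothetical and shown
+// purely to illustrate the convention):
+//
+//	func (c *Cluster) exampleInspect() (nodeState, error) {
+//		c.mu.RLock()
+//		defer c.mu.RUnlock()
+//		state := c.currentNodeState()
+//		if !state.IsActiveManager() {
+//			return nodeState{}, c.errNoManager(state)
+//		}
+//		return state, nil
+//	}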
+// + +import ( + "fmt" + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/pkg/signal" + lncluster "github.com/docker/libnetwork/cluster" + swarmapi "github.com/docker/swarmkit/api" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +const swarmDirName = "swarm" +const controlSocket = "control.sock" +const swarmConnectTimeout = 20 * time.Second +const swarmRequestTimeout = 20 * time.Second +const stateFile = "docker-state.json" +const defaultAddr = "0.0.0.0:2377" + +const ( + initialReconnectDelay = 100 * time.Millisecond + maxReconnectDelay = 30 * time.Second + contextPrefix = "com.docker.swarm" +) + +// errNoSwarm is returned on leaving a cluster that was never initialized +var errNoSwarm = errors.New("This node is not part of a swarm") + +// errSwarmExists is returned on initialize or join request for a cluster that has already been activated +var errSwarmExists = errors.New("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") + +// errSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. +var errSwarmJoinTimeoutReached = errors.New("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") + +// errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. +var errSwarmLocked = errors.New("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.") + +// errSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. +var errSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.") + +// NetworkSubnetsProvider exposes functions for retrieving the subnets +// of networks managed by Docker, so they can be filtered. +type NetworkSubnetsProvider interface { + Subnets() ([]net.IPNet, []net.IPNet) +} + +// Config provides values for Cluster. +type Config struct { + Root string + Name string + Backend executorpkg.Backend + NetworkSubnetsProvider NetworkSubnetsProvider + + // DefaultAdvertiseAddr is the default host/IP or network interface to use + // if no AdvertiseAddr value is specified. + DefaultAdvertiseAddr string + + // path to store runtime state, such as the swarm control socket + RuntimeRoot string + + // WatchStream is a channel to pass watch API notifications to daemon + WatchStream chan *swarmapi.WatchMessage +} + +// Cluster provides capabilities to participate in a cluster as a worker or a +// manager. +type Cluster struct { + mu sync.RWMutex + controlMutex sync.RWMutex // protect init/join/leave user operations + nr *nodeRunner + root string + runtimeRoot string + config Config + configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe + attachers map[string]*attacher + watchStream chan *swarmapi.WatchMessage +} + +// attacher manages the in-memory attachment state of a container +// attachment to a global scope network managed by swarm manager. 
It
+// helps in identifying the attachment ID via the taskID and the
+// corresponding attachment configuration obtained from the manager.
+type attacher struct {
+	taskID           string
+	config           *network.NetworkingConfig
+	inProgress       bool
+	attachWaitCh     chan *network.NetworkingConfig
+	attachCompleteCh chan struct{}
+	detachWaitCh     chan struct{}
+}
+
+// New creates a new Cluster instance using provided config.
+func New(config Config) (*Cluster, error) {
+	root := filepath.Join(config.Root, swarmDirName)
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
+	if config.RuntimeRoot == "" {
+		config.RuntimeRoot = root
+	}
+	if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil {
+		return nil, err
+	}
+	c := &Cluster{
+		root:        root,
+		config:      config,
+		configEvent: make(chan lncluster.ConfigEventType, 10),
+		runtimeRoot: config.RuntimeRoot,
+		attachers:   make(map[string]*attacher),
+		watchStream: config.WatchStream,
+	}
+	return c, nil
+}
+
+// Start the Cluster instance
+// TODO The split between New and Start can be joined again when the
+// SendClusterEvent method is no longer required
+func (c *Cluster) Start() error {
+	root := filepath.Join(c.config.Root, swarmDirName)
+
+	nodeConfig, err := loadPersistentState(root)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	nr, err := c.newNodeRunner(*nodeConfig)
+	if err != nil {
+		return err
+	}
+	c.nr = nr
+
+	select {
+	case <-time.After(swarmConnectTimeout):
+		logrus.Error("swarm component could not be started before timeout was reached")
+	case err := <-nr.Ready():
+		if err != nil {
+			logrus.WithError(err).Error("swarm component could not be started")
+			return nil
+		}
+	}
+	return nil
+}
+
+func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) {
+	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
+		return nil, err
+	}
+
+	actualLocalAddr := conf.LocalAddr
+	if actualLocalAddr == "" {
+		// If localAddr was not specified, resolve it automatically
+		// based on the route to joinAddr. localAddr can only be left
+		// empty on "join".
+		listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse listen address: %v", err)
+		}
+
+		listenAddrIP := net.ParseIP(listenHost)
+		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
+			actualLocalAddr = listenHost
+		} else {
+			if conf.RemoteAddr == "" {
+				// Should never happen except using swarms created by
+				// old versions that didn't save remoteAddr.
+				conf.RemoteAddr = "8.8.8.8:53"
+			}
+			conn, err := net.Dial("udp", conf.RemoteAddr)
+			if err != nil {
+				return nil, fmt.Errorf("could not find local IP address: %v", err)
+			}
+			localHostPort := conn.LocalAddr().String()
+			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
+			conn.Close()
+		}
+	}
+
+	nr := &nodeRunner{cluster: c}
+	nr.actualLocalAddr = actualLocalAddr
+
+	if err := nr.Start(conf); err != nil {
+		return nil, err
+	}
+
+	c.config.Backend.DaemonJoinsCluster(c)
+
+	return nr, nil
+}
+
+func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on quorum lost
+	return context.WithTimeout(context.Background(), swarmRequestTimeout)
+}
+
+// IsManager returns true if Cluster is participating as a manager.
+func (c *Cluster) IsManager() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.currentNodeState().IsActiveManager()
+}
+
+// IsAgent returns true if Cluster is participating as a worker/agent.
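+//
+// A hypothetical call site gating worker-only behaviour (sketch only):
+//
+//	if c.IsAgent() && !c.IsManager() {
+//		// this node participates purely as a worker
+//	}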
+func (c *Cluster) IsAgent() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().status == types.LocalNodeStateActive +} + +// GetLocalAddress returns the local address. +func (c *Cluster) GetLocalAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().actualLocalAddr +} + +// GetListenAddress returns the listen address. +func (c *Cluster) GetListenAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil { + return c.nr.config.ListenAddr + } + return "" +} + +// GetAdvertiseAddress returns the remotely reachable address of this node. +func (c *Cluster) GetAdvertiseAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil && c.nr.config.AdvertiseAddr != "" { + advertiseHost, _, _ := net.SplitHostPort(c.nr.config.AdvertiseAddr) + return advertiseHost + } + return c.currentNodeState().actualLocalAddr +} + +// GetDataPathAddress returns the address to be used for the data path traffic, if specified. +func (c *Cluster) GetDataPathAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil { + return c.nr.config.DataPathAddr + } + return "" +} + +// GetRemoteAddressList returns the advertise address for each of the remote managers if +// available. +func (c *Cluster) GetRemoteAddressList() []string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.getRemoteAddressList() +} + +func (c *Cluster) getRemoteAddressList() []string { + state := c.currentNodeState() + if state.swarmNode == nil { + return []string{} + } + + nodeID := state.swarmNode.NodeID() + remotes := state.swarmNode.Remotes() + addressList := make([]string, 0, len(remotes)) + for _, r := range remotes { + if r.NodeID != nodeID { + addressList = append(addressList, r.Addr) + } + } + return addressList +} + +// ListenClusterEvents returns a channel that receives messages on cluster +// participation changes. +// todo: make cancelable and accessible to multiple callers +func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType { + return c.configEvent +} + +// currentNodeState should not be called without a read lock +func (c *Cluster) currentNodeState() nodeState { + return c.nr.State() +} + +// errNoManager returns error describing why manager commands can't be used. +// Call with read lock. +func (c *Cluster) errNoManager(st nodeState) error { + if st.swarmNode == nil { + if errors.Cause(st.err) == errSwarmLocked { + return errSwarmLocked + } + if st.err == errSwarmCertificatesExpired { + return errSwarmCertificatesExpired + } + return errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + if st.swarmNode.Manager() != nil { + return errors.New("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") + } + return errors.New("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") +} + +// Cleanup stops active swarm node. This is run before daemon shutdown. 
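+//
+// A sketch of the intended ordering at shutdown (the surrounding teardown is
+// hypothetical, not part of this patch):
+//
+//	cluster.Cleanup() // stop the swarm node first...
+//	// ...then proceed with the rest of daemon shutdown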
+func (c *Cluster) Cleanup() {
+	c.controlMutex.Lock()
+	defer c.controlMutex.Unlock()
+
+	c.mu.Lock()
+	node := c.nr
+	if node == nil {
+		c.mu.Unlock()
+		return
+	}
+	state := c.currentNodeState()
+	c.mu.Unlock()
+
+	if state.IsActiveManager() {
+		active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID())
+		if err == nil {
+			singlenode := active && isLastManager(reachable, unreachable)
+			if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) {
+				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
+			}
+		}
+	}
+
+	if err := node.Stop(); err != nil {
+		logrus.Errorf("failed to shut down cluster node: %v", err)
+		signal.DumpStacks("")
+	}
+
+	c.mu.Lock()
+	c.nr = nil
+	c.mu.Unlock()
+}
+
+func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
+	if err != nil {
+		return false, 0, 0, err
+	}
+	for _, n := range nodes.Nodes {
+		if n.ManagerStatus != nil {
+			if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
+				reachable++
+				if n.ID == currentNodeID {
+					current = true
+				}
+			}
+			if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
+				unreachable++
+			}
+		}
+	}
+	return
+}
+
+func detectLockedError(err error) error {
+	if err == swarmnode.ErrInvalidUnlockKey {
+		return errors.WithStack(errSwarmLocked)
+	}
+	return err
+}
+
+func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	state := c.currentNodeState()
+	if !state.IsActiveManager() {
+		return c.errNoManager(state)
+	}
+
+	ctx, cancel := c.getRequestContext()
+	defer cancel()
+
+	return fn(ctx, state)
+}
+
+// SendClusterEvent allows sending cluster events on the configEvent channel
+// TODO This method should not be exposed.
+// Currently it is used to notify the network controller that the keys are
+// available
+func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	c.configEvent <- event
+}
diff --git a/daemon/cluster/configs.go b/daemon/cluster/configs.go
new file mode 100644
index 0000000..3d418c1
--- /dev/null
+++ b/daemon/cluster/configs.go
@@ -0,0 +1,117 @@
+package cluster
+
+import (
+	apitypes "github.com/docker/docker/api/types"
+	types "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/daemon/cluster/convert"
+	swarmapi "github.com/docker/swarmkit/api"
+	"golang.org/x/net/context"
+)
+
+// GetConfig returns a config from a managed swarm cluster
+func (c *Cluster) GetConfig(input string) (types.Config, error) {
+	var config *swarmapi.Config
+
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		s, err := getConfig(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+		config = s
+		return nil
+	}); err != nil {
+		return types.Config{}, err
+	}
+	return convert.ConfigFromGRPC(config), nil
+}
+
+// GetConfigs returns all configs of a managed swarm cluster.
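+//
+// A minimal usage sketch (hypothetical caller; assumes the standard
+// api/types/filters package for building the Filters field):
+//
+//	opts := apitypes.ConfigListOptions{Filters: filters.NewArgs()}
+//	configs, err := c.GetConfigs(opts)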
+func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListConfigsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListConfigs(ctx, + &swarmapi.ListConfigsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + configs := []types.Config{} + + for _, config := range r.Configs { + configs = append(configs, convert.ConfigFromGRPC(config)) + } + + return configs, nil +} + +// CreateConfig creates a new config in a managed swarm cluster. +func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) { + var resp *swarmapi.CreateConfigResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + configSpec := convert.ConfigSpecToGRPC(s) + + r, err := state.controlClient.CreateConfig(ctx, + &swarmapi.CreateConfigRequest{Spec: &configSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Config.ID, nil +} + +// RemoveConfig removes a config from a managed swarm cluster. +func (c *Cluster) RemoveConfig(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveConfigRequest{ + ConfigID: config.ID, + } + + _, err = state.controlClient.RemoveConfig(ctx, req) + return err + }) +} + +// UpdateConfig updates a config in a managed swarm cluster. +// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + configSpec := convert.ConfigSpecToGRPC(spec) + + _, err = state.controlClient.UpdateConfig(ctx, + &swarmapi.UpdateConfigRequest{ + ConfigID: config.ID, + ConfigVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &configSpec, + }) + return err + }) +} diff --git a/daemon/cluster/controllers/plugin/controller.go b/daemon/cluster/controllers/plugin/controller.go new file mode 100644 index 0000000..de7eb2c --- /dev/null +++ b/daemon/cluster/controllers/plugin/controller.go @@ -0,0 +1,79 @@ +package plugin + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// Controller is the controller for the plugin backend +type Controller struct{} + +// NewController returns a new cluster plugin controller +func NewController() (*Controller, error) { + return &Controller{}, nil +} + +// Update is the update phase from swarmkit +func (p *Controller) Update(ctx context.Context, t *api.Task) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Update") + return nil +} + +// Prepare is the prepare phase from swarmkit +func (p *Controller) Prepare(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Prepare") + return nil +} + +// Start is the start phase from swarmkit +func (p *Controller) Start(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Start") + return 
nil +} + +// Wait causes the task to wait until returned +func (p *Controller) Wait(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Wait") + return nil +} + +// Shutdown is the shutdown phase from swarmkit +func (p *Controller) Shutdown(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Shutdown") + return nil +} + +// Terminate is the terminate phase from swarmkit +func (p *Controller) Terminate(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Terminate") + return nil +} + +// Remove is the remove phase from swarmkit +func (p *Controller) Remove(ctx context.Context) error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Remove") + return nil +} + +// Close is the close phase from swarmkit +func (p *Controller) Close() error { + logrus.WithFields(logrus.Fields{ + "controller": "plugin", + }).Debug("Close") + return nil +} diff --git a/daemon/cluster/convert/config.go b/daemon/cluster/convert/config.go new file mode 100644 index 0000000..6b28712 --- /dev/null +++ b/daemon/cluster/convert/config.go @@ -0,0 +1,61 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// ConfigFromGRPC converts a grpc Config to a Config. +func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { + config := swarmtypes.Config{ + ID: s.ID, + Spec: swarmtypes.ConfigSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + config.Version.Index = s.Meta.Version.Index + // Meta + config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + return config +} + +// ConfigSpecToGRPC converts Config to a grpc Config. 
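+//
+// Round-trip sketch (hypothetical; Meta fields are left zero-valued here):
+//
+//	grpcSpec := ConfigSpecToGRPC(cfg.Spec)                   // engine -> swarmkit
+//	back := ConfigFromGRPC(&swarmapi.Config{Spec: grpcSpec}) // swarmkit -> engine
+//	// back.Spec.Name and back.Spec.Data match cfg.Spec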
+func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { + return swarmapi.ConfigSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference +func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { + refs := []*swarmtypes.ConfigReference{} + + for _, r := range s { + ref := &swarmtypes.ConfigReference{ + ConfigID: r.ConfigID, + ConfigName: r.ConfigName, + } + + if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { + ref.File = &swarmtypes.ConfigReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go new file mode 100644 index 0000000..a468c5f --- /dev/null +++ b/daemon/cluster/convert/container.go @@ -0,0 +1,353 @@ +package convert + +import ( + "errors" + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + container "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { + containerSpec := types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + Configs: configReferencesFromGRPC(c.Configs), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &types.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &types.CredentialSpec{} + switch c.Privileges.CredentialSpec.Source.(type) { + case *swarmapi.Privileges_CredentialSpec_File: + containerSpec.Privileges.CredentialSpec.File = c.Privileges.CredentialSpec.GetFile() + case *swarmapi.Privileges_CredentialSpec_Registry: + containerSpec.Privileges.CredentialSpec.Registry = c.Privileges.CredentialSpec.GetRegistry() + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if 
m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference { + refs := make([]*swarmapi.ConfigReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + if s.File != nil { + ref.Target = &swarmapi.ConfigReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { + refs := make([]*types.ConfigReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("config target not a file: config=%s", s.ConfigID) + continue + } + refs = append(refs, &types.ConfigReference{ + File: &types.ConfigReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + }) + } + + return refs +} + +func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + Configs: configReferencesToGRPC(c.Configs), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if 
c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod) + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &swarmapi.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &swarmapi.Privileges_CredentialSpec{} + + if c.Privileges.CredentialSpec.File != "" && c.Privileges.CredentialSpec.Registry != "" { + return nil, errors.New("cannot specify both \"file\" and \"registry\" credential specs") + } + if c.Privileges.CredentialSpec.File != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_File{ + File: c.Privileges.CredentialSpec.File, + } + } else if c.Privileges.CredentialSpec.Registry != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_Registry{ + Registry: c.Privileges.CredentialSpec.Registry, + } + } else { + return nil, errors.New("must either provide \"file\" or \"registry\" for credential spec") + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := gogotypes.DurationFromProto(h.Interval) + timeout, _ := gogotypes.DurationFromProto(h.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + StartPeriod: startPeriod, + } +} + +func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: h.Test, + Interval: gogotypes.DurationProto(h.Interval), + Timeout: gogotypes.DurationProto(h.Timeout), 
+ Retries: int32(h.Retries), + StartPeriod: gogotypes.DurationProto(h.StartPeriod), + } +} diff --git a/daemon/cluster/convert/network.go b/daemon/cluster/convert/network.go new file mode 100644 index 0000000..6f8b793 --- /dev/null +++ b/daemon/cluster/convert/network.go @@ -0,0 +1,239 @@ +package convert + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + netconst "github.com/docker/libnetwork/datastore" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + Ingress: IsIngressNetwork(n), + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + Scope: netconst.SwarmScope, + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + if n.Spec.GetNetwork() != "" { + network.Spec.ConfigFrom = &networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = &types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ + NetworkID: v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +func swarmPortConfigToAPIPortConfig(portConfig 
*swarmapi.PortConfig) types.PortConfig { + return types.PortConfig{ + Name: portConfig.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + } +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if spec.IPAM != nil { + if spec.IPAM.Driver != nil { + ipam.Driver = spec.IPAM.Driver.Name + ipam.Options = spec.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) + for _, ic := range spec.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: netconst.SwarmScope, + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Ingress: IsIngressNetwork(&n), + Labels: n.Spec.Annotations.Labels, + } + + if n.Spec.GetNetwork() != "" { + nr.ConfigFrom = networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. +func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { + ns := swarmapi.NetworkSpec{ + Annotations: swarmapi.Annotations{ + Name: create.Name, + Labels: create.Labels, + }, + DriverConfig: &swarmapi.Driver{ + Name: create.Driver, + Options: create.Options, + }, + Ipv6Enabled: create.EnableIPv6, + Internal: create.Internal, + Attachable: create.Attachable, + Ingress: create.Ingress, + } + if create.IPAM != nil { + driver := create.IPAM.Driver + if driver == "" { + driver = "default" + } + ns.IPAM = &swarmapi.IPAMOptions{ + Driver: &swarmapi.Driver{ + Name: driver, + Options: create.IPAM.Options, + }, + } + ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) + for _, ipamConfig := range create.IPAM.Config { + ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ + Subnet: ipamConfig.Subnet, + Range: ipamConfig.IPRange, + Gateway: ipamConfig.Gateway, + }) + } + ns.IPAM.Configs = ipamSpec + } + if create.ConfigFrom != nil { + ns.ConfigFrom = &swarmapi.NetworkSpec_Network{ + Network: create.ConfigFrom.Network, + } + } + return ns +} + +// IsIngressNetwork check if the swarm network is an ingress network +func IsIngressNetwork(n *swarmapi.Network) bool { + if n.Spec.Ingress { + return true + } + // Check if legacy defined ingress network + _, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"] + return ok && n.Spec.Annotations.Name == "ingress" +} diff --git a/daemon/cluster/convert/node.go b/daemon/cluster/convert/node.go new file mode 100644 index 0000000..f075783 --- /dev/null +++ b/daemon/cluster/convert/node.go @@ -0,0 +1,93 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// NodeFromGRPC converts a grpc Node to a Node. 
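+//
+// Typical use when materializing a node list from a swarmkit
+// ListNodesResponse r (sketch of a hypothetical caller):
+//
+//	nodes := make([]types.Node, 0, len(r.Nodes))
+//	for _, n := range r.Nodes {
+//		nodes = append(nodes, NodeFromGRPC(*n))
+//	}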
+func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + if n.Description.TLSInfo != nil { + node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) + node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey + node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. +func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.DesiredRole = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/daemon/cluster/convert/secret.go b/daemon/cluster/convert/secret.go new file mode 100644 index 0000000..91d6773 --- /dev/null +++ b/daemon/cluster/convert/secret.go @@ -0,0 +1,61 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. 
+func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { + secret := swarmtypes.Secret{ + ID: s.ID, + Spec: swarmtypes.SecretSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + secret.Version.Index = s.Meta.Version.Index + // Meta + secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + return secret +} + +// SecretSpecToGRPC converts Secret to a grpc Secret. +func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { + return swarmapi.SecretSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference +func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { + refs := []*swarmtypes.SecretReference{} + + for _, r := range s { + ref := &swarmtypes.SecretReference{ + SecretID: r.SecretID, + SecretName: r.SecretName, + } + + if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { + ref.File = &swarmtypes.SecretReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/daemon/cluster/convert/service.go b/daemon/cluster/convert/service.go new file mode 100644 index 0000000..3ab2129 --- /dev/null +++ b/daemon/cluster/convert/service.go @@ -0,0 +1,532 @@ +package convert + +import ( + "errors" + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/namesgenerator" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +var ( + // ErrUnsupportedRuntime returns an error if the runtime is not supported by the daemon + ErrUnsupportedRuntime = errors.New("unsupported runtime") +) + +// ServiceFromGRPC converts a grpc Service to a Service. 
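+//
+// The Meta version copied below is what callers send back for optimistic
+// concurrency on updates (sketch, hypothetical names):
+//
+//	svc, _ := ServiceFromGRPC(grpcService)
+//	version := svc.Version.Index // pass this with the next service update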
+func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { + curSpec, err := serviceSpecFromGRPC(&s.Spec) + if err != nil { + return types.Service{}, err + } + prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) + if err != nil { + return types.Service{}, err + } + service := types.Service{ + ID: s.ID, + Spec: *curSpec, + PreviousSpec: prevSpec, + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + // UpdateStatus + if s.UpdateStatus != nil { + service.UpdateStatus = &types.UpdateStatus{} + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + case swarmapi.UpdateStatus_ROLLBACK_STARTED: + service.UpdateStatus.State = types.UpdateStateRollbackStarted + case swarmapi.UpdateStatus_ROLLBACK_PAUSED: + service.UpdateStatus.State = types.UpdateStateRollbackPaused + case swarmapi.UpdateStatus_ROLLBACK_COMPLETED: + service.UpdateStatus.State = types.UpdateStateRollbackCompleted + } + + startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) + if !startedAt.IsZero() && startedAt.Unix() != 0 { + service.UpdateStatus.StartedAt = &startedAt + } + + completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) + if !completedAt.IsZero() && completedAt.Unix() != 0 { + service.UpdateStatus.CompletedAt = &completedAt + } + + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service, nil +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { + if spec == nil { + return nil, nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + serviceNetworks = append(serviceNetworks, netConfig) + + } + + taskTemplate := taskSpecFromGRPC(spec.Task) + + switch t := spec.Task.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container: + containerConfig := t.Container + taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) + taskTemplate.Runtime = types.RuntimeContainer + case *swarmapi.TaskSpec_Generic: + switch t.Generic.Kind { + case string(types.RuntimePlugin): + taskTemplate.Runtime = types.RuntimePlugin + default: + return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) + } + + default: + return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) + } + + convertedSpec := &types.ServiceSpec{ + Annotations: annotationsFromGRPC(spec.Annotations), + TaskTemplate: taskTemplate, + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update) + convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback) + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: &t.Replicated.Replicas, + } + } + + return convertedSpec, nil +} + +// 
ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. +func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + serviceNetworks = append(serviceNetworks, netConfig) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + taskNetworks = append(taskNetworks, netConfig) + + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + switch s.TaskTemplate.Runtime { + case types.RuntimeContainer, "": // if empty runtime default to container + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + case types.RuntimePlugin: + spec.Task.Runtime = &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: string(types.RuntimePlugin), + Payload: &gogotypes.Any{ + TypeUrl: string(types.RuntimeURLPlugin), + }, + }, + } + default: + return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime + } + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + var preferences []*swarmapi.PlacementPreference + for _, pref := range s.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + preferences = append(preferences, &swarmapi.PlacementPreference{ + Preference: &swarmapi.PlacementPreference_Spread{ + Spread: &swarmapi.SpreadOver{ + SpreadDescriptor: pref.Spread.SpreadDescriptor, + }, + }, + }) + } + } + var platforms []*swarmapi.Platform + for _, plat := range s.TaskTemplate.Placement.Platforms { + platforms = append(platforms, &swarmapi.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + Preferences: preferences, + Platforms: platforms, + } + } + + spec.Update, err = updateConfigToGRPC(s.UpdateConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = 
append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations { + a := types.Annotations{ + Name: ann.Name, + Labels: ann.Labels, + } + + if a.Labels == nil { + a.Labels = make(map[string]string) + } + + return a +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + } + } + + return resources +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := gogotypes.DurationFromProto(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := gogotypes.DurationFromProto(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + rp.Condition = swarmapi.RestartOnNone + case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + 
default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = gogotypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = gogotypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + if p == nil { + return nil + } + r := &types.Placement{ + Constraints: p.Constraints, + } + + for _, pref := range p.Preferences { + if spread := pref.GetSpread(); spread != nil { + r.Preferences = append(r.Preferences, types.PlacementPreference{ + Spread: &types.SpreadOver{ + SpreadDescriptor: spread.SpreadDescriptor, + }, + }) + } + } + + for _, plat := range p.Platforms { + r.Platforms = append(r.Platforms, types.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig { + if updateConfig == nil { + return nil + } + + converted := &types.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + converted.Delay = updateConfig.Delay + if updateConfig.Monitor != nil { + converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor) + } + + switch updateConfig.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + converted.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + converted.FailureAction = types.UpdateFailureActionContinue + case swarmapi.UpdateConfig_ROLLBACK: + converted.FailureAction = types.UpdateFailureActionRollback + } + + switch updateConfig.Order { + case swarmapi.UpdateConfig_STOP_FIRST: + converted.Order = types.UpdateOrderStopFirst + case swarmapi.UpdateConfig_START_FIRST: + converted.Order = types.UpdateOrderStartFirst + } + + return converted +} + +func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) { + if updateConfig == nil { + return nil, nil + } + + converted := &swarmapi.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + Delay: updateConfig.Delay, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + switch updateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + converted.FailureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + converted.FailureAction = swarmapi.UpdateConfig_CONTINUE + case types.UpdateFailureActionRollback: + converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK + default: + return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction) + } + if updateConfig.Monitor != 0 { + converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor) + } + + switch updateConfig.Order { + case types.UpdateOrderStopFirst, "": + converted.Order = swarmapi.UpdateConfig_STOP_FIRST + case types.UpdateOrderStartFirst: + converted.Order = swarmapi.UpdateConfig_START_FIRST + default: + return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order) + } + + return converted, nil +} + +func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) 
types.TaskSpec { + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) + for _, n := range taskSpec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + taskNetworks = append(taskNetworks, netConfig) + } + + c := taskSpec.GetContainer() + cSpec := types.ContainerSpec{} + if c != nil { + cSpec = containerSpecFromGRPC(c) + } + + return types.TaskSpec{ + ContainerSpec: cSpec, + Resources: resourcesFromGRPC(taskSpec.Resources), + RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), + Placement: placementFromGRPC(taskSpec.Placement), + LogDriver: driverFromGRPC(taskSpec.LogDriver), + Networks: taskNetworks, + ForceUpdate: taskSpec.ForceUpdate, + } +} diff --git a/daemon/cluster/convert/service_test.go b/daemon/cluster/convert/service_test.go new file mode 100644 index 0000000..92d80d3 --- /dev/null +++ b/daemon/cluster/convert/service_test.go @@ -0,0 +1,148 @@ +package convert + +import ( + "testing" + + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + google_protobuf3 "github.com/gogo/protobuf/types" +) + +func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) { + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimeContainer { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimeContainer, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) { + kind := string(swarmtypes.RuntimePlugin) + url := swarmtypes.RuntimeURLPlugin + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: kind, + Payload: &google_protobuf3.Any{ + TypeUrl: string(url), + }, + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimePlugin { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimePlugin, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: swarmtypes.RuntimePlugin, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Generic) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Generic") + } + + if v.Generic.Payload.TypeUrl != string(swarmtypes.RuntimeURLPlugin) { + t.Fatalf("expected url %s; received %s", swarmtypes.RuntimeURLPlugin, v.Generic.Payload.TypeUrl) + } +} + +func TestServiceConvertToGRPCContainerRuntime(t *testing.T) { + image := "alpine:latest" + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: swarmtypes.ContainerSpec{ + Image: image, 
+ }, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Container) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Container") + } + + if v.Container.Image != image { + t.Fatalf("expected image %s; received %s", image, v.Container.Image) + } +} + +func TestServiceConvertToGRPCGenericRuntimeCustom(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: "customruntime", + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + if _, err := ServiceSpecToGRPC(s); err != ErrUnsupportedRuntime { + t.Fatal(err) + } +} diff --git a/daemon/cluster/convert/swarm.go b/daemon/cluster/convert/swarm.go new file mode 100644 index 0000000..2ea89b9 --- /dev/null +++ b/daemon/cluster/convert/swarm.go @@ -0,0 +1,148 @@ +package convert + +import ( + "fmt" + "strings" + "time" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + gogotypes "github.com/gogo/protobuf/types" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. +func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + CAConfig: types.CAConfig{ + // do not include the signing CA cert or key (it should already be redacted via the swarm APIs) - + // the key because it's secret, and the cert because otherwise doing a get + update on the spec + // can cause issues because the key would be missing and the cert wouldn't + ForceRotate: c.Spec.CAConfig.ForceRotate, + }, + }, + TLSInfo: types.TLSInfo{ + TrustRoot: string(c.RootCA.CACert), + }, + RootRotationInProgress: c.RootCA.RootRotation != nil, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) + if err == nil && issuerInfo != nil { + swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject + swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey + } + + heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + CACert: string(ca.CACert), + }) + } + + // Meta + swarm.Version.Index = c.Meta.Version.Index + swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Annotations = 
annotationsFromGRPC(c.Spec.Annotations) + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. +func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. + if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + if s.CAConfig.SigningCACert != "" { + spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) + } + if s.CAConfig.SigningCAKey != "" { + // do propagate the signing CA key here because we want to provide it TO the swarm APIs + spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) + } + spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + CACert: []byte(ca.CACert), + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/daemon/cluster/convert/task.go b/daemon/cluster/convert/task.go new file mode 100644 index 0000000..b90d24e --- /dev/null +++ b/daemon/cluster/convert/task.go @@ -0,0 +1,66 @@ +package convert + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// TaskFromGRPC converts a grpc Task to a Task. 
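// A minimal usage sketch of the merge semantics in MergeSwarmSpecToGRPC
// above (illustrative values; assumes the vendored swarmkit types): zero
// values in the incoming types.Spec leave the initial ClusterSpec alone,
// so a get-then-update round trip only changes fields the caller set.
//
//	cur := swarmapi.ClusterSpec{}
//	cur.Raft.SnapshotInterval = 10000
//
//	var upd types.Spec
//	upd.Raft.ElectionTick = 10 // the only non-zero field
//
//	merged, _ := MergeSwarmSpecToGRPC(upd, cur)
//	// merged.Raft.SnapshotInterval == 10000 (preserved)
//	// merged.Raft.ElectionTick == 10       (updated)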
+func TaskFromGRPC(t swarmapi.Task) types.Task { + if t.Spec.GetAttachment() != nil { + return types.Task{} + } + containerStatus := t.Status.GetContainer() + + task := types.Task{ + ID: t.ID, + Annotations: annotationsFromGRPC(t.Annotations), + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: taskSpecFromGRPC(t.Spec), + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt) + task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID + task.Status.ContainerStatus.PID = int(containerStatus.PID) + task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task +} diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go new file mode 100644 index 0000000..fbe9006 --- /dev/null +++ b/daemon/cluster/executor/backend.go @@ -0,0 +1,64 @@ +package executor + +import ( + "io" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + containerpkg "github.com/docker/docker/container" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/plugin" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +// Backend defines the executor component for a swarm agent. 
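// The converters above lean on the generated protobuf enum tables: names
// such as "RUNNING" are lowercased into the engine's string-typed states.
// A small sketch of the round trip (assumes the vendored swarmkit API;
// values are illustrative):
//
//	t := swarmapi.Task{
//		ID:           "abc123",
//		DesiredState: swarmapi.TaskStateRunning,
//	}
//	converted := TaskFromGRPC(t)
//	fmt.Println(converted.DesiredState) // "running"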
+type Backend interface { + CreateManagedNetwork(clustertypes.NetworkCreateRequest) error + DeleteManagedNetwork(name string) error + FindNetwork(idName string) (libnetwork.Network, error) + SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) + ReleaseIngress() (<-chan struct{}, error) + PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + ActivateContainerServiceBinding(containerName string) error + DeactivateContainerServiceBinding(containerName string) error + UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error + ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerKill(name string, sig uint64) error + SetContainerDependencyStore(name string, store exec.DependencyGetter) error + SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error + SystemInfo() (*types.Info, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + Containers(config *types.ContainerListOptions) ([]*types.Container, error) + SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error + DaemonJoinsCluster(provider cluster.Provider) + DaemonLeavesCluster() + IsSwarmCompatible() error + SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(listener chan interface{}) + UpdateAttachment(string, string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) + PluginManager() *plugin.Manager + PluginGetter() *plugin.Store +} diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go new file mode 100644 index 0000000..92e4947 --- /dev/null +++ b/daemon/cluster/executor/container/adapter.go @@ -0,0 +1,472 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork" 
+ "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. +type containerAdapter struct { + backend executorpkg.Backend + container *containerConfig + dependencies exec.DependencyGetter +} + +func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + container: ctnr, + backend: b, + dependencies: dependencies, + }, nil +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + spec := c.container.spec() + + // Skip pulling if the image is referenced by image ID. + if _, err := digest.Parse(spec.Image); err == nil { + return nil + } + + // Skip pulling if the image is referenced by digest and already + // exists locally. + named, err := reference.ParseNormalizedNamed(spec.Image) + if err == nil { + if _, ok := named.(reference.Canonical); ok { + _, err := c.backend.LookupImage(spec.Image) + if err == nil { + return nil + } + } + } + + // if the image needs to be pulled, the auth config will be retrieved and updated + var encodedAuthConfig string + if spec.PullOptions != nil { + encodedAuthConfig = spec.PullOptions.RegistryAuth + } + + authConfig := &types.AuthConfig{} + if encodedAuthConfig != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + pr, pw := io.Pipe() + metaHeaders := map[string][]string{} + go func() { + // TODO @jhowardmsft LCOW Support: This will need revisiting as + // the stack is built up to include LCOW support for swarm. 
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw) + pw.CloseWithError(err) + }() + + dec := json.NewDecoder(pr) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + // limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + ncr, err := c.container.networkCreateRequest(network) + if err != nil { + return err + } + + if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing + if _, ok := err.(libnetwork.NetworkNameError); ok { + continue + } + + return err + } + } + + return nil +} + +func (c *containerAdapter) removeNetworks(ctx context.Context) error { + for _, nid := range c.container.networks() { + if err := c.backend.DeleteManagedNetwork(nid); err != nil { + switch err.(type) { + case *libnetwork.ActiveEndpointsError: + continue + case libnetwork.ErrNoSuchNetwork: + continue + default: + log.G(ctx).Errorf("network %s remove failed: %v", nid, err) + return err + } + } + } + + return nil +} + +func (c *containerAdapter) networkAttach(ctx context.Context) error { + config := c.container.createNetworkingConfig(c.backend) + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config) +} + +func (c *containerAdapter) waitForDetach(ctx context.Context) error { + config := c.container.createNetworkingConfig(c.backend) + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id()) +} + +func (c *containerAdapter) create(ctx context.Context) error { + var cr containertypes.ContainerCreateCreatedBody + var err error + if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + Name: c.container.name(), + Config: c.container.config(), + HostConfig: c.container.hostConfig(), + // Use the first network in container create + NetworkingConfig: c.container.createNetworkingConfig(c.backend), + }); err != nil 
{ + return err + } + + // Docker daemon currently doesn't support multiple networks in container create + // Connect to all other networks + nc := c.container.connectNetworkingConfig(c.backend) + + if nc != nil { + for n, ep := range nc.EndpointsConfig { + if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil { + return err + } + } + } + + container := c.container.task.Spec.GetContainer() + if container == nil { + return errors.New("unable to get container from task spec") + } + + if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil { + return err + } + + // configure secrets + secretRefs := convert.SecretReferencesFromGRPC(container.Secrets) + if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil { + return err + } + + configRefs := convert.ConfigReferencesFromGRPC(container.Configs) + if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil { + return err + } + + if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil { + return err + } + + return nil +} + +// checkMounts ensures that the provided mounts won't have any host-specific +// problems at startup. For example, we disallow bind mounts without an +// existing path, which is slightly different from the container API. +func (c *containerAdapter) checkMounts() error { + spec := c.container.spec() + for _, mount := range spec.Mounts { + switch mount.Type { + case api.MountTypeBind: + if _, err := os.Stat(mount.Source); os.IsNotExist(err) { + return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) + } + } + } + + return nil +} + +func (c *containerAdapter) start(ctx context.Context) error { + if err := c.checkMounts(); err != nil { + return err + } + + return c.backend.ContainerStart(c.container.name(), nil, "", "") +} + +func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { + cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) + if ctx.Err() != nil { + return types.ContainerJSON{}, ctx.Err() + } + if err != nil { + return types.ContainerJSON{}, err + } + return *cs, nil +} + +// events issues a call to the events API and returns a channel with all +// events. The stream of events can be shut down by cancelling the context.
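// The pull-progress loop in pullImage above bounds log volume with a token
// bucket from golang.org/x/time/rate (at most one entry per second) but
// always logs when the status string changes. The throttling pattern in
// isolation (a sketch; statuses is a hypothetical input):
//
//	limiter := rate.NewLimiter(rate.Every(time.Second), 1)
//	last := ""
//	for _, status := range statuses {
//		if limiter.Allow() || status != last {
//			fmt.Println("pull status:", status)
//		}
//		last = status
//	}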
+func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { + log.G(ctx).Debugf("waiting on events") + buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) + eventsq := make(chan events.Message, len(buffer)) + + for _, event := range buffer { + eventsq <- event + } + + go func() { + defer c.backend.UnsubscribeFromEvents(l) + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + log.G(ctx).Warnf("unexpected event message: %q", ev) + continue + } + select { + case eventsq <- jev: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return eventsq +} + +func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) { + return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning) +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to nil (daemon will use the stopTimeout of the container) + var stopgrace *int + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgraceValue := int(spec.StopGracePeriod.Seconds) + stopgrace = &stopgraceValue + } + return c.backend.ContainerStop(c.container.name(), stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ + RemoveVolume: true, + ForceRemove: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.task.Spec.GetContainer().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + + // Check if this volume exists on the engine + if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. + // It returns an error if the driver name is different - that is a valid error + return err + } + + } + + return nil +} + +func (c *containerAdapter) activateServiceBinding() error { + return c.backend.ActivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) deactivateServiceBinding() error { + return c.backend.DeactivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) { + apiOptions := &types.ContainerLogsOptions{ + Follow: options.Follow, + + // Always say yes to Timestamps and Details. we make the decision + // of whether to return these to the user or not way higher up the + // stack. + Timestamps: true, + Details: true, + } + + if options.Since != nil { + since, err := gogotypes.TimestampFromProto(options.Since) + if err != nil { + return nil, err + } + // print since as this formatted string because the docker container + // logs interface expects it like this. 
+ // see github.com/docker/docker/api/types/time.ParseTimestamps + apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond())) + } + + if options.Tail < 0 { + // See protobuf documentation for details of how this works. + apiOptions.Tail = fmt.Sprint(-options.Tail - 1) + } else if options.Tail > 0 { + return nil, errors.New("tail relative to start of logs not supported via docker API") + } + + if len(options.Streams) == 0 { + // empty == all + apiOptions.ShowStdout, apiOptions.ShowStderr = true, true + } else { + for _, stream := range options.Streams { + switch stream { + case api.LogStreamStdout: + apiOptions.ShowStdout = true + case api.LogStreamStderr: + apiOptions.ShowStderr = true + } + } + } + msgs, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions) + if err != nil { + return nil, err + } + return msgs, nil +} + +// todo: typed/wrapped errors +func isContainerCreateNameConflict(err error) bool { + return strings.Contains(err.Error(), "Conflict. The name") +} + +func isUnknownContainer(err error) bool { + return strings.Contains(err.Error(), "No such container:") +} + +func isStoppedContainer(err error) bool { + return strings.Contains(err.Error(), "is already stopped") +} diff --git a/daemon/cluster/executor/container/attachment.go b/daemon/cluster/executor/container/attachment.go new file mode 100644 index 0000000..54f95a1 --- /dev/null +++ b/daemon/cluster/executor/container/attachment.go @@ -0,0 +1,81 @@ +package container + +import ( + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// networkAttacherController implements agent.Controller against docker's API. +// +// networkAttacherController manages the lifecycle of network +// attachment of a docker unmanaged container managed as a task from +// agent point of view. It provides network attachment information to +// the unmanaged container for it to attach to the network and run. +type networkAttacherController struct { + backend executorpkg.Backend + task *api.Task + adapter *containerAdapter + closed chan struct{} +} + +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, dependencies) + if err != nil { + return nil, err + } + + return &networkAttacherController{ + backend: b, + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error { + return nil +} + +func (nc *networkAttacherController) Prepare(ctx context.Context) error { + // Make sure all the networks that the task needs are created. 
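// The Since value built in the logs options above is rendered as
// "unixseconds.nanoseconds" because that is a form the engine's timestamp
// parser accepts. The formatting step in isolation (a sketch):
//
//	func formatSince(t time.Time) string {
//		return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond()))
//	}
//
//	// formatSince(time.Unix(1500000000, 42)) == "1500000000.000000042"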
+ if err := nc.adapter.createNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Start(ctx context.Context) error { + return nc.adapter.networkAttach(ctx) +} + +func (nc *networkAttacherController) Wait(pctx context.Context) error { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + return nc.adapter.waitForDetach(ctx) +} + +func (nc *networkAttacherController) Shutdown(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Terminate(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Remove(ctx context.Context) error { + // Try removing the network referenced in this task in case this + // task is the last one referencing it + if err := nc.adapter.removeNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Close() error { + return nil +} diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go new file mode 100644 index 0000000..501da32 --- /dev/null +++ b/daemon/cluster/executor/container/container.go @@ -0,0 +1,675 @@ +package container + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" + netconst "github.com/docker/libnetwork/datastore" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/template" + gogotypes "github.com/gogo/protobuf/types" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. No methods should +// return an error if this function returns without error. 
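// The cpuQuotaPeriod constant above feeds the CFS arithmetic in resources()
// further down: the period is expressed in microseconds and the quota
// scales linearly with NanoCPUs. A worked sketch (values illustrative):
//
//	period := int64(100*time.Millisecond) / int64(time.Microsecond) // 100000
//	var nanoCPUs int64 = 1500000000                                 // 1.5 CPUs
//	quota := nanoCPUs * period / 1e9                                // 150000
//
// so a 1.5-CPU limit becomes CPUPeriod=100000, CPUQuota=150000.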
+func newContainerConfig(t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(t) +} + +func (c *containerConfig) setTask(t *api.Task) error { + if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { + return exec.ErrRuntimeUnsupported + } + + container := t.Spec.GetContainer() + if container != nil { + if container.Image == "" { + return ErrImageRequired + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + + if t.Spec.GetContainer() != nil { + preparedSpec, err := template.ExpandContainerSpec(t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + } + + return nil +} + +func (c *containerConfig) id() string { + attachment := c.task.Spec.GetAttachment() + if attachment == nil { + return "" + } + + return attachment.ContainerID +} + +func (c *containerConfig) taskID() string { + return c.task.ID +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) nameOrID() string { + if c.task.Spec.GetContainer() != nil { + return c.name() + } + + return c.id() +} + +func (c *containerConfig) name() string { + if c.task.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return c.task.Annotations.Name + } + + slot := fmt.Sprint(c.task.Slot) + if slot == "" || c.task.Slot == 0 { + slot = c.task.NodeID + } + + // fallback to service.slot.id. 
+ return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) +} + +func (c *containerConfig) image() string { + raw := c.spec().Image + ref, err := reference.ParseNormalizedNamed(raw) + if err != nil { + return raw + } + return reference.FamiliarString(reference.TagNameOnly(ref)) +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + config := &enginecontainer.Config{ + Labels: c.labels(), + StopSignal: c.spec().StopSignal, + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + User: c.spec().User, + Env: c.spec().Env, + Hostname: c.spec().Hostname, + WorkingDir: c.spec().Dir, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) + config.Cmd = append(config.Cmd, c.spec().Args...) + } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": c.name(), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. 
+ for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeTmpfs: + mount.Type = enginemount.TypeTmpfs + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &enginemount.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + return mount +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := gogotypes.DurationFromProto(hcSpec.Interval) + timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + StartPeriod: startPeriod, + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + ReadonlyRootfs: c.spec().ReadOnly, + } + + if c.spec().DNSConfig != nil { + hc.DNS = c.spec().DNSConfig.Nameservers + hc.DNSSearch = c.spec().DNSConfig.Search + hc.DNSOptions = c.spec().DNSConfig.Options + } + + c.applyPrivileges(hc) + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
+ // However, the format of ExtraHosts in HostConfig is + // hostname:IP + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + if len(c.task.Networks) > 0 { + labels := c.task.Networks[0].Network.Spec.Annotations.Labels + name := c.task.Networks[0].Network.Spec.Annotations.Name + if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" { + hc.NetworkMode = enginecontainer.NetworkMode(name) + } + } + + return hc +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + if mount.VolumeOptions != nil { + return &volumetypes.VolumesCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } + } + return nil +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. + resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +// Docker daemon supports just 1 network during container create. +func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { + networks = c.task.Networks + } + + epConfig := make(map[string]*network.EndpointSettings) + if len(networks) > 0 { + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b) + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create +func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil { + networks = c.task.Networks + } + // First network is used during container create.
Other networks are used in "docker network connect" + if len(networks) < 2 { + return nil + } + + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range networks[1:] { + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b) + } + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + n := &network.EndpointSettings{ + NetworkID: na.Network.ID, + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + DriverOpts: na.DriverAttachmentOpts, + } + if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil { + n.NetworkID = ln.ID() + } + } + return n +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, eVip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if eVip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(eVip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { + if len(c.task.Networks) == 0 { + return nil + } + + logrus.Debugf("Creating service config in agent for t = %+v", c.task) + svcCfg := &clustertypes.ServiceConfig{ + Name: c.task.ServiceAnnotations.Name, + Aliases: make(map[string][]string), + ID: c.task.ServiceID, + VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), + } + + for _, na := range c.task.Networks { + svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ + // We support only IPv4 virtual IP for now. + IPv4: c.virtualIP(na.Network.ID), + } + if len(na.Aliases) > 0 { + svcCfg.Aliases[na.Network.ID] = na.Aliases + } + } + + if c.task.Endpoint != nil { + for _, ePort := range c.task.Endpoint.Ports { + if ePort.PublishMode != api.PublishModeIngress { + continue + } + + svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ + Name: ePort.Name, + Protocol: int32(ePort.Protocol), + TargetPort: ePort.TargetPort, + PublishedPort: ePort.PublishedPort, + }) + } + } + + return svcCfg +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. 
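// The hosts handling in hostConfig above flips swarmkit's hosts(5) order
// ("IP_address canonical_hostname [aliases...]") into the engine's
// "hostname:IP" form. The conversion in isolation (a sketch; note that
// aliases after the first hostname are dropped):
//
//	parts := strings.Fields("10.0.0.2 db db-alias")
//	if len(parts) > 1 {
//		fmt.Println(fmt.Sprintf("%s:%s", parts[1], parts[0])) // "db:10.0.0.2"
//	}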
+func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { + na, ok := c.networksAttachments[name] + if !ok { + return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + // ID: na.Network.ID, + Labels: na.Network.Spec.Annotations.Labels, + Internal: na.Network.Spec.Internal, + Attachable: na.Network.Spec.Attachable, + Ingress: convert.IsIngressNetwork(na.Network), + EnableIPv6: na.Network.Spec.Ipv6Enabled, + CheckDuplicate: true, + Scope: netconst.SwarmScope, + } + + if na.Network.Spec.GetNetwork() != "" { + options.ConfigFrom = &network.ConfigReference{ + Network: na.Network.Spec.GetNetwork(), + } + } + + if na.Network.DriverState != nil { + options.Driver = na.Network.DriverState.Name + options.Options = na.Network.DriverState.Options + } + if na.Network.IPAM != nil { + options.IPAM = &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + } + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + } + + return clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: name, + NetworkCreate: options, + }, + }, nil +} + +func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) { + privileges := c.spec().Privileges + if privileges == nil { + return + } + + credentials := privileges.CredentialSpec + if credentials != nil { + switch credentials.Source.(type) { + case *api.Privileges_CredentialSpec_File: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile()) + case *api.Privileges_CredentialSpec_Registry: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry()) + } + } + + selinux := privileges.SELinuxContext + if selinux != nil { + if selinux.Disable { + hc.SecurityOpt = append(hc.SecurityOpt, "label=disable") + } + if selinux.User != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User) + } + if selinux.Role != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role) + } + if selinux.Level != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level) + } + if selinux.Type != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type) + } + } +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go new file mode 100644 index 0000000..8e95816 --- /dev/null +++ b/daemon/cluster/executor/container/controller.go @@ -0,0 +1,697 @@ +package container + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" 
+ "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +const defaultGossipConvergeDelay = 2 * time.Second + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, only read after pulled closed +} + +var _ exec.Controller = &controller{} + +// NewController returns a docker exec runner for the provided task. +func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, dependencies) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (r *controller) Task() (*api.Task, error) { + return r.task, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update tasks a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. + return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. + if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + if r.pulled == nil { + // Fork the pull to a different context to allow pull to continue + // on re-entrant calls to Prepare. This ensures that Prepare can be + // idempotent and not incur the extra cost of pulling when + // cancelled on updates. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. + + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. 
This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + } + + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. + if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + for { + if err := r.adapter.start(ctx); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + // Retry network creation again if we + // failed because some of the networks + // were not found. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + continue + } + + return errors.Wrap(err, "starting container failed") + } + + break + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" { + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) + return err + } + return nil + } + + // wait for container to be healthy + eventq := r.adapter.events(ctx) + + var healthErr error + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } else if ctnr.State.ExitCode != 0 { + return &exitError{code: ctnr.State.ExitCode, cause: healthErr} + } + + return nil + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. + return ErrContainerDestroyed + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + // set health check error, and wait for container to fully exit ("die" event) + healthErr = ErrContainerUnhealthy + case "health_status: healthy": + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) + return err + } + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. 
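+// In addition to surfacing the exit status, Wait races a health watcher
+// (checkHealth, at the bottom of this file) against the container's wait
+// channel. A minimal sketch of the happy path, using names defined here:
+//
+//	waitC, _ := r.adapter.wait(ctx)
+//	if status := <-waitC; status.ExitCode() != 0 {
+//		return &exitError{code: status.ExitCode()}
+//	}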
+func (r *controller) Wait(pctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(pctx)
+ defer cancel()
+
+ healthErr := make(chan error, 1)
+ go func() {
+ ectx, cancel := context.WithCancel(ctx) // cancel event context on first event
+ defer cancel()
+ if err := r.checkHealth(ectx); err == ErrContainerUnhealthy {
+ healthErr <- ErrContainerUnhealthy
+ if err := r.Shutdown(ectx); err != nil {
+ log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy")
+ }
+ }
+ }()
+
+ waitC, err := r.adapter.wait(ctx)
+ if err != nil {
+ return err
+ }
+
+ if status := <-waitC; status.ExitCode() != 0 {
+ exitErr := &exitError{
+ code: status.ExitCode(),
+ }
+
+ // Set the cause if it is knowable.
+ select {
+ case e := <-healthErr:
+ exitErr.cause = e
+ default:
+ if status.Err() != nil {
+ exitErr.cause = status.Err()
+ }
+ }
+
+ return exitErr
+ }
+
+ return nil
+}
+
+func (r *controller) hasServiceBinding() bool {
+ if r.task == nil {
+ return false
+ }
+
+ // service is attached to a network besides the default bridge
+ for _, na := range r.task.Networks {
+ if na.Network == nil ||
+ na.Network.DriverState == nil ||
+ na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" {
+ continue
+ }
+ return true
+ }
+
+ return false
+}
+
+// Shutdown the container cleanly.
+func (r *controller) Shutdown(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ if r.cancelPull != nil {
+ r.cancelPull()
+ }
+
+ if r.hasServiceBinding() {
+ // remove container from service binding
+ if err := r.adapter.deactivateServiceBinding(); err != nil {
+ log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name())
+ // Don't return an error here, because failure to deactivate
+ // the service binding is expected if the container was never
+ // started.
+ }
+
+ // add a delay for gossip converge
+ // TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay.
+ time.Sleep(defaultGossipConvergeDelay)
+ }
+
+ if err := r.adapter.shutdown(ctx); err != nil {
+ if isUnknownContainer(err) || isStoppedContainer(err) {
+ return nil
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+// Terminate the container, with force.
+func (r *controller) Terminate(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ if r.cancelPull != nil {
+ r.cancelPull()
+ }
+
+ if err := r.adapter.terminate(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+// Remove the container and its resources.
+func (r *controller) Remove(ctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ if r.cancelPull != nil {
+ r.cancelPull()
+ }
+
+ // It may be necessary to shut down the task before removing it.
+ if err := r.Shutdown(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+ // This may fail if the task was already shut down.
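+ // Note: Shutdown also sleeps for defaultGossipConvergeDelay after
+ // deactivating a service binding, so removing a task attached to a
+ // non-bridge network pays that delay here as well.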
+ log.G(ctx).WithError(err).Debug("shutdown failed on removal")
+ }
+
+ // Try removing networks referenced in this task in case this
+ // task is the last one referencing it
+ if err := r.adapter.removeNetworks(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+ return err
+ }
+
+ if err := r.adapter.remove(ctx); err != nil {
+ if isUnknownContainer(err) {
+ return nil
+ }
+
+ return err
+ }
+ return nil
+}
+
+// waitReady waits for a container to be "ready".
+// Ready means it's past the started state.
+func (r *controller) waitReady(pctx context.Context) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(pctx)
+ defer cancel()
+
+ eventq := r.adapter.events(ctx)
+
+ ctnr, err := r.adapter.inspect(ctx)
+ if err != nil {
+ if !isUnknownContainer(err) {
+ return errors.Wrap(err, "inspect container failed")
+ }
+ } else {
+ switch ctnr.State.Status {
+ case "running", "exited", "dead":
+ return nil
+ }
+ }
+
+ for {
+ select {
+ case event := <-eventq:
+ if !r.matchevent(event) {
+ continue
+ }
+
+ switch event.Action {
+ case "start":
+ return nil
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-r.closed:
+ return r.err
+ }
+ }
+}
+
+func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error {
+ if err := r.checkClosed(); err != nil {
+ return err
+ }
+
+ // if we're following, wait for this container to be ready. there is a
+ // problem here: if the container will never be ready (for example, it has
+ // been totally deleted) then this will wait forever. however, this doesn't
+ // actually cause any UI issues, and shouldn't be a problem. the stuck wait
+ // will go away when the follow (context) is canceled.
+ if options.Follow {
+ if err := r.waitReady(ctx); err != nil {
+ return errors.Wrap(err, "container not ready for logs")
+ }
+ }
+ // if we're not following, we're not gonna wait for the container to be
+ // ready. just call logs. if the container isn't ready, the call will fail
+ // and return an error. no big deal, we don't care, we only want the logs
+ // we can get RIGHT NOW with no follow
+
+ logsContext, cancel := context.WithCancel(ctx)
+ msgs, err := r.adapter.logs(logsContext, options)
+ defer cancel()
+ if err != nil {
+ return errors.Wrap(err, "failed getting container logs")
+ }
+
+ var (
+ // use a rate limiter to keep things under control but also provide some
+ // ability to coalesce messages.
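+ // Note: rate.Every(time.Second) yields a refill rate of one token per
+ // second, so in practice the 10<<20 burst below is what bounds how
+ // much log data can flow before WaitN starts blocking.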
+ limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s
+ msgctx = api.LogContext{
+ NodeID: r.task.NodeID,
+ ServiceID: r.task.ServiceID,
+ TaskID: r.task.ID,
+ }
+ )
+
+ for {
+ msg, ok := <-msgs
+ if !ok {
+ // we're done here, no more messages
+ return nil
+ }
+
+ if msg.Err != nil {
+ // the deferred cancel closes the adapter's log stream
+ return msg.Err
+ }
+
+ // wait here for the limiter to catch up
+ if err := limiter.WaitN(ctx, len(msg.Line)); err != nil {
+ return errors.Wrap(err, "failed rate limiter")
+ }
+ tsp, err := gogotypes.TimestampProto(msg.Timestamp)
+ if err != nil {
+ return errors.Wrap(err, "failed to convert timestamp")
+ }
+ var stream api.LogStream
+ if msg.Source == "stdout" {
+ stream = api.LogStreamStdout
+ } else if msg.Source == "stderr" {
+ stream = api.LogStreamStderr
+ }
+
+ // parse the details out of the Attrs map
+ attrs := []api.LogAttr{}
+ for k, v := range msg.Attrs {
+ attr := api.LogAttr{Key: k, Value: v}
+ attrs = append(attrs, attr)
+ }
+
+ if err := publisher.Publish(ctx, api.LogMessage{
+ Context: msgctx,
+ Timestamp: tsp,
+ Stream: stream,
+ Attrs: attrs,
+ Data: msg.Line,
+ }); err != nil {
+ return errors.Wrap(err, "failed to publish log message")
+ }
+ }
+}
+
+// Close the runner and clean up any ephemeral resources.
+func (r *controller) Close() error {
+ select {
+ case <-r.closed:
+ return r.err
+ default:
+ if r.cancelPull != nil {
+ r.cancelPull()
+ }
+
+ r.err = exec.ErrControllerClosed
+ close(r.closed)
+ }
+ return nil
+}
+
+func (r *controller) matchevent(event events.Message) bool {
+ if event.Type != events.ContainerEventType {
+ return false
+ }
+
+ // TODO(stevvooe): Filter based on ID matching, in addition to name.
+
+ // Make sure the events are for this container.
+ if event.Actor.Attributes["name"] != r.adapter.container.name() {
+ return false
+ }
+
+ return true
+}
+
+func (r *controller) checkClosed() error {
+ select {
+ case <-r.closed:
+ return r.err
+ default:
+ return nil
+ }
+}
+
+func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
+ status := &api.ContainerStatus{
+ ContainerID: ctnr.ID,
+ PID: int32(ctnr.State.Pid),
+ ExitCode: int32(ctnr.State.ExitCode),
+ }
+
+ return status, nil
+}
+
+func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) {
+ status := &api.PortStatus{}
+
+ if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 {
+ exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports)
+ if err != nil {
+ return nil, err
+ }
+ status.Ports = exposedPorts
+ }
+
+ return status, nil
+}
+
+func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) {
+ exposedPorts := make([]*api.PortConfig, 0, len(portMap))
+
+ for portProtocol, mapping := range portMap {
+ parts := strings.SplitN(string(portProtocol), "/", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid port mapping: %s", portProtocol)
+ }
+
+ port, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return nil, err
+ }
+
+ protocol := api.ProtocolTCP
+ switch strings.ToLower(parts[1]) {
+ case "tcp":
+ protocol = api.ProtocolTCP
+ case "udp":
+ protocol = api.ProtocolUDP
+ default:
+ return nil, fmt.Errorf("invalid protocol: %s", parts[1])
+ }
+
+ for _, binding := range mapping {
+ hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(aluzzardi): We're losing the port `name` here since
+ // there's no way to retrieve it back from the Engine.
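+ // For illustration: portProtocol "8080/tcp" with HostPort "32768"
+ // becomes PortConfig{Protocol: TCP, TargetPort: 8080,
+ // PublishedPort: 32768, PublishMode: Host}.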
+ exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} + +type exitError struct { + code int + cause error +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.code) +} + +func (e *exitError) Cause() error { + return e.cause +} + +// checkHealth blocks until unhealthy container is detected or ctx exits +func (r *controller) checkHealth(ctx context.Context) error { + eventq := r.adapter.events(ctx) + + for { + select { + case <-ctx.Done(): + return nil + case <-r.closed: + return nil + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "health_status: unhealthy": + return ErrContainerUnhealthy + } + } + } +} diff --git a/daemon/cluster/executor/container/errors.go b/daemon/cluster/executor/container/errors.go new file mode 100644 index 0000000..535d9b5 --- /dev/null +++ b/daemon/cluster/executor/container/errors.go @@ -0,0 +1,17 @@ +package container + +import ( + "errors" +) + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = errors.New("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. + ErrContainerDestroyed = errors.New("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container") +) diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go new file mode 100644 index 0000000..03a00cc --- /dev/null +++ b/daemon/cluster/executor/container/executor.go @@ -0,0 +1,241 @@ +package container + +import ( + "fmt" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/controllers/plugin" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + "golang.org/x/net/context" +) + +type executor struct { + backend executorpkg.Backend + dependencies exec.DependencyManager +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(b executorpkg.Backend) exec.Executor { + return &executor{ + backend: b, + dependencies: agent.NewDependencyManager(), + } +} + +// Describe returns the underlying node description from the docker client. 
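+// The description is assembled from the engine's SystemInfo: v1 and v2
+// plugins are folded into a sorted PluginDescription list, and resources
+// are reported in swarmkit units. For illustration, a node with 4 CPUs
+// and 8 GiB of memory yields Resources{NanoCPUs: 4e9, MemoryBytes: 8 << 30}.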
+func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.backend.SystemInfo() + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. + addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + addPlugins("Log", info.Plugins.Log) + + // add v2 plugins + v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs()) + if err == nil { + for _, plgn := range v2Plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix != "docker" || !plgn.Enabled { + continue + } + plgnTyp := typ.Capability + switch typ.Capability { + case "volumedriver": + plgnTyp = "Volume" + case "networkdriver": + plgnTyp = "Network" + case "logdriver": + plgnTyp = "Log" + } + + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + }, + } + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + na := node.Attachment + if na == nil { + e.backend.ReleaseIngress() + return nil + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + Ingress: true, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + _, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: na.Network.Spec.Annotations.Name, + NetworkCreate: options, + }, + }, na.Addresses[0]) + + return err +} + +// Controller returns a docker container runner. 
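+// Dispatch is driven by the task spec: attachment tasks get a network
+// attacher controller, generic runtimes are resolved by name (only the
+// plugin runtime is supported here), and container tasks get the
+// controller defined in controller.go.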
+func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + dependencyGetter := agent.Restrict(e.dependencies, t) + + if t.Spec.GetAttachment() != nil { + return newNetworkAttacherController(e.backend, t, dependencyGetter) + } + + var ctlr exec.Controller + switch r := t.Spec.GetRuntime().(type) { + case *api.TaskSpec_Generic: + logrus.WithFields(logrus.Fields{ + "kind": r.Generic.Kind, + "type_url": r.Generic.Payload.TypeUrl, + }).Debug("custom runtime requested") + runtimeKind, err := naming.Runtime(t.Spec) + if err != nil { + return ctlr, err + } + switch runtimeKind { + case string(swarmtypes.RuntimePlugin): + c, err := plugin.NewController() + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime type: %q", r.Generic.Kind) + } + case *api.TaskSpec_Container: + c, err := newController(e.backend, t, dependencyGetter) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime: %q", r) + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { + nwKeys := []*networktypes.EncryptionKey{} + for _, key := range keys { + nwKey := &networktypes.EncryptionKey{ + Subsystem: key.Subsystem, + Algorithm: int32(key.Algorithm), + Key: make([]byte, len(key.Key)), + LamportTime: key.LamportTime, + } + copy(nwKey.Key, key.Key) + nwKeys = append(nwKeys, nwKey) + } + e.backend.SetNetworkBootstrapKeys(nwKeys) + + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.dependencies.Secrets() +} + +func (e *executor) Configs() exec.ConfigsManager { + return e.dependencies.Configs() +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/daemon/cluster/executor/container/health_test.go b/daemon/cluster/executor/container/health_test.go new file mode 100644 index 0000000..b6f1885 --- /dev/null +++ b/daemon/cluster/executor/container/health_test.go @@ -0,0 +1,100 @@ +// +build !windows + +package container + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/events" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func TestHealthStates(t *testing.T) { + + // set up environment: events, task, container .... 
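+ // The controller under test subscribes to this events.Events instance
+ // through the daemon backend; the LogContainerEvent calls below stand
+ // in for real engine events.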
+ e := events.New()
+ _, l, _ := e.Subscribe()
+ defer e.Evict(l)
+
+ task := &api.Task{
+ ID: "id",
+ ServiceID: "sid",
+ Spec: api.TaskSpec{
+ Runtime: &api.TaskSpec_Container{
+ Container: &api.ContainerSpec{
+ Image: "image_name",
+ Labels: map[string]string{
+ "com.docker.swarm.task.id": "id",
+ },
+ },
+ },
+ },
+ Annotations: api.Annotations{Name: "name"},
+ }
+
+ c := &container.Container{
+ ID: "id",
+ Name: "name",
+ Config: &containertypes.Config{
+ Image: "image_name",
+ Labels: map[string]string{
+ "com.docker.swarm.task.id": "id",
+ },
+ },
+ }
+
+ daemon := &daemon.Daemon{
+ EventsService: e,
+ }
+
+ controller, err := newController(daemon, task, nil)
+ if err != nil {
+ t.Fatalf("failed to create controller: %v", err)
+ }
+
+ errChan := make(chan error, 1)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // fire checkHealth
+ go func() {
+ err := controller.checkHealth(ctx)
+ select {
+ case errChan <- err:
+ case <-ctx.Done():
+ }
+ }()
+
+ // send an event and expect to get expectedErr
+ // if expectedErr is nil, shouldn't get any error
+ logAndExpect := func(msg string, expectedErr error) {
+ daemon.LogContainerEvent(c, msg)
+
+ timer := time.NewTimer(1 * time.Second)
+ defer timer.Stop()
+
+ select {
+ case err := <-errChan:
+ if err != expectedErr {
+ t.Fatalf("expected error %v, but got %v", expectedErr, err)
+ }
+ case <-timer.C:
+ if expectedErr != nil {
+ t.Fatal("time limit exceeded, didn't get expected error")
+ }
+ }
+ }
+
+ // events that are ignored by checkHealth
+ logAndExpect("health_status: running", nil)
+ logAndExpect("health_status: healthy", nil)
+ logAndExpect("die", nil)
+
+ // unhealthy event will be caught by checkHealth
+ logAndExpect("health_status: unhealthy", ErrContainerUnhealthy)
+}
diff --git a/daemon/cluster/executor/container/validate.go b/daemon/cluster/executor/container/validate.go
new file mode 100644
index 0000000..af17f5b
--- /dev/null
+++ b/daemon/cluster/executor/container/validate.go
@@ -0,0 +1,40 @@
+package container
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+
+ "github.com/docker/swarmkit/api"
+)
+
+func validateMounts(mounts []api.Mount) error {
+ for _, mount := range mounts {
+ // Target must always be absolute
+ if !filepath.IsAbs(mount.Target) {
+ return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target)
+ }
+
+ switch mount.Type {
+ // The checks on abs paths are required due to the container API confusing
+ // volume mounts as bind mounts when the source is absolute (and vice-versa)
+ // See #25253
+ // TODO: This is probably not necessary once #22373 is merged
+ case api.MountTypeBind:
+ if !filepath.IsAbs(mount.Source) {
+ return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source)
+ }
+ case api.MountTypeVolume:
+ if filepath.IsAbs(mount.Source) {
+ return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source)
+ }
+ case api.MountTypeTmpfs:
+ if mount.Source != "" {
+ return errors.New("invalid tmpfs source, source must be empty")
+ }
+ default:
+ return fmt.Errorf("invalid mount type: %s", mount.Type)
+ }
+ }
+ return nil
+}
diff --git a/daemon/cluster/executor/container/validate_test.go b/daemon/cluster/executor/container/validate_test.go
new file mode 100644
index 0000000..9d98e2c
--- /dev/null
+++ b/daemon/cluster/executor/container/validate_test.go
@@ -0,0 +1,141 @@
+package container
+
+import (
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/daemon"
+ 
"github.com/docker/docker/pkg/stringid" + "github.com/docker/swarmkit/api" +) + +func newTestControllerWithMount(m api.Mount) (*controller, error) { + return newController(&daemon.Daemon{}, &api.Task{ + ID: stringid.GenerateRandomID(), + ServiceID: stringid.GenerateRandomID(), + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + Mounts: []api.Mount{m}, + }, + }, + }, + }, nil) +} + +func TestControllerValidateMountBind(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with non-existing source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsNonExistent, + Target: testAbsPath, + }); err != nil { + t.Fatalf("controller should not error at creation: %v", err) + } + + // with proper source + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountVolume(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: testAbsPath, + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: "foo", + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountTarget(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsPath, + Target: "foo", + }); err == nil || !strings.Contains(err.Error(), "invalid mount target") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountTmpfs(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountInvalidType(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.Mount_MountType(9999), + Source: "foo", + Target: testAbsPath, + }); err == nil || 
!strings.Contains(err.Error(), "invalid mount type") { + t.Fatalf("expected error, got: %v", err) + } +} diff --git a/daemon/cluster/executor/container/validate_unix_test.go b/daemon/cluster/executor/container/validate_unix_test.go new file mode 100644 index 0000000..c616eee --- /dev/null +++ b/daemon/cluster/executor/container/validate_unix_test.go @@ -0,0 +1,8 @@ +// +build !windows + +package container + +const ( + testAbsPath = "/foo" + testAbsNonExistent = "/some-non-existing-host-path/" +) diff --git a/daemon/cluster/executor/container/validate_windows_test.go b/daemon/cluster/executor/container/validate_windows_test.go new file mode 100644 index 0000000..c346451 --- /dev/null +++ b/daemon/cluster/executor/container/validate_windows_test.go @@ -0,0 +1,8 @@ +// +build windows + +package container + +const ( + testAbsPath = `c:\foo` + testAbsNonExistent = `c:\some-non-existing-host-path\` +) diff --git a/daemon/cluster/filters.go b/daemon/cluster/filters.go new file mode 100644 index 0000000..0a004af --- /dev/null +++ b/daemon/cluster/filters.go @@ -0,0 +1,121 @@ +package cluster + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/filters" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" +) + +func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "role": true, + "membership": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + f := &swarmapi.ListNodesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + } + + for _, r := range filter.Get("role") { + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { + f.Roles = append(f.Roles, swarmapi.NodeRole(role)) + } else if r != "" { + return nil, fmt.Errorf("Invalid role filter: '%s'", r) + } + } + + for _, a := range filter.Get("membership") { + if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { + f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) + } else if a != "" { + return nil, fmt.Errorf("Invalid membership filter: '%s'", a) + } + } + + return f, nil +} + +func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "service": true, + "node": true, + "desired-state": true, + // UpToDate is not meant to be exposed to users. It's for + // internal use in checking create/update progress. Therefore, + // we prefix it with a '_'. 
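+ // (Illustrative: an internal caller would set it through the
+ // transformFunc hook below, e.g. filter.Add("_up-to-date", "true").)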
+ "_up-to-date": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + if transformFunc != nil { + if err := transformFunc(filter); err != nil { + return nil, err + } + } + f := &swarmapi.ListTasksRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + ServiceIDs: filter.Get("service"), + NodeIDs: filter.Get("node"), + UpToDate: len(filter.Get("_up-to-date")) != 0, + } + + for _, s := range filter.Get("desired-state") { + if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { + f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) + } else if s != "" { + return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) + } + } + + return f, nil +} + +func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { + accepted := map[string]bool{ + "names": true, + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListSecretsRequest_Filters{ + Names: filter.Get("names"), + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} + +func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListConfigsRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/daemon/cluster/filters_test.go b/daemon/cluster/filters_test.go new file mode 100644 index 0000000..fd0c8c3 --- /dev/null +++ b/daemon/cluster/filters_test.go @@ -0,0 +1,102 @@ +package cluster + +import ( + "testing" + + "github.com/docker/docker/api/types/filters" +) + +func TestNewListSecretsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validNamesFilter := filters.NewArgs() + validNamesFilter.Add("names", "test_name") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + validAllFilter.Add("names", "test_name") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validNamesFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListSecretsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListSecretsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} + +func TestNewListConfigsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + 
validIDFilter := filters.NewArgs()
+ validIDFilter.Add("id", "7c9009d6720f6de3b492f5")
+
+ validLabelFilter := filters.NewArgs()
+ validLabelFilter.Add("label", "type=test")
+ validLabelFilter.Add("label", "storage=ssd")
+ validLabelFilter.Add("label", "memory")
+
+ validAllFilter := filters.NewArgs()
+ validAllFilter.Add("name", "nodeName")
+ validAllFilter.Add("id", "7c9009d6720f6de3b492f5")
+ validAllFilter.Add("label", "type=test")
+ validAllFilter.Add("label", "memory")
+
+ validFilters := []filters.Args{
+ validNameFilter,
+ validIDFilter,
+ validLabelFilter,
+ validAllFilter,
+ }
+
+ invalidTypeFilter := filters.NewArgs()
+ invalidTypeFilter.Add("nonexist", "aaaa")
+
+ invalidFilters := []filters.Args{
+ invalidTypeFilter,
+ }
+
+ for _, filter := range validFilters {
+ if _, err := newListConfigsFilters(filter); err != nil {
+ t.Fatalf("Should get no error, got %v", err)
+ }
+ }
+
+ for _, filter := range invalidFilters {
+ if _, err := newListConfigsFilters(filter); err == nil {
+ t.Fatalf("Should get an error for filter %v, but got nil", filter)
+ }
+ }
+}
diff --git a/daemon/cluster/helpers.go b/daemon/cluster/helpers.go
new file mode 100644
index 0000000..a74118c
--- /dev/null
+++ b/daemon/cluster/helpers.go
@@ -0,0 +1,245 @@
+package cluster
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/errors"
+ swarmapi "github.com/docker/swarmkit/api"
+ "golang.org/x/net/context"
+)
+
+func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) {
+ rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{})
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Clusters) == 0 {
+ return nil, errors.NewRequestNotFoundError(errNoSwarm)
+ }
+
+ // TODO: assume one cluster only
+ return rl.Clusters[0], nil
+}
+
+func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) {
+ // GetNode to match via full ID.
+ if rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}); err == nil {
+ return rg.Node, nil
+ }
+
+ // If any error (including NotFound), ListNodes to match via full name.
+ rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{
+ Filters: &swarmapi.ListNodesRequest_Filters{
+ Names: []string{input},
+ },
+ })
+ if err != nil || len(rl.Nodes) == 0 {
+ // If any error or 0 result, ListNodes to match via ID prefix.
+ rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{
+ Filters: &swarmapi.ListNodesRequest_Filters{
+ IDPrefixes: []string{input},
+ },
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rl.Nodes) == 0 {
+ err := fmt.Errorf("node %s not found", input)
+ return nil, errors.NewRequestNotFoundError(err)
+ }
+
+ if l := len(rl.Nodes); l > 1 {
+ return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l)
+ }
+
+ return rl.Nodes[0], nil
+}
+
+func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) {
+ // GetService to match via full ID.
+ if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil {
+ return rg.Service, nil
+ }
+
+ // If any error (including NotFound), ListServices to match via full name.
+ rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{
+ Filters: &swarmapi.ListServicesRequest_Filters{
+ Names: []string{input},
+ },
+ })
+ if err != nil || len(rl.Services) == 0 {
+ // If any error or 0 result, ListServices to match via ID prefix.
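+ // The same resolution order is used by all helpers in this file:
+ // exact ID first, then exact name, then unambiguous ID prefix;
+ // multiple prefix matches are rejected as ambiguous rather than
+ // guessed at.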
+ rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + err := fmt.Errorf("service %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + if !insertDefaults { + return rl.Services[0], nil + } + + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true}) + if err == nil { + return rg.Service, nil + } + return nil, err +} + +func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { + // GetTask to match via full ID. + if rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}); err == nil { + return rg.Task, nil + } + + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. + rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Tasks) == 0 { + err := fmt.Errorf("task %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Tasks); l > 1 { + return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) + } + + return rl.Tasks[0], nil +} + +func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Secret, error) { + // attempt to lookup secret by full ID + if rg, err := c.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: input}); err == nil { + return rg.Secret, nil + } + + // If any error (including NotFound), ListSecrets to match via full name. + rl, err := c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Secrets) == 0 { + // If any error or 0 result, ListSecrets to match via ID prefix. + rl, err = c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Secrets) == 0 { + err := fmt.Errorf("secret %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Secrets); l > 1 { + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", input, l) + } + + return rl.Secrets[0], nil +} + +func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) { + // attempt to lookup config by full ID + if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil { + return rg.Config, nil + } + + // If any error (including NotFound), ListConfigs to match via full name. + rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Configs) == 0 { + // If any error or 0 result, ListConfigs to match via ID prefix. 
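+ // For illustration: input "7c90" resolves only if exactly one config
+ // ID starts with "7c90"; two or more matches produce the "ambiguous"
+ // error below.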
+ rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Configs) == 0 { + err := fmt.Errorf("config %s not found", input) + return nil, errors.NewRequestNotFoundError(err) + } + + if l := len(rl.Configs); l > 1 { + return nil, fmt.Errorf("config %s is ambiguous (%d matches found)", input, l) + } + + return rl.Configs[0], nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil { + return rg.Network, nil + } + + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. + rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil +} diff --git a/daemon/cluster/listen_addr.go b/daemon/cluster/listen_addr.go new file mode 100644 index 0000000..993ccb6 --- /dev/null +++ b/daemon/cluster/listen_addr.go @@ -0,0 +1,302 @@ +package cluster + +import ( + "errors" + "fmt" + "net" +) + +var ( + errNoSuchInterface = errors.New("no such interface") + errNoIP = errors.New("could not find the system's IP address") + errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadNetworkIdentifier = errors.New("must specify a valid IP address or interface name") + errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") + errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDataPathAddr = errors.New("data path address must be a non-zero IP address or network interface (without a port number)") + errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +) + +func resolveListenAddr(specifiedAddr string) (string, string, error) { + specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) + if err != nil { + return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) + } + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + specifiedIP, err := resolveInputIPAddr(specifiedHost, true) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadListenAddr + } + return "", "", err + } + + return specifiedIP.String(), specifiedPort, nil +} + +func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { + // Approach: + // - If an advertise address is specified, use that. 
Resolve the
+ // interface's address if an interface was specified in
+ // advertiseAddr. Fill in the port from listenAddrPort if necessary.
+ // - If DefaultAdvertiseAddr is not empty, use that with the port from
+ // listenAddrPort. Resolve the interface's address if an interface
+ // name was specified in DefaultAdvertiseAddr.
+ // - Otherwise, try to autodetect the system's address. Use the port in
+ // listenAddrPort with this address if autodetection succeeds.
+
+ if advertiseAddr != "" {
+ advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr)
+ if err != nil {
+ // Not a host:port specification
+ advertiseHost = advertiseAddr
+ advertisePort = listenAddrPort
+ }
+ // Does the host component match any of the interface names on the
+ // system? If so, use the address from that interface.
+ advertiseIP, err := resolveInputIPAddr(advertiseHost, false)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadAdvertiseAddr
+ }
+ return "", "", err
+ }
+
+ return advertiseIP.String(), advertisePort, nil
+ }
+
+ if c.config.DefaultAdvertiseAddr != "" {
+ // Does the default advertise address component match any of the
+ // interface names on the system? If so, use the address from
+ // that interface.
+ defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadDefaultAdvertiseAddr
+ }
+ return "", "", err
+ }
+
+ return defaultAdvertiseIP.String(), listenAddrPort, nil
+ }
+
+ systemAddr, err := c.resolveSystemAddr()
+ if err != nil {
+ return "", "", err
+ }
+ return systemAddr.String(), listenAddrPort, nil
+}
+
+func resolveDataPathAddr(dataPathAddr string) (string, error) {
+ if dataPathAddr == "" {
+ // dataPathAddr is not defined
+ return "", nil
+ }
+ // If a data path flag is specified try to resolve the IP address.
+ dataPathIP, err := resolveInputIPAddr(dataPathAddr, false)
+ if err != nil {
+ if err == errBadNetworkIdentifier {
+ err = errBadDataPathAddr
+ }
+ return "", err
+ }
+ return dataPathIP.String(), nil
+}
+
+func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) {
+ // Use a specific interface's IP address.
+ intf, err := net.InterfaceByName(specifiedInterface)
+ if err != nil {
+ return nil, errNoSuchInterface
+ }
+
+ addrs, err := intf.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var interfaceAddr4, interfaceAddr6 net.IP
+
+ for _, addr := range addrs {
+ ipAddr, ok := addr.(*net.IPNet)
+
+ if ok {
+ if ipAddr.IP.To4() != nil {
+ // IPv4
+ if interfaceAddr4 != nil {
+ return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP)
+ }
+ interfaceAddr4 = ipAddr.IP
+ } else {
+ // IPv6
+ if interfaceAddr6 != nil {
+ return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP)
+ }
+ interfaceAddr6 = ipAddr.IP
+ }
+ }
+ }
+
+ if interfaceAddr4 == nil && interfaceAddr6 == nil {
+ return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface)
+ }
+
+ // In the case that there's exactly one IPv4 address
+ // and exactly one IPv6 address, favor IPv4 over IPv6.
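+ // For example, an interface holding 192.0.2.10 and 2001:db8::10
+ // resolves to 192.0.2.10; two IPv4 addresses on the same interface
+ // were already rejected above.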
+ if interfaceAddr4 != nil {
+ return interfaceAddr4, nil
+ }
+ return interfaceAddr6, nil
+}
+
+// resolveInputIPAddr tries to resolve the IP address from the string passed as input
+// - tries to match the string as an interface name; if so, returns the IP address associated with it
+// - otherwise, tries to parse the string as an IP address itself and,
+// if that succeeds, returns the IP address
+func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) {
+ // Try to see if it is an interface name
+ interfaceAddr, err := resolveInterfaceAddr(input)
+ if err == nil {
+ return interfaceAddr, nil
+ }
+ // String matched an interface, but there is a potential ambiguity to be resolved
+ if err != errNoSuchInterface {
+ return nil, err
+ }
+
+ // String is not an interface; check if it is a valid IP
+ if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) {
+ return ip, nil
+ }
+
+ // No valid IP address found
+ return nil, errBadNetworkIdentifier
+}
+
+func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) {
+ // Use the system's only IP address, or fail if there are
+ // multiple addresses to choose from. Skip interfaces which
+ // are managed by docker via subnet check.
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ var systemAddr net.IP
+ var systemInterface string
+
+ // List Docker-managed subnets
+ v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets()
+
+ifaceLoop:
+ for _, intf := range interfaces {
+ // Skip inactive interfaces and loopback interfaces
+ if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 {
+ continue
+ }
+
+ addrs, err := intf.Addrs()
+ if err != nil {
+ continue
+ }
+
+ var interfaceAddr4, interfaceAddr6 net.IP
+
+ for _, addr := range addrs {
+ ipAddr, ok := addr.(*net.IPNet)
+
+ // Skip loopback and link-local addresses
+ if !ok || !ipAddr.IP.IsGlobalUnicast() {
+ continue
+ }
+
+ if ipAddr.IP.To4() != nil {
+ // IPv4
+
+ // Ignore addresses in subnets that are managed by Docker.
+ for _, subnet := range v4Subnets {
+ if subnet.Contains(ipAddr.IP) {
+ continue ifaceLoop
+ }
+ }
+
+ if interfaceAddr4 != nil {
+ return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP)
+ }
+
+ interfaceAddr4 = ipAddr.IP
+ } else {
+ // IPv6
+
+ // Ignore addresses in subnets that are managed by Docker.
+ for _, subnet := range v6Subnets {
+ if subnet.Contains(ipAddr.IP) {
+ continue ifaceLoop
+ }
+ }
+
+ if interfaceAddr6 != nil {
+ return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP)
+ }
+
+ interfaceAddr6 = ipAddr.IP
+ }
+ }
+
+ // In the case that this interface has exactly one IPv4 address
+ // and exactly one IPv6 address, favor IPv4 over IPv6.
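+ // (Unlike resolveInterfaceAddr, candidates inside docker-managed
+ // subnets never reach this point; they were skipped above via the
+ // ifaceLoop label.)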
+ if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Name + } + } + + if systemAddr == nil { + return nil, errNoIP + } + + return systemAddr, nil +} + +func listSystemIPs() []net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + + var systemAddrs []net.IP + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + systemAddrs = append(systemAddrs, ipAddr.IP) + } + } + } + + return systemAddrs +} + +func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { + if interfaceA == interfaceB { + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + } + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) +} diff --git a/daemon/cluster/listen_addr_linux.go b/daemon/cluster/listen_addr_linux.go new file mode 100644 index 0000000..3d4f239 --- /dev/null +++ b/daemon/cluster/listen_addr_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package cluster + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + // Use the system's only device IP address, or fail if there are + // multiple addresses to choose from. + interfaces, err := netlink.LinkList() + if err != nil { + return nil, err + } + + var ( + systemAddr net.IP + systemInterface string + deviceFound bool + ) + + for _, intf := range interfaces { + // Skip non device or inactive interfaces + if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { + continue + } + + addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr := addr.IPNet.IP + + // Skip loopback and link-local addresses + if !ipAddr.IsGlobalUnicast() { + continue + } + + // At least one non-loopback device is found and it is administratively up + deviceFound = true + + if ipAddr.To4() != nil { + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) + } + interfaceAddr4 = ipAddr + } else { + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) + } + interfaceAddr6 = ipAddr + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. 
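+ // (Only netlink links whose Type() is "device" reach this point, so
+ // veths, bridges and other virtual links never contribute candidate
+ // addresses here.)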
+ if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Attrs().Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Attrs().Name + } + } + + if systemAddr == nil { + if !deviceFound { + // If no non-loopback device type interface is found, + // fall back to the regular auto-detection mechanism. + // This is to cover the case where docker is running + // inside a container (eths are in fact veths). + return c.resolveSystemAddrViaSubnetCheck() + } + return nil, errNoIP + } + + return systemAddr, nil +} diff --git a/daemon/cluster/listen_addr_others.go b/daemon/cluster/listen_addr_others.go new file mode 100644 index 0000000..4e845f5 --- /dev/null +++ b/daemon/cluster/listen_addr_others.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris + +package cluster + +import "net" + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + return c.resolveSystemAddrViaSubnetCheck() +} diff --git a/daemon/cluster/listen_addr_solaris.go b/daemon/cluster/listen_addr_solaris.go new file mode 100644 index 0000000..57a894b --- /dev/null +++ b/daemon/cluster/listen_addr_solaris.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "bufio" + "fmt" + "net" + "os/exec" + "strings" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " + + "`/usr/sbin/route get default | /usr/bin/grep interface | " + + "/usr/bin/awk '{print $2}'`" + out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output() + if err != nil { + return nil, fmt.Errorf("cannot get default route: %v", err) + } + + defInterface := strings.SplitN(string(out), "/", 2) + defInterfaceIP := net.ParseIP(defInterface[0]) + + return defInterfaceIP, nil +} + +func listSystemIPs() []net.IP { + var systemAddrs []net.IP + cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr") + cmdReader, err := cmd.StdoutPipe() + if err != nil { + return nil + } + + if err := cmd.Start(); err != nil { + return nil + } + + scanner := bufio.NewScanner(cmdReader) + go func() { + for scanner.Scan() { + text := scanner.Text() + nameAddrPair := strings.SplitN(text, "/", 2) + // Let go of loopback interfaces and docker interfaces + systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0])) + } + }() + + if err := scanner.Err(); err != nil { + fmt.Printf("scan underwent err: %+v\n", err) + } + + if err := cmd.Wait(); err != nil { + fmt.Printf("run command wait: %+v\n", err) + } + + return systemAddrs +} diff --git a/daemon/cluster/networks.go b/daemon/cluster/networks.go new file mode 100644 index 0000000..1906c37 --- /dev/null +++ b/daemon/cluster/networks.go @@ -0,0 +1,317 @@ +package cluster + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/runconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// GetNetworks returns all current cluster managed networks. 
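+// Networks carrying the label com.docker.swarm.predefined=true are
+// filtered out of the result by removePredefinedNetworks below.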
+func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + list, err := c.getNetworks(nil) + if err != nil { + return nil, err + } + removePredefinedNetworks(&list) + return list, nil +} + +func removePredefinedNetworks(networks *[]apitypes.NetworkResource) { + if networks == nil { + return + } + var idxs []int + for i, n := range *networks { + if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + idxs = append(idxs, i) + } + } + for i, idx := range idxs { + idx -= i + *networks = append((*networks)[:idx], (*networks)[idx+1:]...) + } +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + networks := make([]apitypes.NetworkResource, 0, len(r.Networks)) + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetwork returns a cluster network by an ID. +func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + var network *swarmapi.Network + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + network = n + return nil + }); err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. +func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { + c.mu.Lock() + attacher, ok := c.attachers[attacherKey(target, containerID)] + if !ok || attacher == nil { + c.mu.Unlock() + return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) + } + if attacher.inProgress { + logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) + c.mu.Unlock() + return nil + } + attacher.inProgress = true + c.mu.Unlock() + + attacher.attachWaitCh <- config + + return nil +} + +// WaitForDetachment waits for the container to stop or detach from +// the network. 
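+//
+// A hedged calling sketch (identifiers are hypothetical):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    err := c.WaitForDetachment(ctx, "mynet", networkID, taskID, containerID)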
+func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
+ c.mu.RLock()
+ attacher, ok := c.attachers[attacherKey(networkName, containerID)]
+ if !ok {
+ attacher, ok = c.attachers[attacherKey(networkID, containerID)]
+ }
+ state := c.currentNodeState()
+ if state.swarmNode == nil || state.swarmNode.Agent() == nil {
+ c.mu.RUnlock()
+ return errors.New("invalid cluster node while waiting for detachment")
+ }
+
+ c.mu.RUnlock()
+ agent := state.swarmNode.Agent()
+ if ok && attacher != nil &&
+ attacher.detachWaitCh != nil &&
+ attacher.attachCompleteCh != nil {
+ // Attachment may still be in progress, so wait for
+ // it to complete.
+ select {
+ case <-attacher.attachCompleteCh:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ if attacher.taskID == taskID {
+ select {
+ case <-attacher.detachWaitCh:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ }
+
+ return agent.ResourceAllocator().DetachNetwork(ctx, taskID)
+}
+
+// AttachNetwork generates an attachment request towards the manager.
+func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) {
+ aKey := attacherKey(target, containerID)
+ c.mu.Lock()
+ state := c.currentNodeState()
+ if state.swarmNode == nil || state.swarmNode.Agent() == nil {
+ c.mu.Unlock()
+ return nil, errors.New("invalid cluster node while attaching to network")
+ }
+ if attacher, ok := c.attachers[aKey]; ok {
+ c.mu.Unlock()
+ return attacher.config, nil
+ }
+
+ agent := state.swarmNode.Agent()
+ attachWaitCh := make(chan *network.NetworkingConfig)
+ detachWaitCh := make(chan struct{})
+ attachCompleteCh := make(chan struct{})
+ c.attachers[aKey] = &attacher{
+ attachWaitCh: attachWaitCh,
+ attachCompleteCh: attachCompleteCh,
+ detachWaitCh: detachWaitCh,
+ }
+ c.mu.Unlock()
+
+ ctx, cancel := c.getRequestContext()
+ defer cancel()
+
+ taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses)
+ if err != nil {
+ c.mu.Lock()
+ delete(c.attachers, aKey)
+ c.mu.Unlock()
+ return nil, fmt.Errorf("could not attach to network %s: %v", target, err)
+ }
+
+ c.mu.Lock()
+ c.attachers[aKey].taskID = taskID
+ close(attachCompleteCh)
+ c.mu.Unlock()
+
+ logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID)
+
+ release := func() {
+ ctx, cancel := c.getRequestContext()
+ defer cancel()
+ if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil {
+ logrus.Errorf("Failed to remove network attachment %s to network %s on allocation failure: %v",
+ taskID, target, err)
+ }
+ }
+
+ var config *network.NetworkingConfig
+ select {
+ case config = <-attachWaitCh:
+ case <-ctx.Done():
+ release()
+ return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err())
+ }
+
+ c.mu.Lock()
+ c.attachers[aKey].config = config
+ c.mu.Unlock()
+
+ logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID)
+
+ return config, nil
+}
+
+// DetachNetwork unblocks the waiters waiting on WaitForDetachment so
+// that a request to detach can be generated towards the manager.
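+//
+// Typical use is symmetric with AttachNetwork (sketch, hypothetical IDs):
+//
+//    if err := c.DetachNetwork("mynet", containerID); err != nil {
+//        logrus.Warnf("detach failed: %v", err)
+//    }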
+func (c *Cluster) DetachNetwork(target string, containerID string) error { + aKey := attacherKey(target, containerID) + + c.mu.Lock() + attacher, ok := c.attachers[aKey] + delete(c.attachers, aKey) + c.mu.Unlock() + + if !ok { + return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) + } + + close(attacher.detachWaitCh) + return nil +} + +// CreateNetwork creates a new cluster managed network. +func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { + if runconfig.IsPreDefinedNetwork(s.Name) { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) + return "", apierrors.NewRequestForbiddenError(err) + } + + var resp *swarmapi.CreateNetworkResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + networkSpec := convert.BasicNetworkCreateToGRPC(s) + r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + + return resp.Network.ID, nil +} + +// RemoveNetwork removes a cluster network. +func (c *Cluster) RemoveNetwork(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + network, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}) + return err + }) +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + ln, _ := c.config.Backend.FindNetwork(n.Target) + if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) { + // Need to retrieve the corresponding predefined swarm network + // and use its id for the request. + apiNetwork, err = getNetwork(ctx, client, ln.Name()) + if err != nil { + err = fmt.Errorf("could not find the corresponding predefined swarm network: %v", err) + return apierrors.NewRequestNotFoundError(err) + } + goto setid + } + if ln != nil && !ln.Info().Dynamic() { + err = fmt.Errorf("The network %s cannot be used with services. 
Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return apierrors.NewRequestForbiddenError(err) + } + return err + } + setid: + networks[i].Target = apiNetwork.ID + } + return nil +} diff --git a/daemon/cluster/noderunner.go b/daemon/cluster/noderunner.go new file mode 100644 index 0000000..c0c7529 --- /dev/null +++ b/daemon/cluster/noderunner.go @@ -0,0 +1,371 @@ +package cluster + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + lncluster "github.com/docker/libnetwork/cluster" + swarmapi "github.com/docker/swarmkit/api" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// nodeRunner implements a manager for continuously running swarmkit node, restarting them with backoff delays if needed. +type nodeRunner struct { + nodeState + mu sync.RWMutex + done chan struct{} // closed when swarmNode exits + ready chan struct{} // closed when swarmNode becomes active + reconnectDelay time.Duration + config nodeStartConfig + + repeatedRun bool + cancelReconnect func() + stopping bool + cluster *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. + LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + // DataPathAddr is the address that has to be used for the data path + DataPathAddr string + // JoinInProgress is set to true if a join operation has started, but + // not completed yet. + JoinInProgress bool + + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool + availability types.NodeAvailability +} + +func (n *nodeRunner) Ready() chan error { + c := make(chan error, 1) + n.mu.RLock() + ready, done := n.ready, n.done + n.mu.RUnlock() + go func() { + select { + case <-ready: + case <-done: + } + select { + case <-ready: + default: + n.mu.RLock() + c <- n.err + n.mu.RUnlock() + } + close(c) + }() + return c +} + +func (n *nodeRunner) Start(conf nodeStartConfig) error { + n.mu.Lock() + defer n.mu.Unlock() + + n.reconnectDelay = initialReconnectDelay + + return n.start(conf) +} + +func (n *nodeRunner) start(conf nodeStartConfig) error { + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(n.cluster.runtimeRoot, controlSocket) + } + + joinAddr := conf.joinAddr + if joinAddr == "" && conf.JoinInProgress { + // We must have been restarted while trying to join a cluster. + // Continue trying to join instead of forming our own cluster. + joinAddr = conf.RemoteAddr + } + + // Hostname is not set here. 
Instead, it is obtained from + // the node description that is reported periodically + swarmnodeConfig := swarmnode.Config{ + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: joinAddr, + StateDir: n.cluster.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor(n.cluster.config.Backend), + HeartbeatTick: 1, + ElectionTick: 3, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: n.cluster.config.Backend.PluginGetter(), + } + if conf.availability != "" { + avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))] + if !ok { + return fmt.Errorf("invalid Availability: %q", conf.availability) + } + swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail) + } + node, err := swarmnode.New(&swarmnodeConfig) + if err != nil { + return err + } + if err := node.Start(context.Background()); err != nil { + return err + } + + n.done = make(chan struct{}) + n.ready = make(chan struct{}) + n.swarmNode = node + if conf.joinAddr != "" { + conf.JoinInProgress = true + } + n.config = conf + savePersistentState(n.cluster.root, conf) + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + n.handleNodeExit(node) + cancel() + }() + + go n.handleReadyEvent(ctx, node, n.ready) + go n.handleControlSocketChange(ctx, node) + + return nil +} + +func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) { + for conn := range node.ListenControlSocket(ctx) { + n.mu.Lock() + if n.grpcConn != conn { + if conn == nil { + n.controlClient = nil + n.logsClient = nil + } else { + n.controlClient = swarmapi.NewControlClient(conn) + n.logsClient = swarmapi.NewLogsClient(conn) + // push store changes to daemon + go n.watchClusterEvents(ctx, conn) + } + } + n.grpcConn = conn + n.mu.Unlock() + n.cluster.SendClusterEvent(lncluster.EventSocketChange) + } +} + +func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) { + client := swarmapi.NewWatchClient(conn) + watch, err := client.Watch(ctx, &swarmapi.WatchRequest{ + Entries: []*swarmapi.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "service", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "network", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "secret", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + }, + IncludeOldObject: true, + }) + if err != nil { + logrus.WithError(err).Error("failed to watch cluster store") + return + } + for { + msg, err := watch.Recv() + if err != nil { + // store watch is broken + logrus.WithError(err).Error("failed to receive changes from store watch API") + return + } + select { + case <-ctx.Done(): + return + case n.cluster.watchStream <- msg: + } + } +} + +func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) { + select { + case <-node.Ready(): + n.mu.Lock() + n.err = nil + if n.config.JoinInProgress { + n.config.JoinInProgress = false + savePersistentState(n.cluster.root, n.config) + } + n.mu.Unlock() + close(ready) + case <-ctx.Done(): + } + 
n.cluster.SendClusterEvent(lncluster.EventNodeReady) +} + +func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { + err := detectLockedError(node.Err(context.Background())) + if err != nil { + logrus.Errorf("cluster exited with error: %v", err) + } + n.mu.Lock() + n.swarmNode = nil + n.err = err + close(n.done) + select { + case <-n.ready: + n.enableReconnectWatcher() + default: + if n.repeatedRun { + n.enableReconnectWatcher() + } + } + n.repeatedRun = true + n.mu.Unlock() +} + +// Stop stops the current swarm node if it is running. +func (n *nodeRunner) Stop() error { + n.mu.Lock() + if n.cancelReconnect != nil { // between restarts + n.cancelReconnect() + n.cancelReconnect = nil + } + if n.swarmNode == nil { + n.mu.Unlock() + return nil + } + n.stopping = true + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + n.mu.Unlock() + if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { + return err + } + n.cluster.SendClusterEvent(lncluster.EventNodeLeave) + <-n.done + return nil +} + +func (n *nodeRunner) State() nodeState { + if n == nil { + return nodeState{status: types.LocalNodeStateInactive} + } + n.mu.RLock() + defer n.mu.RUnlock() + + ns := n.nodeState + + if ns.err != nil || n.cancelReconnect != nil { + if errors.Cause(ns.err) == errSwarmLocked { + ns.status = types.LocalNodeStateLocked + } else { + ns.status = types.LocalNodeStateError + } + } else { + select { + case <-n.ready: + ns.status = types.LocalNodeStateActive + default: + ns.status = types.LocalNodeStatePending + } + } + + return ns +} + +func (n *nodeRunner) enableReconnectWatcher() { + if n.stopping { + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + n.cancelReconnect = cancel + + go func() { + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + n.mu.Lock() + defer n.mu.Unlock() + if n.stopping { + return + } + + if err := n.start(n.config); err != nil { + n.err = err + } + }() +} + +// nodeState represents information about the current state of the cluster and +// provides access to the grpc clients. +type nodeState struct { + swarmNode *swarmnode.Node + grpcConn *grpc.ClientConn + controlClient swarmapi.ControlClient + logsClient swarmapi.LogsClient + status types.LocalNodeState + actualLocalAddr string + err error +} + +// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true. +func (ns nodeState) IsActiveManager() bool { + return ns.controlClient != nil +} + +// IsManager returns true if node is a manager. +func (ns nodeState) IsManager() bool { + return ns.swarmNode != nil && ns.swarmNode.Manager() != nil +} + +// NodeID returns node's ID or empty string if node is inactive. 
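+//
+// Example (sketch):
+//
+//    if id := ns.NodeID(); id != "" {
+//        logrus.Debugf("local node ID: %s", id)
+//    }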
+func (ns nodeState) NodeID() string { + if ns.swarmNode != nil { + return ns.swarmNode.NodeID() + } + return "" +} diff --git a/daemon/cluster/nodes.go b/daemon/cluster/nodes.go new file mode 100644 index 0000000..839c8f7 --- /dev/null +++ b/daemon/cluster/nodes.go @@ -0,0 +1,104 @@ +package cluster + +import ( + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetNodes returns a list of all nodes known to a cluster. +func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := make([]types.Node, 0, len(r.Nodes)) + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID. +func (c *Cluster) GetNode(input string) (types.Node, error) { + var node *swarmapi.Node + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + node = n + return nil + }); err != nil { + return types.Node{}, err + } + + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. +func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + nodeSpec, err := convert.NodeSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + currentNode, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.UpdateNode( + ctx, + &swarmapi.UpdateNodeRequest{ + NodeID: currentNode.ID, + Spec: &nodeSpec, + NodeVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + return err + }) +} + +// RemoveNode removes a node from a cluster +func (c *Cluster) RemoveNode(input string, force bool) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + node, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}) + return err + }) +} diff --git a/daemon/cluster/provider/network.go b/daemon/cluster/provider/network.go new file mode 100644 index 0000000..f4c72ae --- /dev/null +++ b/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. +type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. 
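+//
+// For example (illustrative values only):
+//
+//    addr := VirtualAddress{IPv4: "10.0.0.2/24", IPv6: "fd00::2/64"}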
+type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/daemon/cluster/secrets.go b/daemon/cluster/secrets.go new file mode 100644 index 0000000..3947286 --- /dev/null +++ b/daemon/cluster/secrets.go @@ -0,0 +1,117 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetSecret returns a secret from a managed swarm cluster +func (c *Cluster) GetSecret(input string) (types.Secret, error) { + var secret *swarmapi.Secret + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + secret = s + return nil + }); err != nil { + return types.Secret{}, err + } + return convert.SecretFromGRPC(secret), nil +} + +// GetSecrets returns all secrets of a managed swarm cluster. +func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListSecretsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListSecrets(ctx, + &swarmapi.ListSecretsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + secrets := make([]types.Secret, 0, len(r.Secrets)) + + for _, secret := range r.Secrets { + secrets = append(secrets, convert.SecretFromGRPC(secret)) + } + + return secrets, nil +} + +// CreateSecret creates a new secret in a managed swarm cluster. +func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { + var resp *swarmapi.CreateSecretResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := state.controlClient.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Secret.ID, nil +} + +// RemoveSecret removes a secret from a managed swarm cluster. +func (c *Cluster) RemoveSecret(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveSecretRequest{ + SecretID: secret.ID, + } + + _, err = state.controlClient.RemoveSecret(ctx, req) + return err + }) +} + +// UpdateSecret updates a secret in a managed swarm cluster. 
+// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateSecret(input string, version uint64, spec types.SecretSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + secretSpec := convert.SecretSpecToGRPC(spec) + + _, err = state.controlClient.UpdateSecret(ctx, + &swarmapi.UpdateSecretRequest{ + SecretID: secret.ID, + SecretVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &secretSpec, + }) + return err + }) +} diff --git a/daemon/cluster/services.go b/daemon/cluster/services.go new file mode 100644 index 0000000..f4416c2 --- /dev/null +++ b/daemon/cluster/services.go @@ -0,0 +1,552 @@ +package cluster + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/daemon/cluster/convert" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// GetServices returns all services of a managed swarm cluster. +func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + // We move the accepted filter check here as "mode" filter + // is processed in the daemon, not in SwarmKit. So it might + // be good to have accepted file check in the same file as + // the filter processing (in the for loop below). + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "mode": true, + "runtime": true, + } + if err := options.Filters.Validate(accepted); err != nil { + return nil, err + } + + filters := &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: options.Filters.Get("name"), + IDPrefixes: options.Filters.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")), + // (ehazlett): hardcode runtime for now. eventually we will + // be able to filter for the desired runtimes once more + // are supported. + Runtimes: []string{string(types.RuntimeContainer)}, + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := make([]types.Service, 0, len(r.Services)) + + for _, service := range r.Services { + if options.Filters.Include("mode") { + var mode string + switch service.Spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + mode = "global" + case *swarmapi.ServiceSpec_Replicated: + mode = "replicated" + } + + if !options.Filters.ExactMatch("mode", mode) { + continue + } + } + svcs, err := convert.ServiceFromGRPC(*service) + if err != nil { + return nil, err + } + services = append(services, svcs) + } + + return services, nil +} + +// GetService returns a service based on an ID or name. 
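+//
+// A usage sketch (the service name "web" is hypothetical):
+//
+//    svc, err := c.GetService("web", false)
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println(svc.ID, svc.Spec.Name)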
+func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) { + var service *swarmapi.Service + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getService(ctx, state.controlClient, input, insertDefaults) + if err != nil { + return err + } + service = s + return nil + }); err != nil { + return types.Service{}, err + } + svc, err := convert.ServiceFromGRPC(*service) + if err != nil { + return types.Service{}, err + } + return svc, nil +} + +// CreateService creates a new service in a managed swarm cluster. +func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) { + var resp *apitypes.ServiceCreateResponse + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + err := c.populateNetworkID(ctx, state.controlClient, &s) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + resp = &apitypes.ServiceCreateResponse{} + + switch serviceSpec.Task.Runtime.(type) { + // handle other runtimes here + case *swarmapi.TaskSpec_Container: + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return errors.New("service does not use container tasks") + } + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) + + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + } + return nil + }) + + return resp, err +} + +// UpdateService updates existing service to match new properties. 
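+//
+// A minimal sketch (illustrative; the current version index must be
+// read first so the optimistic-locking check can succeed):
+//
+//    svc, _ := c.GetService("web", false)
+//    svc.Spec.TaskTemplate.ForceUpdate++
+//    _, err := c.UpdateService(svc.ID, svc.Version.Index, svc.Spec,
+//        apitypes.ServiceUpdateOptions{}, false)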
+func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) { + var resp *apitypes.ServiceUpdateResponse + + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + + err := c.populateNetworkID(ctx, state.controlClient, &spec) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return apierrors.NewBadRequestError(err) + } + + currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false) + if err != nil { + return err + } + + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return errors.New("service does not use container tasks") + } + + encodedAuth := flags.EncodedRegistryAuth + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch flags.RegistryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return errors.New("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return errors.New("unsupported registryAuthFrom value") + } + if ctnr == nil { + return errors.New("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp = &apitypes.ServiceUpdateResponse{} + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to update a service + // if the registry is slow or unresponsive. 
+ var cancel func()
+ ctx, cancel = c.getRequestContext()
+ defer cancel()
+ }
+
+ var rollback swarmapi.UpdateServiceRequest_Rollback
+ switch flags.Rollback {
+ case "", "none":
+ rollback = swarmapi.UpdateServiceRequest_NONE
+ case "previous":
+ rollback = swarmapi.UpdateServiceRequest_PREVIOUS
+ default:
+ return fmt.Errorf("unrecognized rollback option %s", flags.Rollback)
+ }
+
+ _, err = state.controlClient.UpdateService(
+ ctx,
+ &swarmapi.UpdateServiceRequest{
+ ServiceID: currentService.ID,
+ Spec: &serviceSpec,
+ ServiceVersion: &swarmapi.Version{
+ Index: version,
+ },
+ Rollback: rollback,
+ },
+ )
+ return err
+ })
+ return resp, err
+}
+
+// RemoveService removes a service from a managed swarm cluster.
+func (c *Cluster) RemoveService(input string) error {
+ return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+ service, err := getService(ctx, state.controlClient, input, false)
+ if err != nil {
+ return err
+ }
+
+ _, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
+ return err
+ })
+}
+
+// ServiceLogs collects service logs and sends them back on the returned channel.
+func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ state := c.currentNodeState()
+ if !state.IsActiveManager() {
+ return nil, c.errNoManager(state)
+ }
+
+ swarmSelector, err := convertSelector(ctx, state.controlClient, selector)
+ if err != nil {
+ return nil, errors.Wrap(err, "error making log selector")
+ }
+
+ // set the streams we'll use
+ stdStreams := []swarmapi.LogStream{}
+ if config.ShowStdout {
+ stdStreams = append(stdStreams, swarmapi.LogStreamStdout)
+ }
+ if config.ShowStderr {
+ stdStreams = append(stdStreams, swarmapi.LogStreamStderr)
+ }
+
+ // Get the tail value squared away - the number of previous log lines to return
+ var tail int64
+ // In ContainerLogs, any non-integer tail value is simply coerced to -1
+ // (all). That is debatable, but an omitted tail value still has to mean
+ // something, so here "" is treated the same as "all".
+ if config.Tail == "all" || config.Tail == "" {
+ // tail of 0 means send all logs on the swarmkit side
+ tail = 0
+ } else {
+ t, err := strconv.Atoi(config.Tail)
+ if err != nil {
+ return nil, errors.New("tail value must be a positive integer or \"all\"")
+ }
+ if t < 0 {
+ return nil, errors.New("negative tail values not supported")
+ }
+ // We actually use a negative tail in swarmkit to represent messages
+ // read backwards from the beginning. Also, -1 means no logs. So, for
+ // API compatibility with docker container logs, add one and
+ // flip the sign.
 We return an error above for a negative tail, which
+ // isn't supported by docker (and would fail deeper in the stack
+ // anyway)
+ //
+ // See the logs protobuf for more information
+ tail = int64(-(t + 1))
+ }
+
+ // get the since value - the time in the past to start showing logs from
+ var sinceProto *gogotypes.Timestamp
+ if config.Since != "" {
+ s, n, err := timetypes.ParseTimestamps(config.Since, 0)
+ if err != nil {
+ return nil, errors.Wrap(err, "could not parse since timestamp")
+ }
+ since := time.Unix(s, n)
+ sinceProto, err = gogotypes.TimestampProto(since)
+ if err != nil {
+ return nil, errors.Wrap(err, "could not parse timestamp to proto")
+ }
+ }
+
+ stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
+ Selector: swarmSelector,
+ Options: &swarmapi.LogSubscriptionOptions{
+ Follow: config.Follow,
+ Streams: stdStreams,
+ Tail: tail,
+ Since: sinceProto,
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ messageChan := make(chan *backend.LogMessage, 1)
+ go func() {
+ defer close(messageChan)
+ for {
+ // Check the context before doing anything.
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ subscribeMsg, err := stream.Recv()
+ if err == io.EOF {
+ return
+ }
+ // on any other error, push the error message in and return
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ case messageChan <- &backend.LogMessage{Err: err}:
+ }
+ return
+ }
+
+ for _, msg := range subscribeMsg.Messages {
+ // make a new message
+ m := new(backend.LogMessage)
+ m.Attrs = make(backend.LogAttributes)
+ // add the timestamp, adding the error if it fails
+ m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp)
+ if err != nil {
+ m.Err = err
+ }
+ // copy over all of the details
+ for _, d := range msg.Attrs {
+ m.Attrs[d.Key] = d.Value
+ }
+ // We have the final say over the context details, in case of a
+ // conflict (e.g. the user added a detail with a context key for
+ // some reason)
+ m.Attrs[contextPrefix+".node.id"] = msg.Context.NodeID
+ m.Attrs[contextPrefix+".service.id"] = msg.Context.ServiceID
+ m.Attrs[contextPrefix+".task.id"] = msg.Context.TaskID
+
+ switch msg.Stream {
+ case swarmapi.LogStreamStdout:
+ m.Source = "stdout"
+ case swarmapi.LogStreamStderr:
+ m.Source = "stderr"
+ }
+ m.Line = msg.Data
+
+ // There could be a case where the reader stops accepting
+ // messages and the context is canceled. We need to check that
+ // here, or otherwise we risk blocking forever on the message
+ // send.
+ select {
+ case <-ctx.Done():
+ return
+ case messageChan <- m:
+ }
+ }
+ }
+ }()
+ return messageChan, nil
+}
+
+// convertSelector takes a backend.LogSelector, which contains raw names that
+// may or may not be valid, and converts them to an api.LogSelector proto.
It
+// returns an error if resolution fails.
+func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) {
+ // don't rely on swarmkit to resolve IDs, do it ourselves
+ swarmSelector := &swarmapi.LogSelector{}
+ for _, s := range selector.Services {
+ service, err := getService(ctx, cc, s, false)
+ if err != nil {
+ return nil, err
+ }
+ c := service.Spec.Task.GetContainer()
+ if c == nil {
+ return nil, errors.New("logs only supported on container tasks")
+ }
+ swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
+ }
+ for _, t := range selector.Tasks {
+ task, err := getTask(ctx, cc, t)
+ if err != nil {
+ return nil, err
+ }
+ c := task.Spec.GetContainer()
+ if c == nil {
+ return nil, errors.New("logs only supported on container tasks")
+ }
+ swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
+ }
+ return swarmSelector, nil
+}
+
+// imageWithDigestString takes an image such as name or name:tag
+// and returns the image pinned to a digest, such as name@sha256:34234
+func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
+ ref, err := reference.ParseAnyReference(image)
+ if err != nil {
+ return "", err
+ }
+ namedRef, ok := ref.(reference.Named)
+ if !ok {
+ if _, ok := ref.(reference.Digested); ok {
+ return image, nil
+ }
+ return "", errors.Errorf("unknown image reference format: %s", image)
+ }
+ // only query registry if not a canonical reference (i.e. with digest)
+ if _, ok := namedRef.(reference.Canonical); !ok {
+ namedRef = reference.TagNameOnly(namedRef)
+
+ taggedRef, ok := namedRef.(reference.NamedTagged)
+ if !ok {
+ return "", errors.Errorf("image reference not tagged: %s", image)
+ }
+
+ repo, _, err := c.config.Backend.GetRepository(ctx, taggedRef, authConfig)
+ if err != nil {
+ return "", err
+ }
+ dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag())
+ if err != nil {
+ return "", err
+ }
+
+ namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest)
+ if err != nil {
+ return "", err
+ }
+ // return the familiar form until the interface is updated to return the typed reference
+ return reference.FamiliarString(namedDigestedRef), nil
+ }
+ // reference already contains a digest, so just return it
+ return reference.FamiliarString(ref), nil
+}
+
+// digestWarning constructs a formatted warning string
+// using the image name that could not be pinned by digest. The
+// formatting is hardcoded, but could be made smarter in the future
+func digestWarning(image string) string {
+ return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} diff --git a/daemon/cluster/swarm.go b/daemon/cluster/swarm.go new file mode 100644 index 0000000..ef0596b --- /dev/null +++ b/daemon/cluster/swarm.go @@ -0,0 +1,549 @@ +package cluster + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + if c.nr != nil { + if req.ForceNewCluster { + // Take c.mu temporarily to wait for presently running + // API handlers to finish before shutting down the node. + c.mu.Lock() + c.mu.Unlock() + + if err := c.nr.Stop(); err != nil { + return "", err + } + } else { + return "", errSwarmExists + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + return "", apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + return "", err + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask the user to pass the listen address. 
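+ // For example (illustrative): with ListenAddr 0.0.0.0:2377 and
+ // AdvertiseAddr 192.0.2.10:2377, the local address becomes 192.0.2.10,
+ // provided that address is assigned to this system.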
+ if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + DataPathAddr: dataPathAddr, + availability: req.Availability, + }) + if err != nil { + return "", err + } + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + if err := <-nr.Ready(); err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := clearPersistentState(c.root); err != nil { + return "", err + } + } + return "", err + } + state := nr.State() + if state.swarmNode == nil { // should never happen but protect from panic + return "", errors.New("invalid cluster state for spec initialization") + } + if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { + return "", err + } + return state.NodeID(), nil +} + +// Join makes current Cluster part of an existing swarm cluster. +func (c *Cluster) Join(req types.JoinRequest) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + c.mu.Lock() + if c.nr != nil { + c.mu.Unlock() + return errSwarmExists + } + c.mu.Unlock() + + if err := validateAndSanitizeJoinRequest(&req); err != nil { + return apierrors.NewBadRequestError(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. + if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return err + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + DataPathAddr: dataPathAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + availability: req.Availability, + }) + if err != nil { + return err + } + + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + return errSwarmJoinTimeoutReached + case err := <-nr.Ready(): + if err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + if err := clearPersistentState(c.root); err != nil { + return err + } + } + return err + } +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. +func (c *Cluster) Inspect() (types.Swarm, error) { + var swarm *swarmapi.Cluster + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getSwarm(ctx, state.controlClient) + if err != nil { + return err + } + swarm = s + return nil + }); err != nil { + return types.Swarm{}, err + } + return convert.SwarmFromGRPC(*swarm), nil +} + +// Update updates configuration of a managed swarm cluster. 
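+//
+// A version-gated update sketch (illustrative field choice):
+//
+//    sw, err := c.Inspect()
+//    if err != nil {
+//        return err
+//    }
+//    sw.Spec.Raft.SnapshotInterval = 5000
+//    err = c.Update(sw.Version.Index, sw.Spec, types.UpdateFlags{})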
+func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
+ return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+ swarm, err := getSwarm(ctx, state.controlClient)
+ if err != nil {
+ return err
+ }
+
+ // In update, the client should provide the complete spec of the swarm,
+ // including Name and Labels. If a field is specified as 0 or nil, then
+ // swarmkit's default value will be used.
+ clusterSpec, err := convert.SwarmSpecToGRPC(spec)
+ if err != nil {
+ return apierrors.NewBadRequestError(err)
+ }
+
+ _, err = state.controlClient.UpdateCluster(
+ ctx,
+ &swarmapi.UpdateClusterRequest{
+ ClusterID: swarm.ID,
+ Spec: &clusterSpec,
+ ClusterVersion: &swarmapi.Version{
+ Index: version,
+ },
+ Rotation: swarmapi.KeyRotation{
+ WorkerJoinToken: flags.RotateWorkerToken,
+ ManagerJoinToken: flags.RotateManagerToken,
+ ManagerUnlockKey: flags.RotateManagerUnlockKey,
+ },
+ },
+ )
+ return err
+ })
+}
+
+// GetUnlockKey returns the unlock key for the swarm.
+func (c *Cluster) GetUnlockKey() (string, error) {
+ var resp *swarmapi.GetUnlockKeyResponse
+ if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+ client := swarmapi.NewCAClient(state.grpcConn)
+
+ r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
+ if err != nil {
+ return err
+ }
+ resp = r
+ return nil
+ }); err != nil {
+ return "", err
+ }
+ if len(resp.UnlockKey) == 0 {
+ // no key
+ return "", nil
+ }
+ return encryption.HumanReadableKey(resp.UnlockKey), nil
+}
+
+// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
+func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
+ c.controlMutex.Lock()
+ defer c.controlMutex.Unlock()
+
+ c.mu.RLock()
+ state := c.currentNodeState()
+
+ if !state.IsActiveManager() {
+ // When the manager is not active, bail out with the error unless
+ // the swarm is locked, which is the case handled below.
+ if err := c.errNoManager(state); err != errSwarmLocked {
+ c.mu.RUnlock()
+ return err
+ }
+ } else {
+ // When the manager is active, the swarm cannot be locked; return a "not locked" error.
+ c.mu.RUnlock()
+ return errors.New("swarm is not locked")
+ }
+
+ // Execution reaches this point only when the swarm is locked.
+ nr := c.nr
+ c.mu.RUnlock()
+
+ key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
+ if err != nil {
+ return err
+ }
+
+ config := nr.config
+ config.lockKey = key
+ if err := nr.Stop(); err != nil {
+ return err
+ }
+ nr, err = c.newNodeRunner(config)
+ if err != nil {
+ return err
+ }
+
+ c.mu.Lock()
+ c.nr = nr
+ c.mu.Unlock()
+
+ if err := <-nr.Ready(); err != nil {
+ if errors.Cause(err) == errSwarmLocked {
+ return errors.New("swarm could not be unlocked: invalid key provided")
+ }
+ return fmt.Errorf("swarm component could not be started: %v", err)
+ }
+ return nil
+}
+
+// Leave shuts down Cluster and removes current state.
+func (c *Cluster) Leave(force bool) error {
+ c.controlMutex.Lock()
+ defer c.controlMutex.Unlock()
+
+ c.mu.Lock()
+ nr := c.nr
+ if nr == nil {
+ c.mu.Unlock()
+ return errNoSwarm
+ }
+
+ state := c.currentNodeState()
+
+ c.mu.Unlock()
+
+ if errors.Cause(state.err) == errSwarmLocked && !force {
+ // Leaving a locked swarm without --force is not allowed.
+ return errors.New("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")
+ }
+
+ if state.IsManager() && !force {
+ msg := "You are attempting to leave the swarm on a node that is participating as a manager. 
" + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + return errors.New(msg) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." + return errors.New(msg) + } + // release readers in here + if err := nr.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + return err + } + + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + + if nodeID := state.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + + // todo: cleanup optional? + if err := clearPersistentState(c.root); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +// Info returns information about the current cluster state. +func (c *Cluster) Info() types.Info { + info := types.Info{ + NodeAddr: c.GetAdvertiseAddress(), + } + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + info.LocalNodeState = state.status + if state.err != nil { + info.Error = state.err.Error() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + if state.IsActiveManager() { + info.ControlAvailable = true + swarm, err := c.Inspect() + if err != nil { + info.Error = err.Error() + } + + info.Cluster = &swarm.ClusterInfo + + if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil { + info.Error = err.Error() + } else { + info.Nodes = len(r.Nodes) + for _, n := range r.Nodes { + if n.ManagerStatus != nil { + info.Managers = info.Managers + 1 + } + } + } + } + + if state.swarmNode != nil { + for _, r := range state.swarmNode.Remotes() { + info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) + } + info.NodeID = state.swarmNode.NodeID() + } + + return info +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return errors.New("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return 
fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, errors.New("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + +func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return errors.New("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec to GRPC spec. This will leave the + // default value alone. + // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} diff --git a/daemon/cluster/tasks.go b/daemon/cluster/tasks.go new file mode 100644 index 0000000..47cd556 --- /dev/null +++ b/daemon/cluster/tasks.go @@ -0,0 +1,87 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// GetTasks returns a list of tasks matching the filter options. 
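+//
+// A filtered listing sketch (the service name is hypothetical):
+//
+//    f := filters.NewArgs()
+//    f.Add("service", "web")
+//    tasks, err := c.GetTasks(apitypes.TaskListOptions{Filters: f})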
+func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + byName := func(filter filters.Args) error { + if filter.Include("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := c.GetService(serviceFilter, false) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := c.GetNode(nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, byName) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + tasks := make([]types.Task, 0, len(r.Tasks)) + + for _, task := range r.Tasks { + if task.Spec.GetContainer() != nil { + tasks = append(tasks, convert.TaskFromGRPC(*task)) + } + } + return tasks, nil +} + +// GetTask returns a task by an ID. +func (c *Cluster) GetTask(input string) (types.Task, error) { + var task *swarmapi.Task + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + t, err := getTask(ctx, state.controlClient, input) + if err != nil { + return err + } + task = t + return nil + }); err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task), nil +} diff --git a/daemon/cluster/utils.go b/daemon/cluster/utils.go new file mode 100644 index 0000000..9382796 --- /dev/null +++ b/daemon/cluster/utils.go @@ -0,0 +1,63 @@ +package cluster + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" +) + +func loadPersistentState(root string) (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(root, "certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + clearPersistentState(root) + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func savePersistentState(root string, config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(root, stateFile), dt, 0600) +} + +func clearPersistentState(root string) error { + // todo: backup this data instead of removing? 
+ // rather than delete the entire swarm directory, delete the contents in order to preserve the inode + // (for example, allowing it to be bind-mounted) + files, err := ioutil.ReadDir(root) + if err != nil { + return err + } + + for _, f := range files { + if err := os.RemoveAll(filepath.Join(root, f.Name())); err != nil { + return err + } + } + + return nil +} + +func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { + return reachable-2 <= unreachable +} + +func isLastManager(reachable, unreachable int) bool { + return reachable == 1 && unreachable == 0 +} diff --git a/daemon/commit.go b/daemon/commit.go new file mode 100644 index 0000000..084f488 --- /dev/null +++ b/daemon/commit.go @@ -0,0 +1,252 @@ +package daemon + +import ( + "encoding/json" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +// merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. +func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + imageEnvKey = strings.ToUpper(imageEnvKey) + userEnvKey = strings.ToUpper(userEnvKey) + } + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + for l, v := range imageConf.Labels { + if _, ok := userConf.Labels[l]; !ok { + userConf.Labels[l] = v + } + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + userConf.ArgsEscaped = imageConf.ArgsEscaped + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if imageConf.Healthcheck != nil { + if userConf.Healthcheck == nil { + userConf.Healthcheck = imageConf.Healthcheck + } else { + if len(userConf.Healthcheck.Test) == 0 { + userConf.Healthcheck.Test = imageConf.Healthcheck.Test + } + if userConf.Healthcheck.Interval == 0 { + userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval + } + if userConf.Healthcheck.Timeout == 0 { + userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout + } + if userConf.Healthcheck.StartPeriod == 0 { + userConf.Healthcheck.StartPeriod = imageConf.Healthcheck.StartPeriod + } + if 
userConf.Healthcheck.Retries == 0 { + userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries + } + } + } + + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows and on Solaris. + if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { + return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS) + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) + if err != nil { + return "", err + } + + if c.MergeConfigs { + if err := merge(newConfig, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var parent *image.Image + if container.ImageID == "" { + parent = new(image.Image) + parent.RootFS = image.NewRootFS() + } else { + parent, err = daemon.stores[container.Platform].imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + } + + l, err := daemon.stores[container.Platform].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.Platform(container.Platform)) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.stores[container.Platform].layerStore, l) + + containerConfig := c.ContainerConfig + if containerConfig == nil { + containerConfig = container.Config + } + cc := image.ChildConfig{ + ContainerID: container.ID, + Author: c.Author, + Comment: c.Comment, + ContainerConfig: containerConfig, + Config: newConfig, + DiffID: l.DiffID(), + } + config, err := json.Marshal(image.NewChildImage(parent, cc, container.Platform)) + if err != nil { + return "", err + } + + id, err := daemon.stores[container.Platform].imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.stores[container.Platform].imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + imageRef := "" + if c.Repo != "" { + newTag, err := reference.ParseNormalizedNamed(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if !reference.IsNameOnly(newTag) { + return "", errors.Errorf("unexpected repository name: %s", c.Repo) + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImageWithReference(id, container.Platform, newTag); err != nil { + return "", err + } + imageRef = reference.FamiliarString(newTag) + } + + attributes := map[string]string{ + "comment": c.Comment, + "imageID": id.String(), + "imageRef": imageRef, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + 
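+	// Record how long the commit took in the daemon's action-duration metrics.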
containerActions.WithValues("commit").UpdateSince(start) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/daemon/config/config.go b/daemon/config/config.go new file mode 100644 index 0000000..5009ff6 --- /dev/null +++ b/daemon/config/config.go @@ -0,0 +1,526 @@ +package config + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "reflect" + "runtime" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + daemondiscovery "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/registry" + "github.com/imdario/mergo" + "github.com/spf13/pflag" +) + +const ( + // DefaultMaxConcurrentDownloads is the default value for + // maximum number of downloads that + // may take place at a time for each pull. + DefaultMaxConcurrentDownloads = 3 + // DefaultMaxConcurrentUploads is the default value for + // maximum number of uploads that + // may take place at a time for each push. + DefaultMaxConcurrentUploads = 5 + // StockRuntimeName is the reserved name/alias used to represent the + // OCI runtime being shipped with the docker daemon package. + StockRuntimeName = "runc" + // DefaultShmSize is the default value for container's shm size + DefaultShmSize = int64(67108864) + // DefaultNetworkMtu is the default value for network MTU + DefaultNetworkMtu = 1500 + // DisableNetworkBridge is the default value of the option to disable network bridge + DisableNetworkBridge = "none" + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = "docker-init" +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, + "runtimes": true, + "default-ulimits": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// commonBridgeConfig stores all the platform-common bridge driver specific +// configuration. +type commonBridgeConfig struct { + Iface string `json:"bridge,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which is +// common across platforms. 
+// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonConfig struct { + AuthzMiddleware *authorization.Middleware `json:"-"` + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + DeltaGraphDriver string `json:"delta-storage-driver,omitempty"` + DeltaGraphOptions []string `json:"delta-storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + RootDeprecated string `json:"graph,omitempty"` + Root string `json:"data-root,omitempty"` + DeltaRoot string `json:"delta-data-root,omitempty"` + SocketGroup string `json:"group,omitempty"` + CorsHeaders string `json:"api-cors-header,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + + // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests + // when pushing to a registry which does not support schema 2. This field is marked as + // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the + // daemon ID will use a dedicated identifier not shared with exported signatures. + TrustKeyPath string `json:"deprecated-key-path,omitempty"` + + // LiveRestoreEnabled determines whether we should keep containers + // alive upon daemon shutdown/start + LiveRestoreEnabled bool `json:"live-restore,omitempty"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + // MaxConcurrentDownloads is the maximum number of downloads that + // may take place at a time for each pull. + MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` + + // MaxConcurrentUploads is the maximum number of uploads that + // may take place at a time for each push. + MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` + + // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container + // to stop when daemon is being shutdown + ShutdownTimeout int `json:"shutdown-timeout,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. 
+ CommonTLSOptions + + // SwarmDefaultAdvertiseAddr is the default host/IP or network interface + // to use if a wildcard address is specified in the ListenAddr value + // given to the /swarm/init endpoint and no advertise address is + // specified. + SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` + MetricsAddress string `json:"metrics-addr"` + + LogConfig + BridgeConfig // bridgeConfig holds bridge network specific configuration. + registry.ServiceOptions + + sync.Mutex + // FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags + // It should probably be handled outside this package. + ValuesSet map[string]interface{} + + Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (conf *Config) IsValueSet(name string) bool { + if conf.ValuesSet == nil { + return false + } + _, ok := conf.ValuesSet[name] + return ok +} + +// New returns a new fully initialized Config struct +func New() *Config { + config := Config{} + config.LogConfig.Config = make(map[string]string) + config.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + config.V2Only = true + } + return &config +} + +// ParseClusterAdvertiseSettings parses the specified advertise settings +func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { + if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { + return "", errors.New("Cluster Advertise Settings not supported on Solaris") + } + if clusterAdvertise == "" { + return "", daemondiscovery.ErrDiscoveryDisabled + } + if clusterStore == "" { + return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") + } + + advertise, err := discovery.ParseAdvertise(clusterAdvertise) + if err != nil { + return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) + } + return advertise, nil +} + +// GetConflictFreeLabels validates Labels for conflict +// In swarm the duplicates for labels are removed +// so we only take same values here, no conflict values +// If the key-value is the same we will only take the last label +func GetConflictFreeLabels(labels []string) ([]string, error) { + labelMap := map[string]string{} + for _, label := range labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will return an error + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + + newLabels := []string{} + for k, v := range labelMap { + newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) + } + return newLabels, nil +} + +// Reload reads the configuration in the host and reloads the daemon and server. 
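A quick sketch of the GetConflictFreeLabels semantics above: identical duplicates are collapsed, while two different values for the same key are an error. This is a test-style illustration, not part of the patch; the test name and label values are made up, and it would live alongside config_test.go in package config:

```go
package config

import "testing"

func TestGetConflictFreeLabelsSketch(t *testing.T) {
	// Identical duplicates collapse to a single label.
	labels, err := GetConflictFreeLabels([]string{"env=prod", "env=prod"})
	if err != nil || len(labels) != 1 {
		t.Fatalf("expected one label and no error, got %v, %v", labels, err)
	}

	// Two different values for the same key are rejected as a conflict.
	if _, err := GetConflictFreeLabels([]string{"env=prod", "env=dev"}); err == nil {
		t.Fatal("expected a conflict error, got nil")
	}
}
```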
+func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := Validate(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. + // + // TODO: After 3 release cycles (17.12) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := GetConflictFreeLabels(newConfig.Labels) + // if err != nil { + // return err + // } + // newConfig.Labels = newLabels + // + if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("configuration validation from file failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + // We need to validate again once both fileConfig and flagsConfig + // have been merged + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("merged configuration validation from file and command line flags failed (%v)", err) + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. 
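+		// The loop below forces boolean flags to the file's value (preserving an explicit "false") and collects keys that exist only as named options.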
+ namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup(key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. + flags.VisitAll(func(f *pflag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.ValuesSet = configSet + } + + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&config); err != nil { + return nil, err + } + + if config.RootDeprecated != "" { + logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`) + + if config.Root != "" { + return nil, fmt.Errorf(`cannot specify both "graph" and "data-root" config file options`) + } + + config.Root = config.RootDeprecated + } + + return &config, nil +} + +// configValuesSet returns the configuration values explicitly set in the file. +func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + if flag := flags.Lookup(key); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *pflag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. 
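+	// Only flags explicitly set on the command line are visited by flags.Visit below, so flags left at their defaults never register as conflicts.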
+ duplicatedConflicts := func(f *pflag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload + for _, name := range []string{f.Name, f.Shorthand} { + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// Validate validates some specific configs. +// such as config.DNS, config.Labels, config.DNSSearch, +// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. +func Validate(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + // validate MaxConcurrentDownloads + if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) + } + // validate MaxConcurrentUploads + if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) + } + + // validate that "default" runtime is not reset + if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { + if _, ok := runtimes[StockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName) + } + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != StockRuntimeName { + runtimes := config.GetAllRuntimes() + if _, ok := runtimes[defaultRuntime]; !ok { + return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + } + } + + return nil +} + +// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not. +func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) +} diff --git a/daemon/config/config_common_unix.go b/daemon/config/config_common_unix.go new file mode 100644 index 0000000..d11cceb --- /dev/null +++ b/daemon/config/config_common_unix.go @@ -0,0 +1,73 @@ +// +build solaris linux freebsd + +package config + +import ( + "net" + + "github.com/docker/docker/api/types" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. 
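One subtlety in ModifiedDiscoverySettings above is worth pinning down before the Unix-specific types: a nil cluster-options map and an empty one compare as equal, so only a real key change (or a changed store/advertise address) counts as a modification. A sketch in example-test style for package config; the function name and addresses are made up:

```go
package config

import "fmt"

func ExampleModifiedDiscoverySettings() {
	current := &Config{CommonConfig: CommonConfig{
		ClusterStore:     "consul://10.0.0.1:8500",
		ClusterAdvertise: "eth0:2376",
		ClusterOpts:      nil,
	}}

	// A nil options map and an empty one are treated as unchanged.
	fmt.Println(ModifiedDiscoverySettings(current, "consul://10.0.0.1:8500", "eth0:2376", map[string]string{}))

	// Changing the store backend is a modification.
	fmt.Println(ModifiedDiscoverySettings(current, "etcd://10.0.0.1:2379", "eth0:2376", nil))

	// Output:
	// false
	// true
}
```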
+type CommonUnixConfig struct { + ExecRoot string `json:"exec-root,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` + Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` + DefaultRuntime string `json:"default-runtime,omitempty"` + DefaultInitBinary string `json:"default-init,omitempty"` +} + +type commonUnixBridgeConfig struct { + DefaultIP net.IP `json:"ip,omitempty"` + IP string `json:"bip,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + conf.Lock() + defer conf.Unlock() + if rt, ok := conf.Runtimes[name]; ok { + return &rt + } + return nil +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + conf.Lock() + rt := conf.DefaultRuntime + conf.Unlock() + + return rt +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + conf.Lock() + rts := conf.Runtimes + conf.Unlock() + return rts +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return conf.ExecRoot +} + +// GetInitPath returns the configured docker-init path +func (conf *Config) GetInitPath() string { + conf.Lock() + defer conf.Unlock() + if conf.InitPath != "" { + return conf.InitPath + } + if conf.DefaultInitBinary != "" { + return conf.DefaultInitBinary + } + return DefaultInitBinary +} diff --git a/daemon/config/config_common_unix_test.go b/daemon/config/config_common_unix_test.go new file mode 100644 index 0000000..8647a22 --- /dev/null +++ b/daemon/config/config_common_unix_test.go @@ -0,0 +1,84 @@ +// +build !windows + +package config + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCommonUnixValidateConfigurationErrors(t *testing.T) { + testCases := []struct { + config *Config + }{ + // Can't override the stock runtime + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + Runtimes: map[string]types.Runtime{ + StockRuntimeName: {}, + }, + }, + }, + }, + // Default runtime should be present in runtimes + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + Runtimes: map[string]types.Runtime{ + "foo": {}, + }, + DefaultRuntime: "bar", + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestCommonUnixGetInitPath(t *testing.T) { + testCases := []struct { + config *Config + expectedInitPath string + }{ + { + config: &Config{ + InitPath: "some-init-path", + }, + expectedInitPath: "some-init-path", + }, + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + DefaultInitBinary: "foo-init-bin", + }, + }, + expectedInitPath: "foo-init-bin", + }, + { + config: &Config{ + InitPath: "init-path-A", + CommonUnixConfig: CommonUnixConfig{ + DefaultInitBinary: "init-path-B", + }, + }, + expectedInitPath: "init-path-A", + }, + { + config: &Config{}, + expectedInitPath: "docker-init", + }, + } + for _, tc := range testCases { + initPath := tc.config.GetInitPath() + if initPath != tc.expectedInitPath { + t.Fatalf("expected initPath to be %v, got %v", tc.expectedInitPath, initPath) + } + } +} diff --git a/daemon/config/config_solaris.go 
b/daemon/config/config_solaris.go new file mode 100644 index 0000000..f4f0802 --- /dev/null +++ b/daemon/config/config_solaris.go @@ -0,0 +1,29 @@ +package config + +import ( + "github.com/spf13/pflag" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // Fields below here are platform specific. + commonUnixBridgeConfig +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} diff --git a/daemon/config/config_test.go b/daemon/config/config_test.go new file mode 100644 index 0000000..cc5f010 --- /dev/null +++ b/daemon/config/config_test.go @@ -0,0 +1,391 @@ +package config + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ClusterSettings not supported on Solaris\n") + } + _, err := ParseClusterAdvertiseSettings("something", "") + if err != discovery.ErrDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.NoError(t, flags.Set("authorization-plugins", "asdf")) + + testutil.ErrorContains(t, + findConfigurationConflicts(config, flags), + "authorization-plugins: (from flag: asdf, from file: foobar)") +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.NoError(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.NoError(t, flags.Set("host", "unix:///var/run/docker.sock")) + + testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "hosts") +} + +func TestDaemonConfigurationMergeConflicts(t *testing.T) { + f, err := 
ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"debug": true}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.Bool("debug", false, "") + flags.Set("debug", "false") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "debug") { + t.Fatalf("expected debug conflict, got %v", err) + } +} + +func TestDaemonConfigurationMergeConcurrent(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"max-concurrent-downloads": 1}`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err != nil { + t.Fatal("expected error, got nil") + } +} + +func TestDaemonConfigurationMergeConcurrentError(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"max-concurrent-downloads": -1}`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected no error, got error %v", err) + } +} + +func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("tlscacert", "", "") + flags.Set("tlscacert", "~/.docker/ca.pem") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "tlscacert") { + t.Fatalf("expected tlscacert conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.Bool("tlsverify", false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + flags := pflag.NewFlagSet("base", pflag.ContinueOnError) + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("host", "unix:///var/run/docker.sock") + err = findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} + +func TestValidateConfigurationErrors(t *testing.T) { + minusNumber := -10 + testCases := []struct { + config *Config + }{ + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo=bar", "one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: 
[]string{"1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"2.2.2.2", "1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c", "123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestValidateConfiguration(t *testing.T) { + minusNumber := 4 + testCases := []struct { + config *Config + }{ + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := ModifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: 
opts, + }, + } +} diff --git a/daemon/config/config_unix.go b/daemon/config/config_unix.go new file mode 100644 index 0000000..8f1da59 --- /dev/null +++ b/daemon/config/config_unix.go @@ -0,0 +1,63 @@ +// +build linux freebsd + +package config + +import ( + "fmt" + + "github.com/docker/docker/opts" + units "github.com/docker/go-units" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + + // Fields below here are platform specific. + CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` + ShmSize opts.MemBytes `json:"default-shm-size,omitempty"` + NoNewPrivileges bool `json:"no-new-privileges,omitempty"` +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. + commonUnixBridgeConfig + + // Fields below here are platform specific. + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + if conf.ClusterStore != "" || conf.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if conf.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} diff --git a/daemon/config/config_unix_test.go b/daemon/config/config_unix_test.go new file mode 100644 index 0000000..9e52cb7 --- /dev/null +++ b/daemon/config/config_unix_test.go @@ -0,0 +1,139 @@ +// +build !windows + +package config + +import ( + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/docker/go-units" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetConflictFreeConfiguration(t *testing.T) { + configFileData := string([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + file := tempfile.NewTempFile(t, "docker-config", configFileData) + defer file.Remove() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, 
nil), "log-opt", "") + + cc, err := getConflictFreeConfiguration(file.Name(), flags) + require.NoError(t, err) + + assert.True(t, cc.Debug) + + expectedUlimits := map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Equal(t, expectedUlimits, cc.Ulimits) +} + +func TestDaemonConfigurationMerge(t *testing.T) { + configFileData := string([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + file := tempfile.NewTempFile(t, "docker-config", configFileData) + defer file.Remove() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Name()) + require.NoError(t, err) + + assert.True(t, cc.Debug) + assert.True(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, + } + + assert.Equal(t, expectedLogConfig, cc.LogConfig) + + expectedUlimits := map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Equal(t, expectedUlimits, cc.Ulimits) +} + +func TestDaemonConfigurationMergeShmSize(t *testing.T) { + data := string([]byte(` + { + "default-shm-size": "1g" + }`)) + + file := tempfile.NewTempFile(t, "docker-config", data) + defer file.Remove() + + c := &Config{} + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + shmSize := opts.MemBytes(DefaultShmSize) + flags.Var(&shmSize, "default-shm-size", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Name()) + require.NoError(t, err) + + expectedValue := 1 * 1024 * 1024 * 1024 + if cc.ShmSize.Value() != int64(expectedValue) { + t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value()) + } +} diff --git a/daemon/config/config_windows.go b/daemon/config/config_windows.go new file mode 100644 index 0000000..849acc1 --- /dev/null +++ b/daemon/config/config_windows.go @@ -0,0 +1,52 @@ +package config + +import ( + "github.com/docker/docker/api/types" +) + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `dockerd -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) 
+} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (conf *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + return StockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return "" +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} diff --git a/daemon/config/config_windows_test.go b/daemon/config/config_windows_test.go new file mode 100644 index 0000000..92ee8e4 --- /dev/null +++ b/daemon/config/config_windows_test.go @@ -0,0 +1,60 @@ +// +build windows + +package config + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/opts" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, configFile) + require.NoError(t, err) + + assert.True(t, cc.Debug) + assert.True(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, + } + + assert.Equal(t, expectedLogConfig, cc.LogConfig) +} diff --git a/daemon/configs.go b/daemon/configs.go new file mode 100644 index 0000000..31da56b --- /dev/null +++ b/daemon/configs.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SetContainerConfigReferences sets the container config references needed +func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { + if !configsSupported() && len(refs) > 0 { + logrus.Warn("configs are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.ConfigReferences = refs + + return nil +} diff --git a/daemon/configs_linux.go b/daemon/configs_linux.go new file mode 100644 index 0000000..af20ad7 --- /dev/null +++ b/daemon/configs_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func configsSupported() bool { + return true +} diff --git a/daemon/configs_unsupported.go b/daemon/configs_unsupported.go new file mode 100644 index 0000000..1a7cbc9 --- /dev/null +++ b/daemon/configs_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon + +func configsSupported() bool { + return false +} diff --git a/daemon/configs_windows.go b/daemon/configs_windows.go new file mode 100644 index 
0000000..7cb2e9c --- /dev/null +++ b/daemon/configs_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package daemon + +func configsSupported() bool { + return true +} diff --git a/daemon/container.go b/daemon/container.go new file mode 100644 index 0000000..149df0d --- /dev/null +++ b/daemon/container.go @@ -0,0 +1,321 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// checkContainer make sure the specified container validates the specified conditions +func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error { + for _, condition := range conditions { + if err := condition(container); err != nil { + return err + } + } + return nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. 
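GetContainer above resolves its argument in a fixed order: exact full-ID match, then exact name match, then unique ID prefix via the truncindex. A hypothetical caller sketch; resolveExamples is a made-up name, as are the ID and container name, and it assumes package daemon from this patch:

```go
// Sketch only; would sit in package daemon alongside container.go.
func resolveExamples(d *Daemon) error {
	// 1. A full 64-hex-digit ID is matched directly in the container store.
	if _, err := d.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); err != nil {
		return err
	}

	// 2. An exact name goes through GetByName; the leading "/" is implied.
	if _, err := d.GetContainer("web-1"); err != nil {
		return err
	}

	// 3. Anything else is treated as an ID prefix and resolved through the
	//    truncindex, succeeding only if the prefix is unambiguous.
	_, err := d.GetContainer("3cdbd1")
	return err
}
```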
+func (daemon *Daemon) load(id string) (*container.Container, error) {
+	container := daemon.newBaseContainer(id)
+
+	if err := container.FromDisk(); err != nil {
+		return nil, err
+	}
+	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
+		return nil, err
+	}
+
+	if container.ID != id {
+		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
+	}
+
+	return container, nil
+}
+
+// Register makes a container object usable by the daemon as <container.ID>
+func (daemon *Daemon) Register(c *container.Container) error {
+	// Attach to stdout and stderr
+	if c.Config.OpenStdin {
+		c.StreamConfig.NewInputPipes()
+	} else {
+		c.StreamConfig.NewNopInputPipe()
+	}
+
+	// once in the memory store it is visible to other goroutines
+	// grab a Lock until it has been checkpointed to avoid races
+	c.Lock()
+	defer c.Unlock()
+
+	daemon.containers.Add(c.ID, c)
+	daemon.idIndex.Add(c.ID)
+	return c.CheckpointTo(daemon.containersReplica)
+}
+
+func (daemon *Daemon) newContainer(name string, platform string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {
+	var (
+		id             string
+		err            error
+		noExplicitName = name == ""
+	)
+	id, name, err = daemon.generateIDAndName(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if hostConfig.NetworkMode.IsHost() {
+		if config.Hostname == "" {
+			config.Hostname, err = os.Hostname()
+			if err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		daemon.generateHostname(id, config)
+	}
+	entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
+
+	base := daemon.newBaseContainer(id)
+	base.Created = time.Now().UTC()
+	base.Managed = managed
+	base.Path = entrypoint
+	base.Args = args //FIXME: de-duplicate from config
+	base.Config = config
+	base.HostConfig = &containertypes.HostConfig{}
+	base.ImageID = imgID
+	base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
+	base.Name = name
+	base.Driver = daemon.GraphDriverName(platform)
+	base.Platform = platform
+	return base, err
+}
+
+// GetByName returns a container given a name.
+func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
+	if len(name) == 0 {
+		return nil, fmt.Errorf("No container name supplied")
+	}
+	fullName := name
+	if name[0] != '/' {
+		fullName = "/" + name
+	}
+	id, err := daemon.nameIndex.Get(fullName)
+	if err != nil {
+		return nil, fmt.Errorf("Could not find entity for %s", name)
+	}
+	e := daemon.containers.Get(id)
+	if e == nil {
+		return nil, fmt.Errorf("Could not find container for entity id %s", id)
+	}
+	return e, nil
+}
+
+// newBaseContainer creates a new container with its initial
+// configuration based on the root storage from the daemon.
+func (daemon *Daemon) newBaseContainer(id string) *container.Container {
+	return container.NewBaseContainer(id, daemon.containerRoot(id))
+}
+
+func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) {
+	if len(configEntrypoint) != 0 {
+		return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
+ } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return daemon.parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + runconfig.SetDefaultNetModeIfBlank(hostConfig) + container.HostConfig = hostConfig + return container.CheckpointTo(daemon.containersReplica) +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + + // Validate if Env contains empty variable or not (e.g., ``, `=foo`) + for _, env := range config.Env { + if _, err := opts.ValidateEnv(env); err != nil { + return nil, err + } + } + + // Validate the healthcheck params of Config + if config.Healthcheck != nil { + if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration { + return nil, fmt.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration { + return nil, fmt.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Retries < 0 { + return nil, fmt.Errorf("Retries in Healthcheck cannot be negative") + } + + if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration { + return nil, fmt.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + } + } + + if hostConfig == nil { + return nil, nil + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + } + + for _, extraHost := range hostConfig.ExtraHosts { + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("invalid 
port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + } + } + } + + p := hostConfig.RestartPolicy + + switch p.Name { + case "always", "unless-stopped", "no": + if p.MaximumRetryCount != 0 { + return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + } + case "on-failure": + if p.MaximumRetryCount < 0 { + return nil, fmt.Errorf("maximum retry count cannot be negative") + } + case "": + // do nothing + default: + return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} diff --git a/daemon/container_linux.go b/daemon/container_linux.go new file mode 100644 index 0000000..2c87715 --- /dev/null +++ b/daemon/container_linux.go @@ -0,0 +1,29 @@ +//+build !windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + container.AppArmorProfile = "" //we don't care about the previous value. + + if !daemon.apparmorEnabled { + return nil // if apparmor is disabled there is nothing to do here. + } + + if err := parseSecurityOpt(container, container.HostConfig); err != nil { + return err + } + + if !container.HostConfig.Privileged { + if container.AppArmorProfile == "" { + container.AppArmorProfile = defaultApparmorProfile + } + + } else { + container.AppArmorProfile = "unconfined" + } + return nil +} diff --git a/daemon/container_operations.go b/daemon/container_operations.go new file mode 100644 index 0000000..00ed2f4 --- /dev/null +++ b/daemon/container_operations.go @@ -0,0 +1,1085 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. 
+ ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + getPortMapInfo = container.GetSandboxPortMapInfo +) + +func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string { + if len(container.HostConfig.DNSSearch) > 0 { + return container.HostConfig.DNSSearch + } + + if len(daemon.configStore.DNSSearch) > 0 { + return daemon.configStore.DNSSearch + } + + return nil +} + +func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { + var ( + sboxOptions []libnetwork.SandboxOption + err error + dns []string + dnsOptions []string + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) + + if container.HostConfig.NetworkMode.IsHost() { + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + if len(container.HostConfig.ExtraHosts) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + } + if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && + len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && + len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } + } else { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) + } + + if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { + return nil, err + } + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS + } + + for _, d := range dns { + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) + } + + dnsSearch := daemon.getDNSSearchSettings(container) + + for _, ds := range dnsSearch { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.HostConfig.DNSOptions) > 0 { + dnsOptions = container.HostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions + } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." 
+ container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) + } + } + + for _, extraHost := range container.HostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + sboxOptions = append(sboxOptions, + libnetwork.OptionPortMapping(pbList), + libnetwork.OptionExposedPorts(exposeList)) + + // Legacy Link feature is supported only for the default bridge network. 
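+	// (the network named by runconfig.DefaultDaemonNetworkMode().NetworkName(), "bridge" on Linux)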
+ // return if this call to build join options is not for default bridge network + // Legacy Link is only supported by docker run --link + bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] + if !ok || bridgeSettings.EndpointSettings == nil { + return sboxOptions, nil + } + + if bridgeSettings.EndpointID == "" { + return sboxOptions, nil + } + + var ( + childEndpoints, parentEndpoints []string + cEndpointID string + ) + + children := daemon.children(container) + for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) + cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID + if cEndpointID != "" { + childEndpoints = append(childEndpoints, cEndpointID) + } + } + + for alias, parent := range daemon.parents(container) { + if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { + continue + } + + _, alias = path.Split(alias) + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( + parent.ID, + alias, + bridgeSettings.IPAddress, + )) + if cEndpointID != "" { + parentEndpoints = append(parentEndpoints, cEndpointID) + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) + return sboxOptions, nil +} + +func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} + } + + if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !containertypes.NetworkMode(sn.Type()).IsPrivate() || + !containertypes.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(sn.Name()).IsNone() || + containertypes.NetworkMode(n.Name()).IsNone() { + return runconfig.ErrConflictNoNetwork + } + } + + if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.BuildEndpointInfo(n, ep); err != nil { + return err + } + + if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { + 
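		// surface the bridge interface name in NetworkSettings so it shows up in docker inspect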
container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface + } + + return nil +} + +// UpdateNetwork is used to update the container's network (e.g. when linked containers +// get removed/unlinked). +func (daemon *Daemon) updateNetwork(container *container.Container) error { + var ( + start = time.Now() + ctrl = daemon.netController + sid = container.NetworkSettings.SandboxID + ) + + sb, err := ctrl.SandboxByID(sid) + if err != nil { + return fmt.Errorf("error locating sandbox id %s: %v", sid, err) + } + + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := sb.Refresh(options...); err != nil { + return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) + } + + networkActions.WithValues("update").UpdateSince(start) + + return nil +} + +func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { + n, err := daemon.FindNetwork(idOrName) + if err != nil { + // We should always be able to find the network for a + // managed container. + if container.Managed { + return nil, nil, err + } + } + + // If we found a network and if it is not dynamically created + // we should never attempt to attach to that network here. + if n != nil { + if container.Managed || !n.Info().Dynamic() { + return n, nil, nil + } + } + + var addresses []string + if epConfig != nil && epConfig.IPAMConfig != nil { + if epConfig.IPAMConfig.IPv4Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) + } + + if epConfig.IPAMConfig.IPv6Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) + } + } + + var ( + config *networktypes.NetworkingConfig + retryCount int + ) + + for { + // In all other cases, attempt to attach to the network to + // trigger attachment in the swarm cluster manager. + if daemon.clusterProvider != nil { + var err error + config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + if err != nil { + return nil, nil, err + } + } + + n, err = daemon.FindNetwork(idOrName) + if err != nil { + if daemon.clusterProvider != nil { + if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + } + } + + // Retry network attach again if we failed to + // find the network after successful + // attachment because the only reason that + // would happen is if some other container + // attached to the swarm scope network went down + // and removed the network while we were in + // the process of attaching. 
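+			// the retry below is bounded: after five failed lookups we give up and return the error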
+			if config != nil {
+				if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
+					if retryCount >= 5 {
+						return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName)
+					}
+					retryCount++
+					continue
+				}
+			}
+
+			return nil, nil, err
+		}
+
+		break
+	}
+
+	// This container has attachment to a swarm scope
+	// network. Update the container network settings accordingly.
+	container.NetworkSettings.HasSwarmEndpoint = true
+	return n, config, nil
+}
+
+// updateContainerNetworkSettings updates the network settings
+func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) {
+	var n libnetwork.Network
+
+	mode := container.HostConfig.NetworkMode
+	if container.Config.NetworkDisabled || mode.IsContainer() {
+		return
+	}
+
+	networkName := mode.NetworkName()
+	if mode.IsDefault() {
+		networkName = daemon.netController.Config().Daemon.DefaultNetwork
+	}
+
+	if mode.IsUserDefined() {
+		var err error
+
+		n, err = daemon.FindNetwork(networkName)
+		if err == nil {
+			networkName = n.Name()
+		}
+	}
+
+	if container.NetworkSettings == nil {
+		container.NetworkSettings = &network.Settings{}
+	}
+
+	if len(endpointsConfig) > 0 {
+		if container.NetworkSettings.Networks == nil {
+			container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
+		}
+
+		for name, epConfig := range endpointsConfig {
+			container.NetworkSettings.Networks[name] = &network.EndpointSettings{
+				EndpointSettings: epConfig,
+			}
+		}
+	}
+
+	if container.NetworkSettings.Networks == nil {
+		container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
+		container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{
+			EndpointSettings: &networktypes.EndpointSettings{},
+		}
+	}
+
+	// Convert any settings added by client in default name to
+	// engine's default network name key
+	if mode.IsDefault() {
+		if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok {
+			container.NetworkSettings.Networks[networkName] = nConf
+			delete(container.NetworkSettings.Networks, mode.NetworkName())
+		}
+	}
+
+	if !mode.IsUserDefined() {
+		return
+	}
+	// Make sure to internally store the per network endpoint config by network name
+	if _, ok := container.NetworkSettings.Networks[networkName]; ok {
+		return
+	}
+
+	if n != nil {
+		if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok {
+			container.NetworkSettings.Networks[networkName] = nwConfig
+			delete(container.NetworkSettings.Networks, n.ID())
+			return
+		}
+	}
+}
+
+func (daemon *Daemon) allocateNetwork(container *container.Container) error {
+	start := time.Now()
+	controller := daemon.netController
+
+	if daemon.netController == nil {
+		return nil
+	}
+
+	// Cleanup any stale sandbox left over due to ungraceful daemon shutdown
+	if err := controller.SandboxDestroy(container.ID); err != nil {
+		logrus.Errorf("failed to clean up stale network sandbox for container %s", container.ID)
+	}
+
+	if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() {
+		return nil
+	}
+
+	updateSettings := false
+
+	if len(container.NetworkSettings.Networks) == 0 {
+		daemon.updateContainerNetworkSettings(container, nil)
+		updateSettings = true
+	}
+
+	// Always connect the default network first, since only the default network
+	// mode supports links and some link-related setup must happen when the
+	// sandbox is initialized; the sandbox is only initialized on the first
+	// network connection.
+ defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { + cleanOperationalData(nConf) + if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { + return err + } + + } + + // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" + networks := make(map[string]*network.EndpointSettings) + for n, epConf := range container.NetworkSettings.Networks { + if n == defaultNetName { + continue + } + + networks[n] = epConf + } + + for netName, epConf := range networks { + cleanOperationalData(epConf) + if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { + return err + } + } + + // If the container is not to be connected to any network, + // create its network sandbox now if not present + if len(networks) == 0 { + if nil == daemon.getNetworkSandbox(container) { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err := daemon.netController.NewSandbox(container.ID, options...) + if err != nil { + return err + } + container.UpdateSandboxNetworkSettings(sb) + defer func() { + if err != nil { + sb.Delete() + } + }() + } + + } + + if _, err := container.WriteHostConfig(); err != nil { + return err + } + networkActions.WithValues("allocate").UpdateSince(start) + return nil +} + +func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration +func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { + return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) +} + +// User specified ip address is acceptable only for networks with user specified subnets. 
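For context on what this guards: a client requests a static address by attaching an IPAMConfig to the endpoint settings it sends. A hedged sketch using the public API types (the real enforcement is the unexported validateNetworkingConfig that follows; the helper here simply mirrors hasUserDefinedIPAddress above):

package main

import (
	"fmt"

	networktypes "github.com/docker/docker/api/types/network"
)

// hasStaticIP mirrors the unexported hasUserDefinedIPAddress helper above.
func hasStaticIP(ep *networktypes.EndpointSettings) bool {
	return ep != nil && ep.IPAMConfig != nil &&
		(len(ep.IPAMConfig.IPv4Address) > 0 || len(ep.IPAMConfig.IPv6Address) > 0)
}

func main() {
	// A request like `docker network connect --ip 172.20.0.10 mynet mycontainer`
	// arrives as endpoint settings of roughly this shape.
	ep := &networktypes.EndpointSettings{
		IPAMConfig: &networktypes.EndpointIPAMConfig{
			IPv4Address: "172.20.0.10", // only valid on a network with a user-configured subnet
		},
	}
	fmt.Println("static IP requested:", hasStaticIP(ep))
}

validateNetworkingConfig below then rejects a static address on any network that has no user-specified subnet to draw it from.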
+func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } + if !hasUserDefinedIPAddress(epConfig) { + return nil + } + _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() + for _, s := range []struct { + ipConfigured bool + subnetConfigs []*libnetwork.IpamConf + }{ + { + ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, + subnetConfigs: nwIPv4Configs, + }, + { + ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, + subnetConfigs: nwIPv6Configs, + }, + } { + if s.ipConfigured { + foundSubnet := false + for _, cfg := range s.subnetConfigs { + if len(cfg.PreferredPool) > 0 { + foundSubnet = true + break + } + } + if !foundSubnet { + return runconfig.ErrUnsupportedNetworkNoSubnetAndIP + } + } + } + + return nil +} + +// cleanOperationalData resets the operational data from the passed endpoint settings +func cleanOperationalData(es *network.EndpointSettings) { + es.EndpointID = "" + es.Gateway = "" + es.IPAddress = "" + es.IPPrefixLen = 0 + es.IPv6Gateway = "" + es.GlobalIPv6Address = "" + es.GlobalIPv6PrefixLen = 0 + es.MacAddress = "" + if es.IPAMOperational { + es.IPAMConfig = nil + } +} + +func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { + return runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { + return runconfig.ErrUnsupportedNetworkAndAlias + } + } else { + addShortID := true + shortID := stringid.TruncateID(container.ID) + for _, alias := range endpointConfig.Aliases { + if alias == shortID { + addShortID = false + break + } + } + if addShortID { + endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) + } + } + + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return err + } + + if updateSettings { + if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { + start := time.Now() + if container.HostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { + container.Config.NetworkDisabled = true + return nil + } + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + + n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) + if err != nil { + return err + } + if n == nil { + return nil + } + + var operIPAM bool + if config != nil { + if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { + if endpointConfig.IPAMConfig == nil || + (endpointConfig.IPAMConfig.IPv4Address == "" && + endpointConfig.IPAMConfig.IPv6Address == "" && + len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { + operIPAM = true + } + + // copy IPAMConfig and NetworkID from epConfig via AttachNetwork + endpointConfig.IPAMConfig = epConfig.IPAMConfig + endpointConfig.NetworkID = epConfig.NetworkID + } + } + + err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) + if err != 
nil { + return err + } + + controller := daemon.netController + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + if err != nil { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err := n.CreateEndpoint(endpointName, createOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(false); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + IPAMOperational: operIPAM, + } + if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { + delete(container.NetworkSettings.Networks, n.ID()) + } + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + if sb == nil { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err = controller.NewSandbox(container.ID, options...) + if err != nil { + return err + } + + container.UpdateSandboxNetworkSettings(sb) + } + + joinOptions, err := container.BuildJoinOptions(n) + if err != nil { + return err + } + + if err := ep.Join(sb, joinOptions...); err != nil { + return err + } + + if !container.Managed { + // add container name/alias to DNS + if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { + return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) + } + } + + if err := container.UpdateJoinInfo(n, ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sb) + + daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) + networkActions.WithValues("connect").UpdateSince(start) + return nil +} + +// ForceEndpointDelete deletes an endpoint from a network forcefully +func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + + ep, err := n.EndpointByName(name) + if err != nil { + return err + } + return ep.Delete(true) +} + +func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + } + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil && force { + epName := strings.TrimPrefix(container.Name, "/") + ep, err := n.EndpointByName(epName) + if err != nil { + return err + } + return ep.Delete(force) + } + + if ep == nil { + return fmt.Errorf("container %s is not connected to network %s", container.ID, n.Name()) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sbox) + + if err := ep.Delete(false); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + + daemon.tryDetachContainerFromClusterNetwork(n, container) + + 
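+	// the endpoint is removed and settings updated; the sandbox itself is torn down later by releaseNetwork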
return nil +} + +func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) { + if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.ID(), err) + } + } + } + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes) +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + initializeNetworkingPaths(container, nc) + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + if container.Config.Hostname == "" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + return container.BuildHostnameFile() +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + start := time.Now() + if daemon.netController == nil { + return + } + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + sid := container.NetworkSettings.SandboxID + settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + + if sid == "" { + return + } + + var networks []libnetwork.Network + for n, epSettings := range settings { + if nw, err := daemon.FindNetwork(n); err == nil { + networks = append(networks, nw) + } + + if epSettings.EndpointSettings == nil { + continue + } + + cleanOperationalData(epSettings) + } + + sb, err := daemon.netController.SandboxByID(sid) + if err != nil { + logrus.Warnf("error locating sandbox id %s: %v", sid, err) + return + } + + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + } + + for _, nw := range networks { + daemon.tryDetachContainerFromClusterNetwork(nw, container) + } + networkActions.WithValues("release").UpdateSince(start) +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} + +// ConnectToNetwork connects a container to a network +func (daemon *Daemon) ConnectToNetwork(container 
*container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + container.Lock() + defer container.Unlock() + + if !container.Running { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + + n, err := daemon.FindNetwork(idOrName) + if err == nil && n != nil { + if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { + return err + } + } else { + container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + } else if !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else { + if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { + return err + } + } + + return container.CheckpointTo(daemon.containersReplica) +} + +// DisconnectFromNetwork disconnects container from network n. +func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { + n, err := daemon.FindNetwork(networkName) + container.Lock() + defer container.Unlock() + + if !container.Running || (err != nil && force) { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + // In case networkName is resolved we will use n.Name() + // this will cover the case where network id is passed. + if n != nil { + networkName = n.Name() + } + if _, ok := container.NetworkSettings.Networks[networkName]; !ok { + return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) + } + delete(container.NetworkSettings.Networks, networkName) + } else if err == nil && !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else if err == nil { + if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + if err := daemon.disconnectFromNetwork(container, n, false); err != nil { + return err + } + } else { + return err + } + + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + + if n != nil { + daemon.LogNetworkEventWithAttributes(n, "disconnect", map[string]string{ + "container": container.ID, + }) + } + + return nil +} + +// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response +func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.EnableService() +} + +// DeactivateContainerServiceBinding removes this container from load balancer active rotation, and DNS response +func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + // If the network sandbox is not found, then there is nothing to deactivate + logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName) + return nil + } + return 
sb.DisableService() +} diff --git a/daemon/container_operations_solaris.go b/daemon/container_operations_solaris.go new file mode 100644 index 0000000..1653948 --- /dev/null +++ b/daemon/container_operations_solaris.go @@ -0,0 +1,46 @@ +// +build solaris + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go new file mode 100644 index 0000000..277c83e --- /dev/null +++ b/daemon/container_operations_unix.go @@ -0,0 +1,356 @@ +// +build linux freebsd + +package daemon + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + env = append(env, link.ToEnv()...) 
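+		// ToEnv expands the link into ALIAS_NAME, ALIAS_PORT and per-port
+		// ALIAS_PORT_<n>_<proto>[_ADDR|_PORT|_PROTO] variables, plus
+		// ALIAS_ENV_* copies of the child's environment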
+ } + + return env, nil +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + container, err := daemon.GetContainer(containerID) + if err != nil { + return nil, errors.Wrapf(err, "cannot join IPC of a non running container: %s", container.ID) + } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.PidMode.Container() + container, err := daemon.GetContainer(containerID) + if err != nil { + return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", container.ID) + } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func containerIsRunning(c *container.Container) error { + if !c.IsRunning() { + return errors.Errorf("container %s is not running", c.ID) + } + return nil +} + +func containerIsNotRestarting(c *container.Container) error { + if c.IsRestarting() { + return errContainerIsRestarting(c.ID) + } + return nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootIDs := daemon.idMappings.RootPair() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAndChown(shmPath, 0700, rootIDs); err != nil { + return err + } + + shmSize := int64(daemon.configStore.ShmSize) + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + // create tmpfs + if err := idtools.MkdirAllAndChown(localMountPath, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + + defer func() { + if setupErr != nil { + // cleanup + _ = detachMounted(localMountPath) + + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + + if c.DependencyStore == nil { + return 
fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating secret mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + + uid, err := strconv.Atoi(s.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(s.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for secret") + } + } + + label.Relabel(localMountPath, c.MountLabel, false) + + // remount secrets ro + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount secret dir as readonly") + } + + return nil +} + +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + // create tmpfs + if err := idtools.MkdirAllAndChown(localPath, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config path") + } + + log.Debug("injecting config") + config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + + uid, err := strconv.Atoi(configRef.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(configRef.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for config") + } + } + + return nil +} + +func killProcessDirectly(cntr *container.Container) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + 
+	defer cancel()
+
+	// Block until the container stops or the timeout expires.
+	status := <-cntr.Wait(ctx, container.WaitConditionNotRunning)
+	if status.Err() != nil {
+		// Ensure that we don't kill ourselves
+		if pid := cntr.GetPID(); pid != 0 {
+			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
+			if err := syscall.Kill(pid, 9); err != nil {
+				if err != syscall.ESRCH {
+					return err
+				}
+				e := errNoSuchProcess{pid, 9}
+				logrus.Debug(e)
+				return e
+			}
+		}
+	}
+	return nil
+}
+
+func detachMounted(path string) error {
+	return syscall.Unmount(path, syscall.MNT_DETACH)
+}
+
+func isLinkable(child *container.Container) bool {
+	// A container is linkable only if it belongs to the default network
+	_, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()]
+	return ok
+}
+
+func enableIPOnPredefinedNetwork() bool {
+	return false
+}
+
+func (daemon *Daemon) isNetworkHotPluggable() bool {
+	return true
+}
+
+func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
+	var err error
+
+	container.HostsPath, err = container.GetRootResourcePath("hosts")
+	if err != nil {
+		return err
+	}
+	*sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath))
+
+	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+	if err != nil {
+		return err
+	}
+	*sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath))
+	return nil
+}
+
+func initializeNetworkingPaths(container *container.Container, nc *container.Container) {
+	container.HostnamePath = nc.HostnamePath
+	container.HostsPath = nc.HostsPath
+	container.ResolvConfPath = nc.ResolvConfPath
+}
diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go
new file mode 100644
index 0000000..1c20ebd
--- /dev/null
+++ b/daemon/container_operations_windows.go
@@ -0,0 +1,165 @@
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/libnetwork"
+	"github.com/pkg/errors"
+)
+
+func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
+	return nil, nil
+}
+
+func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
+	if len(c.ConfigReferences) == 0 {
+		return nil
+	}
+
+	localPath := c.ConfigsDirPath()
+	logrus.Debugf("configs: setting up config dir: %s", localPath)
+
+	// create local config root
+	if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil {
+		return errors.Wrap(err, "error creating config dir")
+	}
+
+	defer func() {
+		if setupErr != nil {
+			if err := os.RemoveAll(localPath); err != nil {
+				logrus.Errorf("error cleaning up config dir: %s", err)
+			}
+		}
+	}()
+
+	if c.DependencyStore == nil {
+		return fmt.Errorf("config store is not initialized")
+	}
+
+	for _, configRef := range c.ConfigReferences {
+		// TODO (ehazlett): use type switch when more are supported
+		if configRef.File == nil {
+			logrus.Error("config target type is not a file target")
+			continue
+		}
+
+		fPath := c.ConfigFilePath(*configRef)
+
+		log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath})
+
+		log.Debug("injecting config")
+		config, err := c.DependencyStore.Configs().Get(configRef.ConfigID)
+		if err != nil {
+			return errors.Wrap(err, "unable to get config
from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + } + + return nil +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(containerID string) (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work +// against containers which have volumes. You will still be able to cp +// to somewhere on the container drive, but not to any mounted volumes +// inside the container. Without this fix, docker cp is broken to any +// container which has a volume, regardless of where the file is inside the +// container. +func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // create local secret root + if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating secret local directory") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath := c.SecretFilePath(*s) + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} + +func enableIPOnPredefinedNetwork() bool { + return true +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { + container.NetworkSharedContainerID = nc.ID +} diff --git a/daemon/container_windows.go b/daemon/container_windows.go new file mode 100644 index 0000000..6fdd1e6 --- /dev/null +++ b/daemon/container_windows.go @@ -0,0 +1,11 @@ +//+build windows + +package daemon + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + return nil +} diff --git a/daemon/create.go b/daemon/create.go new file mode 100644 index 0000000..c675ece --- /dev/null +++ b/daemon/create.go @@ -0,0 +1,543 @@ +package daemon + +import ( + "archive/tar" + 
"bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/resin-os/librsync-go" +) + +// CreateManagedContainer creates a container that is managed by a Service +func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, true) +} + +// ContainerCreate creates a regular container +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, false) +} + +func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { + start := time.Now() + if params.Config == nil { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + container, err := daemon.create(params, managed) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + containerActions.WithValues("create").UpdateSince(start) + + return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + // TODO: @jhowardmsft LCOW support - at a later point, can remove the hard-coding + // to force the platform to be linux. 
+
+ // Default the platform if not supplied
+ if params.Platform == "" {
+ params.Platform = runtime.GOOS
+ }
+ if system.LCOWSupported() {
+ params.Platform = "linux"
+ }
+
+ if params.Config.Image != "" {
+ img, err = daemon.GetImage(params.Config.Image)
+ if err != nil {
+ return nil, err
+ }
+
+ if runtime.GOOS == "solaris" && img.OS != "solaris" {
+ return nil, errors.New("platform on which parent image was created is not Solaris")
+ }
+ imgID = img.ID()
+
+ if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() {
+ return nil, errors.New("platform on which parent image was created is not Windows")
+ }
+ }
+
+ // Make sure the platform requested matches the image
+ if img != nil {
+ if params.Platform != img.Platform() {
+ // Ignore this in LCOW mode. @jhowardmsft TODO - This will need revisiting later.
+ if !system.LCOWSupported() {
+ return nil, fmt.Errorf("cannot create a %s container from a %s image", params.Platform, img.Platform())
+ }
+ }
+ }
+
+ if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
+ return nil, err
+ }
+
+ if err := daemon.mergeAndVerifyLogConfig(&params.HostConfig.LogConfig); err != nil {
+ return nil, err
+ }
+
+ if container, err = daemon.newContainer(params.Name, params.Platform, params.Config, params.HostConfig, imgID, managed); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if retErr != nil {
+ if err := daemon.cleanupContainer(container, true, true); err != nil {
+ logrus.Errorf("failed to cleanup container on create error: %v", err)
+ }
+ }
+ }()
+
+ if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
+ return nil, err
+ }
+
+ container.HostConfig.StorageOpt = params.HostConfig.StorageOpt
+
+ // Set RWLayer for container after mount labels have been set
+ if err := daemon.setRWLayer(container, params.HostConfig.Runtime); err != nil {
+ return nil, err
+ }
+
+ rootIDs := daemon.idMappings.RootPair()
+ if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil {
+ return nil, err
+ }
+ if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil {
+ return nil, err
+ }
+
+ if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
+ return nil, err
+ }
+
+ if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
+ return nil, err
+ }
+
+ var endpointsConfigs map[string]*networktypes.EndpointSettings
+ if params.NetworkingConfig != nil {
+ endpointsConfigs = params.NetworkingConfig.EndpointsConfig
+ }
+ // Make sure NetworkMode has an acceptable value. We do this to ensure
+ // backwards API compatibility. 
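+ // For example, an older client may send an empty HostConfig.NetworkMode;
+ // the helper below rewrites that blank value to the default network mode
+ // so later code never has to special-case "".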
+ runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + daemon.updateContainerNetworkSettings(container, endpointsConfigs) + if err := daemon.Register(container); err != nil { + return nil, err + } + stateCtr.set(container.ID, "stopped") + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func toHostConfigSelinuxLabels(labels []string) []string { + for i, l := range labels { + labels[i] = "label=" + l + } + return labels +} + +func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) { + for _, opt := range hostConfig.SecurityOpt { + con := strings.Split(opt, "=") + if con[0] == "label" { + // Caller overrode SecurityOpts + return nil, nil + } + } + ipcMode := hostConfig.IpcMode + pidMode := hostConfig.PidMode + privileged := hostConfig.Privileged + if ipcMode.IsHost() || pidMode.IsHost() || privileged { + return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil + } + + var ipcLabel []string + var pidLabel []string + ipcContainer := ipcMode.Container() + pidContainer := pidMode.Container() + if ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + ipcLabel = label.DupSecOpt(c.ProcessLabel) + if pidContainer == "" { + return toHostConfigSelinuxLabels(ipcLabel), err + } + } + if pidContainer != "" { + c, err := daemon.GetContainer(pidContainer) + if err != nil { + return nil, err + } + + pidLabel = label.DupSecOpt(c.ProcessLabel) + if ipcContainer == "" { + return toHostConfigSelinuxLabels(pidLabel), err + } + } + + if pidLabel != nil && ipcLabel != nil { + for i := 0; i < len(pidLabel); i++ { + if pidLabel[i] != ipcLabel[i] { + return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") + } + } + return toHostConfigSelinuxLabels(pidLabel), nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container, runtime string) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + + initFunc := daemon.getLayerInit() + if runtime == "bare" { + initFunc = nil + } + + rwLayerOpts := &layer.CreateRWLayerOpts{ + MountLabel: container.MountLabel, + InitFunc: initFunc, + StorageOpt: container.HostConfig.StorageOpt, + } + + rwLayer, err := daemon.stores[container.Platform].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts) + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the Engine API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + return apiV, nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + // Reset the Entrypoint if it is [""] + if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { + config.Entrypoint = nil + } + if 
len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +// Checks if the client set configurations for more than one network while creating a container +// Also checks if the IPAMConfig is valid +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { + return nil + } + if len(nwConfig.EndpointsConfig) == 1 { + for _, v := range nwConfig.EndpointsConfig { + if v != nil && v.IPAMConfig != nil { + if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + } + if v.IPAMConfig.IPv6Address != "" { + n := net.ParseIP(v.IPAMConfig.IPv6Address) + // if the address is an invalid network address (ParseIP == nil) or if it is + // an IPv4 address (To4() != nil), then it is an invalid IPv6 address + if n == nil || n.To4() != nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + } + } + } + } + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return apierrors.NewBadRequestError(err) +} + +// DeltaCreate creates a delta of the specified src and dest images +// This is called directly from the Engine API +func (daemon *Daemon) DeltaCreate(deltaSrc, deltaDest string, outStream io.Writer) error { + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) + + srcImg, err := daemon.GetImage(deltaSrc) + if err != nil { + return errors.Wrapf(err, "no such image: %s", deltaSrc) + } + + dstImg, err := daemon.GetImage(deltaDest) + if err != nil { + return errors.Wrapf(err, "no such image: %s", deltaDest) + } + + is := daemon.stores[dstImg.Platform()].imageStore + ls := daemon.stores[dstImg.Platform()].layerStore + + srcData, err := is.GetTarSeekStream(srcImg.ID()) + if err != nil { + return err + } + defer srcData.Close() + + srcDataLen, err := ioutils.SeekerSize(srcData) + if err != nil { + return err + } + + progressReader := progress.NewProgressReader(srcData, progressOutput, srcDataLen, deltaSrc, "Fingerprinting") + defer progressReader.Close() + + srcSig, err := librsync.Signature(bufio.NewReaderSize(progressReader, 65536), ioutil.Discard, 512, 32, librsync.BLAKE2_SIG_MAGIC) + if err != nil { + return err + } + + progress.Update(progressOutput, deltaSrc, "Fingerprint complete") + + deltaRootFS := image.NewRootFS() + + for _, diffID := range dstImg.RootFS.DiffIDs { + progress.Update(progressOutput, stringid.TruncateID(diffID.String()), "Waiting") + } + + statTotalSize := int64(0) + statDeltaSize := int64(0) + + for i, diffID := range dstImg.RootFS.DiffIDs { + var ( + layerData io.Reader + platform layer.Platform + ) + commonLayer := false + + // We're only interested in layers that are different. 
Push empty + // layers for common layers + if i < len(srcImg.RootFS.DiffIDs) && srcImg.RootFS.DiffIDs[i] == diffID { + commonLayer = true + layerData, _ = layer.EmptyLayer.TarStream() + platform = layer.EmptyLayer.Platform() + } else { + dstRootFS := *dstImg.RootFS + dstRootFS.DiffIDs = dstRootFS.DiffIDs[:i+1] + + l, err := ls.Get(dstRootFS.ChainID()) + if err != nil { + return err + } + defer layer.ReleaseAndLog(ls, l) + + platform = l.Platform() + + input, err := l.TarStream() + if err != nil { + return err + } + defer input.Close() + + inputSize, err := l.DiffSize() + if err != nil { + return err + } + + statTotalSize += inputSize + + progressReader := progress.NewProgressReader(input, progressOutput, inputSize, stringid.TruncateID(diffID.String()), "Computing delta") + defer progressReader.Close() + + pR, pW := io.Pipe() + + layerData = pR + + tmpDelta, err := ioutil.TempFile("", "docker-delta-") + if err != nil { + return err + } + defer os.Remove(tmpDelta.Name()) + + go func() { + w := bufio.NewWriter(tmpDelta) + err := librsync.Delta(srcSig, bufio.NewReader(progressReader), w) + if err != nil { + pW.CloseWithError(err) + return + } + w.Flush() + + info, err := tmpDelta.Stat() + if err != nil { + pW.CloseWithError(err) + return + } + + tw := tar.NewWriter(pW) + + hdr := &tar.Header{ + Name: "delta", + Mode: 0600, + Size: info.Size(), + } + + if err := tw.WriteHeader(hdr); err != nil { + pW.CloseWithError(err) + return + } + + if _, err := tmpDelta.Seek(0, io.SeekStart); err != nil { + pW.CloseWithError(err) + return + } + + if _, err := io.Copy(tw, tmpDelta); err != nil { + pW.CloseWithError(err) + return + } + + if err := tw.Close(); err != nil { + pW.CloseWithError(err) + return + } + + pW.Close() + }() + } + + newLayer, err := ls.Register(layerData, deltaRootFS.ChainID(), platform) + if err != nil { + return err + } + defer layer.ReleaseAndLog(ls, newLayer) + + if commonLayer { + progress.Update(progressOutput, stringid.TruncateID(diffID.String()), "Skipping common layer") + } else { + deltaSize, err := newLayer.DiffSize() + if err != nil { + return err + } + statDeltaSize += deltaSize + progress.Update(progressOutput, stringid.TruncateID(diffID.String()), "Delta complete") + } + + deltaRootFS.Append(newLayer.DiffID()) + } + + config := image.Image{ + RootFS: deltaRootFS, + V1Image: image.V1Image{ + Created: time.Now().UTC(), + Config: &containertypes.Config{ + Labels: map[string]string{ + "io.resin.delta.base": srcImg.ID().String(), + "io.resin.delta.config": string(dstImg.RawJSON()), + }, + }, + }, + } + + rawConfig, err := json.Marshal(config) + if err != nil { + return err + } + + id, err := is.Create(rawConfig) + if err != nil { + return err + } + + humanTotal := units.HumanSize(float64(statTotalSize)) + humanDelta := units.HumanSize(float64(statDeltaSize)) + deltaRatio := float64(statTotalSize) / float64(statDeltaSize) + if statTotalSize == 0 { + deltaRatio = 1 + } + + outStream.Write(streamformatter.FormatStatus("", "Normal size: %s, Delta size: %s, %.2fx improvement", humanTotal, humanDelta, deltaRatio)) + outStream.Write(streamformatter.FormatStatus("", "Created delta: %s", id.String())) + return nil +} diff --git a/daemon/create_unix.go b/daemon/create_unix.go new file mode 100644 index 0000000..2501a33 --- /dev/null +++ b/daemon/create_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes 
"github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootIDs := daemon.idMappings.RootPair() + if err := container.SetupWorkingDirectory(rootIDs); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. +func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if mnt.Volume == nil { + continue + } + + if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/daemon/create_windows.go b/daemon/create_windows.go new file mode 100644 index 0000000..bbf0dbe --- /dev/null +++ b/daemon/create_windows.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + + for spec := range config.Volumes { + + mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. 
+ if container.IsDestinationMounted(mp.Destination) {
+ continue
+ }
+
+ volumeDriver := hostConfig.VolumeDriver
+
+ // Create the volume in the volume driver. If it doesn't exist,
+ // a new one will be created.
+ v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ // FIXME Windows: This code block is present in the Linux version and
+ // allows the contents to be copied to the container FS prior to it
+ // being started. However, the function utilizes the FollowSymLinkInScope
+ // path which does not cope with Windows volume-style file paths. There
+ // is a separate effort to resolve this (@swernli), so this processing
+ // is deferred for now. A case where this would be useful is when
+ // a dockerfile includes a VOLUME statement, but something is created
+ // in that directory during the dockerfile processing. What this means
+ // on Windows for TP5 is that in that scenario, the contents will not be
+ // copied, but that's (somewhat) OK as HCS will bomb out soon after
+ // as it doesn't support mapped directories which have contents in the
+ // destination path anyway.
+ //
+ // Example for repro later:
+ // FROM windowsservercore
+ // RUN mkdir c:\myvol
+ // RUN copy c:\windows\system32\ntdll.dll c:\myvol
+ // VOLUME "c:\myvol"
+ //
+ // Then
+ // docker build -t vol .
+ // docker run -it --rm vol cmd <-- This is where HCS will error out.
+ //
+ // // never attempt to copy existing content in a container FS to a shared volume
+ // if v.DriverName() == volume.DefaultDriverName {
+ // if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
+ // return err
+ // }
+ // }
+
+ // Add it to container.MountPoints
+ container.AddMountPointWithVolume(mp.Destination, v, mp.RW)
+ }
+ return nil
+}
diff --git a/daemon/daemon.go b/daemon/daemon.go
new file mode 100644
index 0000000..e2bde1f
--- /dev/null
+++ b/daemon/daemon.go
@@ -0,0 +1,1284 @@
+// Package daemon exposes the functions that occur on the host server
+// on which the Docker daemon is running.
+//
+// In implementing the various functions of the daemon, there is often
+// a method-specific struct for configuring the runtime behavior. 
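+// (types.ContainerCreateConfig for the create path and
+// types.ContainerRmConfig for removal are two such structs.)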
+package daemon + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/daemon/stats" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/plugin" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libtrust" + "github.com/pkg/errors" +) + +var ( + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + + errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") +) + +type daemonStore struct { + graphDriver string + imageRoot string + imageStore image.Store + layerStore layer.Store + distributionMetadataStore dmetadata.Store + referenceStore refstore.Store +} + +// Daemon holds information about the Docker daemon. 
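+// It ties together the container and exec stores, the per-platform image
+// and layer stores, the volume store, the network controller and the
+// libcontainerd client.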
+type Daemon struct { + ID string + repository string + containers container.Store + containersReplica container.ViewDB + execCommands *exec.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *config.Config + statsCollector *stats.Collector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discovery.Reloader + root string + seccompEnabled bool + apparmorEnabled bool + shutdown bool + idMappings *idtools.IDMappings + stores map[string]daemonStore // By container target platform + deltaStore *daemonStore + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + metricsPluginListener net.Listener + + machineMemory uint64 + + seccompProfile []byte + seccompProfilePath string + + diskUsageRunning int32 + pruneRunning int32 + hosts map[string]bool // hosts stores the addresses the daemon is listening on + startupDone chan struct{} +} + +// StoreHosts stores the addresses the daemon is listening on +func (daemon *Daemon) StoreHosts(hosts []string) { + if daemon.hosts == nil { + daemon.hosts = make(map[string]bool) + } + for _, h := range hosts { + daemon.hosts[h] = true + } +} + +// HasExperimental returns whether the experimental features of the daemon are enabled or not +func (daemon *Daemon) HasExperimental() bool { + if daemon.configStore != nil && daemon.configStore.Experimental { + return true + } + return false +} + +func (daemon *Daemon) restore() error { + containers := make(map[string]*container.Container) + + logrus.Info("Loading containers: start.") + + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + currentDriverForContainerPlatform := daemon.stores[container.Platform].graphDriver + if (container.Driver == "" && currentDriverForContainerPlatform == "aufs") || container.Driver == currentDriverForContainerPlatform { + rwlayer, err := daemon.stores[container.Platform].layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + removeContainers := make(map[string]*container.Container) + restartContainers := make(map[*container.Container]chan struct{}) + activeSandboxes := make(map[string]interface{}) + for id, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container name %s: %s", c.ID, err) + delete(containers, id) + continue + } + // verify that all volumes valid and have been migrated from the pre-1.7 layout + if err := daemon.verifyVolumesInfo(c); err != nil 
{ + // don't skip the container due to error + logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + + // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. + // We should rewrite it to use the daemon defaults. + // Fixes https://github.com/docker/docker/issues/22536 + if c.HostConfig.LogConfig.Type == "" { + if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { + logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) + continue + } + } + } + + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + daemon.backportMountSpec(c) + if err := daemon.checkpointAndSave(c); err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") + } + + daemon.setStateCounter(c) + if c.IsRunning() || c.IsPaused() { + c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking + if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { + logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) + return + } + + // we call Mount and then Unmount to get BaseFs of the container + if err := daemon.Mount(c); err != nil { + // The mount is unlikely to fail. However, in case mount fails + // the container should be allowed to restore here. Some functionalities + // (like docker exec -u user) might be missing but container is able to be + // stopped/restarted/removed. + // See #29365 for related information. + // The error is only logged here. + logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) + } else { + if err := daemon.Unmount(c); err != nil { + logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) + } + } + + c.ResetRestartManager(false) + if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { + options, err := daemon.buildSandboxOptions(c) + if err != nil { + logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err) + } + mapLock.Lock() + activeSandboxes[c.NetworkSettings.SandboxID] = options + mapLock.Unlock() + } + + } + // fixme: only if not running + // get list of containers we need to restart + if !c.IsRunning() && !c.IsPaused() { + // Do not autostart containers which + // has endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() + } + } + + c.Lock() + if c.RemovalInProgress { + // We probably crashed in the middle of a removal, reset + // the flag. + // + // We DO NOT remove the container here as we do not + // know if the user had requested for either the + // associated volumes, network links or both to also + // be removed. So we put the container in the "dead" + // state and leave further processing up to them. 
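+ // A later, explicit "docker rm" can then complete the removal through
+ // the normal path, honouring whatever the user decides about volumes
+ // and links.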
+ logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) + c.RemovalInProgress = false + c.Dead = true + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("Failed to update container %s state: %v", c.ID, err) + } + } + c.Unlock() + }(c) + } + wg.Wait() + daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) + if err != nil { + return fmt.Errorf("Error initializing network controller: %v", err) + } + + // Now that all the containers are registered, register the links + for _, c := range containers { + c.Lock() + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + c.Unlock() + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + daemon.waitForNetworks(c) + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + removeGroup := sync.WaitGroup{} + for id := range removeContainers { + removeGroup.Add(1) + go func(cid string) { + if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("Failed to remove container %s: %s", cid, err) + } + removeGroup.Done() + }(id) + } + removeGroup.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume driver is not available. + if _, ok := restartContainers[c]; ok { + continue + } else if _, ok := removeContainers[c.ID]; ok { + // container is automatically removed, skip it. + continue + } + + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + logrus.Info("Loading containers: done.") + + return nil +} + +// RestartSwarmContainers restarts any autostart container which has a +// swarm endpoint. +func (daemon *Daemon) RestartSwarmContainers() { + group := sync.WaitGroup{} + for _, c := range daemon.List() { + if !c.IsRunning() && !c.IsPaused() { + // Autostart all the containers which has a + // swarm endpoint now that the cluster is + // initialized. 
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Error(err) + } + }(c) + } + } + + } + group.Wait() +} + +// waitForNetworks is used during daemon initialization when starting up containers +// It ensures that all of a container's networks are available before the daemon tries to start the container. +// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. +func (daemon *Daemon) waitForNetworks(c *container.Container) { + if daemon.discoveryWatcher == nil { + return + } + // Make sure if the container has a network that requires discovery that the discovery service is available before starting + for netName := range c.NetworkSettings.Networks { + // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready + // Most likely this is because the K/V store used for discovery is in a container and needs to be started + if _, err := daemon.netController.NetworkByName(netName); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + continue + } + // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host + // FIXME: why is this slow??? + logrus.Debugf("Container %s waiting for network to be ready", c.Name) + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(60 * time.Second): + } + return + } + } +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// DaemonJoinsCluster informs the daemon has joined the cluster and provides +// the handler to query the cluster component +func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { + daemon.setClusterProvider(clusterProvider) +} + +// DaemonLeavesCluster informs the daemon has left the cluster +func (daemon *Daemon) DaemonLeavesCluster() { + // Daemon is in charge of removing the attachable networks with + // connected containers when the node leaves the swarm + daemon.clearAttachableNetworks() + // We no longer need the cluster provider, stop it now so that + // the network agent will stop listening to cluster events. + daemon.setClusterProvider(nil) + // Wait for the networking cluster agent to stop + daemon.netController.AgentStopWait() + // Daemon is in charge of removing the ingress network when the + // node leaves the swarm. Wait for job to be done or timeout. + // This is called also on graceful daemon shutdown. We need to + // wait, because the ingress release has to happen before the + // network controller is stopped. 
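+ // ReleaseIngress hands back a channel that is closed once the ingress
+ // network is actually gone; the wait below is bounded to five seconds
+ // so a stuck removal cannot hang shutdown indefinitely.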
+ if done, err := daemon.ReleaseIngress(); err == nil { + select { + case <-done: + case <-time.After(5 * time.Second): + logrus.Warnf("timeout while waiting for ingress network removal") + } + } else { + logrus.Warnf("failed to initiate ingress network removal: %v", err) + } +} + +// setClusterProvider sets a component for querying the current cluster state. +func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { + daemon.clusterProvider = clusterProvider + daemon.netController.SetClusterProvider(clusterProvider) +} + +// IsSwarmCompatible verifies if the current daemon +// configuration is compatible with the swarm mode +func (daemon *Daemon) IsSwarmCompatible() error { + if daemon.configStore == nil { + return nil + } + return daemon.configStore.IsSwarmCompatible() +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure that we have a correct root key limit for launching containers. + if err := ModifyRootKeyLimit(); err != nil { + logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + } + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + idMappings, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootIDs := idMappings.RootPair() + if err := setupDaemonProcess(config); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := prepareTempDir(config.Root, rootIDs) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := getRealPath(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{ + configStore: config, + startupDone: make(chan struct{}), + } + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + d.setupDumpStackTrap(stackDumpDir) + + if err := d.setupSeccompProfile(); err != nil { + return nil, err + } + + // Set the default isolation mode (only applicable on Windows) + if err := d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + if err := ensureDefaultAppArmorProfile(); err != nil { + logrus.Errorf(err.Error()) + } + + daemonRepo := 
filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil && !os.IsExist(err) { + return nil, err + } + + if runtime.GOOS == "windows" { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil && !os.IsExist(err) { + return nil, err + } + } + + // On Windows we don't support the environment variable, or a user supplied graphdriver + // as Windows has no choice in terms of which graphdrivers to use. It's a case of + // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, + // lcow. Unix platforms however run a single graphdriver for all containers, and it can + // be set through an environment variable, a daemon start parameter, or chosen through + // initialization of the layerstore through driver priority order for example. + d.stores = make(map[string]daemonStore) + if runtime.GOOS == "windows" { + d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"} + if system.LCOWSupported() { + d.stores["linux"] = daemonStore{graphDriver: "lcow"} + } + } else { + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead. + } + + d.RegistryService = registryService + d.PluginStore = pluginStore + logger.RegisterPluginGetter(d.PluginStore) + + metricsSockPath, err := d.listenMetricsSock() + if err != nil { + return nil, err + } + registerMetricsPluginCallback(d.PluginStore, metricsSockPath) + + // Plugin system initialization should happen before restore. Do not change order. + d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ + Root: filepath.Join(config.Root, "plugins"), + ExecRoot: getPluginExecRoot(config.Root), + Store: d.PluginStore, + Executor: containerdRemote, + RegistryService: registryService, + LiveRestoreEnabled: config.LiveRestoreEnabled, + LogPluginEvent: d.LogPluginEvent, // todo: make private + AuthzMiddleware: config.AuthzMiddleware, + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create plugin manager") + } + + var graphDrivers []string + for platform, ds := range d.stores { + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: ds.graphDriver, + GraphDriverOptions: config.GraphOptions, + IDMappings: idMappings, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + Platform: platform, + }) + if err != nil { + return nil, err + } + ds.graphDriver = ls.DriverName() // As layerstore may set the driver + ds.layerStore = ls + d.stores[platform] = ds + graphDrivers = append(graphDrivers, ls.DriverName()) + } + + if config.DeltaRoot != "" && config.DeltaGraphDriver != "" { + ls, err := layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.DeltaRoot, + MetadataStorePathTemplate: filepath.Join(config.DeltaRoot, "image", "%s", "layerdb"), + GraphDriver: config.DeltaGraphDriver, + GraphDriverOptions: config.DeltaGraphOptions, + IDMappings: idMappings, + PluginGetter: nil, + ExperimentalEnabled: false, + Platform: runtime.GOOS, + }) + if err != nil { + return nil, err + } + + imageRoot := filepath.Join(config.DeltaRoot, "image", ls.DriverName()) + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + var is image.Store + is, err = 
image.NewImageStore(ifs, runtime.GOOS, ls) + if err != nil { + return nil, err + } + + d.deltaStore = &daemonStore{ + graphDriver: ls.DriverName(), + imageRoot: imageRoot, + imageStore: is, + layerStore: ls, + } + graphDrivers = append(graphDrivers, ls.DriverName()) + } + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDrivers); err != nil { + return nil, err + } + + logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) + lsMap := make(map[string]layer.Store) + for platform, ds := range d.stores { + lsMap[platform] = ds.layerStore + } + d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) + d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + + for platform, ds := range d.stores { + imageRoot := filepath.Join(config.Root, "image", ds.graphDriver) + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + var is image.Store + is, err = image.NewImageStore(ifs, platform, ds.layerStore) + if err != nil { + return nil, err + } + ds.imageRoot = imageRoot + ds.imageStore = is + d.stores[platform] = ds + } + + // Configure the volumes driver + volStore, err := d.configureVolumes(rootIDs) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700, ""); err != nil { + return nil, err + } + + eventsService := events.New() + + for platform, ds := range d.stores { + dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform) + if err != nil { + return nil, err + } + + rs, err := refstore.NewReferenceStore(filepath.Join(ds.imageRoot, "repositories.json"), platform) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + ds.distributionMetadataStore = dms + ds.referenceStore = rs + d.stores[platform] = ds + + // No content-addressability migration on Windows as it never supported pre-CA + if runtime.GOOS != "windows" { + migrationStart := time.Now() + if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + } + } + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as it's read-only + if err := d.initDiscovery(config); err != nil { + return nil, err + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux. 
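+ // Without the devices cgroup the daemon could not restrict device access
+ // per container, so refusing to start is safer than running with weaker
+ // isolation.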
+ if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
+ return nil, errors.New("Devices cgroup isn't mounted")
+ }
+
+ d.ID = trustKey.PublicKey().KeyID()
+ d.repository = daemonRepo
+ d.containers = container.NewMemoryStore()
+ if d.containersReplica, err = container.NewViewDB(); err != nil {
+ return nil, err
+ }
+ d.execCommands = exec.NewStore()
+ d.trustKey = trustKey
+ d.idIndex = truncindex.NewTruncIndex([]string{})
+ d.statsCollector = d.newStatsCollector(1 * time.Second)
+ d.defaultLogConfig = containertypes.LogConfig{
+ Type: config.LogConfig.Type,
+ Config: config.LogConfig.Config,
+ }
+ d.EventsService = eventsService
+ d.volumes = volStore
+ d.root = config.Root
+ d.idMappings = idMappings
+ d.seccompEnabled = sysInfo.Seccomp
+ d.apparmorEnabled = sysInfo.AppArmor
+
+ d.nameIndex = registrar.NewRegistrar()
+ d.linkIndex = newLinkIndex()
+ d.containerdRemote = containerdRemote
+
+ go d.execCommandGC()
+
+ d.containerd, err = containerdRemote.Client(d)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := d.restore(); err != nil {
+ return nil, err
+ }
+ close(d.startupDone)
+
+ // FIXME: this method never returns an error
+ info, _ := d.SystemInfo()
+
+ engineInfo.WithValues(
+ dockerversion.Version,
+ dockerversion.GitCommit,
+ info.Architecture,
+ info.Driver,
+ info.KernelVersion,
+ info.OperatingSystem,
+ info.OSType,
+ info.ID,
+ ).Set(1)
+ engineCpus.Set(float64(info.NCPU))
+ engineMemory.Set(float64(info.MemTotal))
+
+ gd := ""
+ for platform, ds := range d.stores {
+ if len(gd) > 0 {
+ gd += ", "
+ }
+ gd += ds.graphDriver
+ if len(d.stores) > 1 {
+ gd = fmt.Sprintf("%s (%s)", gd, platform)
+ }
+ }
+ logrus.WithFields(logrus.Fields{
+ "version": dockerversion.Version,
+ "commit": dockerversion.GitCommit,
+ "graphdriver(s)": gd,
+ }).Info("Docker daemon")
+
+ return d, nil
+}
+
+func (daemon *Daemon) waitForStartupDone() {
+ <-daemon.startupDone
+}
+
+func (daemon *Daemon) shutdownContainer(c *container.Container) error {
+ stopTimeout := c.StopTimeout()
+ // TODO(windows): Handle docker restart with paused containers
+ if c.IsPaused() {
+ // To terminate a process in freezer cgroup, we should send
+ // SIGTERM to this process then unfreeze it, and the process will
+ // be forced to terminate immediately.
+ logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
+ sig, ok := signal.SignalMap["TERM"]
+ if !ok {
+ return errors.New("System does not support SIGTERM")
+ }
+ if err := daemon.kill(c, int(sig)); err != nil {
+ return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
+ }
+ if err := daemon.containerUnpause(c); err != nil {
+ return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Duration(stopTimeout)*time.Second)
+ defer cancel()
+
+ // Wait with timeout for container to exit.
+ if status := <-c.Wait(ctx, container.WaitConditionNotRunning); status.Err() != nil {
+ logrus.Debugf("container %s failed to exit in %d seconds of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
+ sig, ok := signal.SignalMap["KILL"]
+ if !ok {
+ return errors.New("System does not support SIGKILL")
+ }
+ if err := daemon.kill(c, int(sig)); err != nil {
+ logrus.Errorf("Failed to SIGKILL container %s", c.ID)
+ }
+ // Wait for exit again without a timeout.
+ // Explicitly ignore the result. 
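+ // This second Wait only blocks until the container has actually
+ // exited after the SIGKILL; its result carries no extra information.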
+ _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + return status.Err() + } + } + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, stopTimeout); err != nil { + return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) + } + + // Wait without timeout for the container to exit. + // Ignore the result. + _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + return nil +} + +// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, +// and is limited by daemon's ShutdownTimeout. +func (daemon *Daemon) ShutdownTimeout() int { + // By default we use daemon's ShutdownTimeout. + shutdownTimeout := daemon.configStore.ShutdownTimeout + + graceTimeout := 5 + if daemon.containers != nil { + for _, c := range daemon.containers.List() { + if shutdownTimeout >= 0 { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + shutdownTimeout = -1 + } else { + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout + } + } + } + } + } + return shutdownTimeout +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + // Keep mounts and networking running on daemon shutdown if + // we are to keep containers running and restore them. + + if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { + // check if there are any running containers, if none we should do some cleanup + if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + // metrics plugins still need some cleanup + daemon.cleanupMetricsPlugins() + return nil + } + } + + if daemon.containers != nil { + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.stores[c.Platform].layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.volumes != nil { + if err := daemon.volumes.Shutdown(); err != nil { + logrus.Errorf("Error shutting down volume store: %v", err) + } + } + + for platform, ds := range daemon.stores { + if ds.layerStore != nil { + if err := ds.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform) + } + } + } + + // If we are part of a cluster, clean up cluster's stuff + if daemon.clusterProvider != nil { + logrus.Debugf("start clean shutdown of cluster resources...") + daemon.DaemonLeavesCluster() + } + + daemon.cleanupMetricsPlugins() + + // Shutdown plugins after containers and layerstore. Don't change the order. + daemon.pluginShutdown() + + // trigger libnetwork Stop only if it's initialized + if daemon.netController != nil { + daemon.netController.Stop() + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) 
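+// BaseFS is expected to be empty at this point because the RWLayer is only
+// mounted on demand; Mount records the path reported by the layer store and
+// guards against graph drivers that report inconsistent paths across mounts.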
+func (daemon *Daemon) Mount(container *container.Container) error {
+ dir, err := container.RWLayer.Mount(container.GetMountLabel())
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("container mounted via layerStore: %v", dir)
+
+ if container.BaseFS != dir {
+ // The mount path reported by the graph driver should always be trusted on Windows, since the
+ // volume path for a given mounted layer may change over time. This should only be an error
+ // on non-Windows operating systems.
+ if container.BaseFS != "" && runtime.GOOS != "windows" {
+ daemon.Unmount(container)
+ return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
+ daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
+ }
+ }
+ container.BaseFS = dir // TODO: combine these fields
+ return nil
+}
+
+// Unmount unsets the container base filesystem
+func (daemon *Daemon) Unmount(container *container.Container) error {
+ if err := container.RWLayer.Unmount(); err != nil {
+ logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
+ return err
+ }
+
+ return nil
+}
+
+// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
+func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
+ var v4Subnets []net.IPNet
+ var v6Subnets []net.IPNet
+
+ managedNetworks := daemon.netController.Networks()
+
+ for _, managedNetwork := range managedNetworks {
+ v4infos, v6infos := managedNetwork.Info().IpamInfo()
+ for _, info := range v4infos {
+ if info.IPAMData.Pool != nil {
+ v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
+ }
+ }
+ for _, info := range v6infos {
+ if info.IPAMData.Pool != nil {
+ v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
+ }
+ }
+ }
+
+ return v4Subnets, v6Subnets
+}
+
+// GraphDriverName returns the name of the graph driver used by the layer.Store
+func (daemon *Daemon) GraphDriverName(platform string) string {
+ return daemon.stores[platform].layerStore.DriverName()
+}
+
+// prepareTempDir prepares and returns the default directory to use
+// for temporary files.
+// If it doesn't exist, it is created. If it exists, its content is removed.
+func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
+ var tmpDir string
+ if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
+ tmpDir = filepath.Join(rootDir, "tmp")
+ newName := tmpDir + "-old"
+ if err := os.Rename(tmpDir, newName); err == nil {
+ go func() {
+ if err := os.RemoveAll(newName); err != nil {
+ logrus.Warnf("failed to delete old tmp directory: %s", newName)
+ }
+ }()
+ } else {
+ logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
+ if err := os.RemoveAll(tmpDir); err != nil {
+ logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
+ }
+ }
+ }
+ // We don't remove the content of tmpdir if it's not the default,
+ // it may hold things that do not belong to us.
+ return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
+}
+
+func (daemon *Daemon) setupInitLayer(initPath string) error {
+ rootIDs := daemon.idMappings.RootPair()
+ return initlayer.Setup(initPath, rootIDs)
+}
+
+func setDefaultMtu(conf *config.Config) {
+ // do nothing if the config does not have the default 0 value. 
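+ // (config.DefaultNetworkMtu is the conventional Ethernet MTU of 1500.)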
+ if conf.Mtu != 0 { + return + } + conf.Mtu = config.DefaultNetworkMtu +} + +func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootIDs) + if err != nil { + return nil, err + } + + volumedrivers.RegisterPluginGetter(daemon.PluginStore) + + if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { + return nil, errors.New("local volume driver could not be registered") + } + return store.New(daemon.configStore.Root) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// initDiscovery initializes the discovery watcher for this daemon. +func (daemon *Daemon) initDiscovery(conf *config.Config) error { + advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise) + if err != nil { + if err == discovery.ErrDiscoveryDisabled { + return nil + } + return err + } + + conf.ClusterAdvertise = advertise + discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + + daemon.discoveryWatcher = discoveryWatcher + return nil +} + +func isBridgeNetworkDisabled(conf *config.Config) bool { + return conf.BridgeConfig.Iface == config.DisableNetworkBridge +} + +func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + + options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) + + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) 
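+	// Below, with live restore enabled, the network sandboxes of containers
+	// that kept running across the daemon restart are handed back to
+	// libnetwork so it re-attaches to them instead of recreating them.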
+ + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { + options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) + } + + if pg != nil { + options = append(options, nwconfig.OptionPluginGetter(pg)) + } + + return options, nil +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + +// GetCluster returns the cluster +func (daemon *Daemon) GetCluster() Cluster { + return daemon.cluster +} + +// SetCluster sets the cluster +func (daemon *Daemon) SetCluster(cluster Cluster) { + daemon.cluster = cluster +} + +func (daemon *Daemon) pluginShutdown() { + manager := daemon.pluginManager + // Check for a valid manager object. In error conditions, daemon init can fail + // and shutdown called, before plugin manager is initialized. + if manager != nil { + manager.Shutdown() + } +} + +// PluginManager returns current pluginManager associated with the daemon +func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method + return daemon.pluginManager +} + +// PluginGetter returns current pluginStore associated with the daemon +func (daemon *Daemon) PluginGetter() *plugin.Store { + return daemon.PluginStore +} + +// CreateDaemonRoot creates the root for the daemon +func CreateDaemonRoot(config *config.Config) error { + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = getRealPath(config.Root) + if err != nil { + return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + idMappings, err := setupRemappedRoot(config) + if err != nil { + return err + } + return setupDaemonRoot(config, realRoot, idMappings.RootPair()) +} + +// checkpointAndSave grabs a container lock to safely call container.CheckpointTo +func (daemon *Daemon) checkpointAndSave(container *container.Container) error { + container.Lock() + defer container.Unlock() + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return fmt.Errorf("Error saving container state: %v", err) + } + return nil +} diff --git a/daemon/daemon_experimental.go b/daemon/daemon_experimental.go new file mode 100644 index 0000000..fb0251d --- /dev/null +++ b/daemon/daemon_experimental.go @@ -0,0 +1,7 @@ +package daemon + +import "github.com/docker/docker/api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/daemon/daemon_linux.go b/daemon/daemon_linux.go new file mode 100644 index 0000000..000a048 --- /dev/null +++ b/daemon/daemon_linux.go @@ -0,0 +1,93 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/mount" +) + +// On Linux, plugins use a static path for storing execution state, +// instead of deriving path from daemon's exec-root. This is because +// plugin socket files are created here and they cannot exceed max +// path length of 108 bytes. 
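+// (108 bytes is the size of the sun_path field of sockaddr_un on Linux;
+// a socket nested under a long per-daemon exec-root could overflow it,
+// hence the short fixed path below.)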
+func getPluginExecRoot(root string) string { + return "/run/docker/plugins" +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" + patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/daemon/daemon_linux_test.go b/daemon/daemon_linux_test.go new file mode 100644 index 0000000..c7d5117 --- /dev/null +++ b/daemon/daemon_linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio +143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset +150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu +151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct +152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +154 148 0:30 
/docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +89 142 0:87 / /tmp rw,relatime - tmpfs none rw +97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw +100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio +116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio +242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw +120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio +171 142 0:122 / 
/run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio +310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw +` + +func TestCleanupMounts(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) + + if unmounted != 1 { + t.Fatal("Expected to unmount the shm (and the shm only)") + } +} + +func TestCleanupMountsByID(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) + + if unmounted != 1 { + t.Fatal("Expected to unmount the auf root (and that only)") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) + if unmounted { + t.Fatal("Expected not to clean up /dev/shm") + } +} diff --git a/daemon/daemon_solaris.go b/daemon/daemon_solaris.go new file mode 100644 index 0000000..7f2004e --- /dev/null +++ b/daemon/daemon_solaris.go @@ -0,0 +1,532 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + refstore "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/solaris/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +//#include +import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func getMemoryResources(config containertypes.Resources) specs.CappedMemory { + memory := specs.CappedMemory{} + + if config.Memory > 0 { + memory.Physical = strconv.FormatInt(config.Memory, 10) + } + + if config.MemorySwap != 0 { + memory.Swap = strconv.FormatInt(config.MemorySwap, 10) + } + + return memory +} + +func getCPUResources(config containertypes.Resources) specs.CappedCPU { + cpu := specs.CappedCPU{} + + if config.CpusetCpus != "" { + cpu.Ncpus = config.CpusetCpus + } + + return cpu +} + +func (daemon *Daemon) 
cleanupMountsByID(id string) error { + return nil +} + +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + return parseSecurityOpt(container, hostConfig) +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + //Since config.SecurityOpt is specifically defined as a "List of string values to + //customize labels for MLs systems, such as SELinux" + //until we figure out how to map to Trusted Extensions + //this is being disabled for now on Solaris + var ( + labelOpts []string + err error + ) + + if len(config.SecurityOpt) > 0 { + return errors.New("Security options are not supported on Solaris") + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + return nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + // solaris can rely upon checkSystem() below, we don't skew kernel versions + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig.CPUShares < 0 { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares) + hostConfig.CPUShares = solarisMinCPUShares + } else if hostConfig.CPUShares > solarisMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares) + hostConfig.CPUShares = solarisMaxCPUShares + } + + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + + if hostConfig.ShmSize != 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return false +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + // NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and + // therefore we will not do that for Docker container either. + if hostConfig.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. 
Limitation discarded.") + hostConfig.Memory = 0 + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + // Solaris NOTE: We allow and encourage setting the swap without setting the memory limit. + + if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + hostConfig.MemorySwappiness = nil + } + if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + hostConfig.MemoryReservation = 0 + } + if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + hostConfig.KernelMemory = 0 + } + if hostConfig.CPUShares != 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + hostConfig.CPUShares = 0 + } + if hostConfig.CPUShares < 0 { + warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() { + warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.") + logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.") + hostConfig.CPUShares = 0 + } + + // Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to. + if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + if hostConfig.CPUQuota > 0 { + warnings = append(warnings, "Quota will be applied on default period, not period specified.") + logrus.Warnf("Quota will be applied on default period, not period specified.") + } + hostConfig.CPUPeriod = 0 + } + if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. 
Quota discarded.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUQuota < 0 { + warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + hostConfig.CpusetCpus = "" + hostConfig.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems) + } + if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + hostConfig.BlkioWeight = 0 + } + if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable { + *hostConfig.OomKillDisable = false + // Don't warn; this is the default setting but only applicable to Linux + } + + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + + // Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them. + + if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil { + warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + hostConfig.CapAdd = nil + hostConfig.CapDrop = nil + } + + if hostConfig.GroupAdd != nil { + warnings = append(warnings, "Additional groups unsupported on Solaris.Discarding groups lists.") + logrus.Warnf("Additional groups unsupported on Solaris.Discarding groups lists.") + hostConfig.GroupAdd = nil + } + + if hostConfig.IpcMode != "" { + warnings = append(warnings, "IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + logrus.Warnf("IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + hostConfig.IpcMode = "" + } + + if hostConfig.PidMode != "" { + warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + hostConfig.PidMode = "" + } + + if hostConfig.Privileged { + warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + logrus.Warnf("Privileged mode unsupported on Solaris. 
Discarding privileged mode setting.") + hostConfig.Privileged = false + } + + if hostConfig.UTSMode != "" { + warnings = append(warnings, "UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + logrus.Warnf("UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + hostConfig.UTSMode = "" + } + + if hostConfig.CgroupParent != "" { + warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + hostConfig.CgroupParent = "" + } + + if hostConfig.Ulimits != nil { + warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + hostConfig.Ulimits = nil + } + + return warnings, nil +} + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(config *Config, attributes map[string]string) { +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + stockRuntimeOpts := []string{} + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} + + // checkSystem validates platform-specific requirements + return nil +} + +func checkSystem() error { + // check OS version for compatibility, ensure running in global zone + var err error + var id C.zoneid_t + + if id, err = C.getzoneid(); err != nil { + return fmt.Errorf("Exiting. Error getting zone id: %+v", err) + } + if int(id) != 0 { + return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") + } + + v, err := kernel.GetKernelVersion() + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { + return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String()) + } + return err +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) 
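+	// (The controller returned here owns every libnetwork network and sandbox
+	// for this daemon; the "null" and "bridge" networks are registered on it
+	// below.)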
+ if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { + return nil, fmt.Errorf("Error creating default 'null' network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ipamV4Conf *libnetwork.IpamConf + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(false)) + if err != nil { + return fmt.Errorf("Error creating default 'bridge' network: %v", err) + } + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs refstore.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. 
This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go new file mode 100644 index 0000000..6f07d0d --- /dev/null +++ b/daemon/daemon_test.go @@ -0,0 +1,302 @@ +// +build !solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGetContainer(t *testing.T) { + c1 := &container.Container{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + } + + c2 := &container.Container{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + } + + c3 := &container.Container{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + } + + c4 := &container.Container{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + } + + c5 := &container.Container{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemon := &Daemon{ + containers: store, + idIndex: index, + nameIndex: registrar.NewRegistrar(), + } + + daemon.reserveName(c1.ID, c1.Name) + daemon.reserveName(c2.ID, c2.Name) + daemon.reserveName(c3.ID, c3.Name) + daemon.reserveName(c4.ID, c4.Name) + daemon.reserveName(c5.ID, c5.Name) + + if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID and c2.ID + if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID") + } + + if _, err := daemon.GetContainer("3cdbd1"); err == nil { + t.Fatal("Should return 
an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.GetContainer("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } +} + +func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { + var err error + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.volumes, err = store.New(tmp) + if err != nil { + return nil, err + } + + volumesDriver, err := local.New(tmp, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} + +func TestContainerInitDNS(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-container-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) + if err := os.MkdirAll(containerPath, 0755); err != nil { + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` + + // Container struct only used to retrieve path to config file + container := 
&container.Container{Root: containerPath} + configPath, err := container.ConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + + hostConfigPath, err := container.HostConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonWithVolumeStore(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerID) + if err != nil { + t.Fatal(err) + } + + if c.HostConfig.DNS == nil { + t.Fatal("Expected container DNS to not be nil") + } + + if c.HostConfig.DNSSearch == nil { + t.Fatal("Expected container DNSSearch to not be nil") + } + + if c.HostConfig.DNSOptions == nil { + t.Fatal("Expected container DNSOptions to not be nil") + } +} + +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &containertypes.Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &containertypes.Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &containertypes.Config{ + ExposedPorts: ports, 
+ } + + if err := merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go new file mode 100644 index 0000000..21e3acc --- /dev/null +++ b/daemon/daemon_unix.go @@ -0,0 +1,1326 @@ +// +build linux freebsd + +package daemon + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/blkiodev" + pblkiodev "github.com/docker/docker/api/types/blkiodev" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/image" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/options" + lntypes "github.com/docker/libnetwork/types" + "github.com/golang/protobuf/ptypes" + "github.com/opencontainers/runc/libcontainer/cgroups" + rsystem "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not kernel limit, we want this 4M limit to supply a reasonable functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constant for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { + memory := specs.LinuxMemory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap > 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { + cpu := specs.LinuxCPU{} + + if config.CPUShares < 0 { + 
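+		// Reject negative values outright; zero and positive values pass
+		// through below, and zero effectively leaves the kernel's default
+		// cpu.shares (1024) in effect.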
return nil, fmt.Errorf("shares: invalid argument") + } + if config.CPUShares >= 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpu.Cpus = config.CpusetCpus + } + + if config.CpusetMems != "" { + cpu.Mems = config.CpusetMems + } + + if config.NanoCPUs > 0 { + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + period := uint64(100 * time.Millisecond / time.Microsecond) + quota := config.NanoCPUs * int64(period) / 1e9 + cpu.Period = &period + cpu.Quota = "a + } + + if config.CPUPeriod != 0 { + period := uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + q := config.CPUQuota + cpu.Quota = &q + } + + if config.CPURealtimePeriod != 0 { + period := uint64(config.CPURealtimePeriod) + cpu.RealtimePeriod = &period + } + + if config.CPURealtimeRuntime != 0 { + c := config.CPURealtimeRuntime + cpu.RealtimeRuntime = &c + } + + return &cpu, nil +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.LinuxWeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.LinuxWeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.NoNewPrivileges = daemon.configStore.NoNewPrivileges + return parseSecurityOpt(container, hostConfig) +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + continue + } + if opt == "disable" { + labelOpts = append(labelOpts, "disable") + continue + } + + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") + } + if len(con) != 2 { + return fmt.Errorf("invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + case "no-new-privileges": + noNewPrivileges, err := strconv.ParseBool(con[1]) + if err != nil { + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + container.NoNewPrivileges = noNewPrivileges + default: + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { + var throttleDevices []specs.LinuxThrottleDevice + var stat syscall.Stat_t + + for _, d := range devs { + if err := syscall.Stat(d.Path, &stat); err != nil { + return nil, err + } + d := specs.LinuxThrottleDevice{Rate: d.Rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + throttleDevices = append(throttleDevices, d) + } + + return throttleDevices, 
nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + // Docker 1.11 and above doesn't actually run on kernels older than 3.4, + // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). + if !kernel.CheckKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = config.DefaultShmSize + if daemon.configStore != nil { + hostConfig.ShmSize = int64(daemon.configStore.ShmSize) + } + } + var err error + opts, err := daemon.generateSecurityOpt(hostConfig) + if err != nil { + return err + } + hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") + logrus.Warn("Your kernel does not support swap limit capabilities,or the cgroup is not mounted. 
Memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") + logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + logrus.Warn("Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") + } + if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { + return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") + } + // The highest precision we could get on Linux is 0.001, by setting + // cpu.cfs_period_us=1000ms + // cpu.cfs_quota=1ms + // See the following link for details: + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. + // The error message is 0.01 so that this is consistent with Windows + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + resources.CPUQuota = 0 + } + if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") + } + if resources.CPUPercent > 0 { + warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) + logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) + resources.CPUPercent = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") + } + if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { + return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. 
Block I/O IOPS write limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *config.Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *config.Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *config.Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + var warnings []string + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + + // no matter err is nil or not, w could have data in itself. + warnings = append(warnings, w...) + + if err != nil { + return warnings, err + } + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size can not be less than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) + } + + // ip-forwarding does not affect container with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warn("IPv4 forwarding is disabled. 
Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + if hostConfig.Runtime == "" { + hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + } + + if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { + return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) + } + + for dest := range hostConfig.Tmpfs { + if err := volume.ValidateTmpfsMountDestination(dest); err != nil { + return warnings, err + } + } + + return warnings, nil +} + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) { + if conf.IsValueSet("runtimes") { + daemon.configStore.Runtimes = conf.Runtimes + // Always set the default one + daemon.configStore.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + } + + if conf.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = conf.DefaultRuntime + } + + if conf.IsValueSet("default-shm-size") { + daemon.configStore.ShmSize = conf.ShmSize + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + attributes["runtimes"] = runtimeList.String() + attributes["default-runtime"] = daemon.configStore.DefaultRuntime + attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize) +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(conf *config.Config) error { + // Check for mutually incompatible config options + if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") + } + if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { + conf.BridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(conf); err != nil { + return err + } + if conf.CgroupParent != "" && UsingSystemd(conf) { + if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + + if conf.DefaultRuntime == "" { + conf.DefaultRuntime = config.StockRuntimeName + } + if conf.Runtimes == nil { + conf.Runtimes = make(map[string]types.Runtime) + } + conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + conf.Runtimes["bare"] = types.Runtime{} + + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *config.Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +func overlaySupportsSelinux() (bool, error) { + f, err := os.Open("/proc/kallsyms") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + defer f.Close() + + var symAddr, symType, symName, text string + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return false, err + } + + text = s.Text() + if _, err := fmt.Sscanf(text, "%s %s %s", &symAddr, &symType, &symName); err != nil { + return false, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + + // Check for presence of symbol security_inode_copy_up. + if symName == "security_inode_copy_up" { + return true, nil + } + } + return false, nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { + if config.EnableSelinuxSupport { + if !selinuxEnabled() { + logrus.Warn("Docker could not enable SELinux on the host system") + return nil + } + + overlayFound := false + for _, d := range driverNames { + if d == "overlay" || d == "overlay2" { + overlayFound = true + break + } + } + + if overlayFound { + // If driver is overlay or overlay2, make sure kernel + // supports selinux with overlay. + supported, err := overlaySupportsSelinux() + if err != nil { + return err + } + + if !supported { + logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverNames) + } + } + } else { + selinuxSetDisabled() + } + return nil +} + +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) 
+	if err != nil {
+		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
+	}
+
+	if len(activeSandboxes) > 0 {
+		logrus.Info("There are old running containers, the network config will not take effect")
+		return controller, nil
+	}
+
+	// Initialize default network on "null"
+	if n, _ := controller.NetworkByName("none"); n == nil {
+		if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
+			return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
+		}
+	}
+
+	// Initialize default network on "host"
+	if n, _ := controller.NetworkByName("host"); n == nil {
+		if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
+			return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
+		}
+	}
+
+	// Clear stale bridge network
+	if n, err := controller.NetworkByName("bridge"); err == nil {
+		if err = n.Delete(); err != nil {
+			return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
+		}
+	}
+
+	if !config.DisableBridge {
+		// Initialize default driver "bridge"
+		if err := initBridgeDriver(controller, config); err != nil {
+			return nil, err
+		}
+	} else {
+		removeDefaultBridgeInterface()
+	}
+
+	return controller, nil
+}
+
+func driverOptions(config *config.Config) []nwconfig.Option {
+	bridgeConfig := options.Generic{
+		"EnableIPForwarding":  config.BridgeConfig.EnableIPForward,
+		"EnableIPTables":      config.BridgeConfig.EnableIPTables,
+		"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
+		"UserlandProxyPath":   config.BridgeConfig.UserlandProxyPath}
+	bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
+
+	dOptions := []nwconfig.Option{}
+	dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
+	return dOptions
+}
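+
+// A short sketch (hypothetical zero-value config) of what driverOptions
+// produces: one nwconfig.Option that files the four bridge settings under
+// the netlabel.GenericData key for the "bridge" driver.
+//
+//	opts := driverOptions(&config.Config{})
+//	// len(opts) == 1; all four settings default to false/"" here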
+
+func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
+	bridgeName := bridge.DefaultBridgeName
+	if config.BridgeConfig.Iface != "" {
+		bridgeName = config.BridgeConfig.Iface
+	}
+	netOption := map[string]string{
+		bridge.BridgeName:         bridgeName,
+		bridge.DefaultBridge:      strconv.FormatBool(true),
+		netlabel.DriverMTU:        strconv.Itoa(config.Mtu),
+		bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
+		bridge.EnableICC:          strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
+	}
+
+	// --ip processing
+	if config.BridgeConfig.DefaultIP != nil {
+		netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
+	}
+
+	var (
+		ipamV4Conf *libnetwork.IpamConf
+		ipamV6Conf *libnetwork.IpamConf
+	)
+
+	ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
+
+	nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
+	if err != nil {
+		return errors.Wrap(err, "list bridge addresses failed")
+	}
+
+	nw := nwList[0]
+	if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
+		_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
+		if err != nil {
+			return errors.Wrap(err, "parse CIDR failed")
+		}
+		// Iterate through in case there are multiple addresses for the bridge
+		for _, entry := range nwList {
+			if fCIDR.Contains(entry.IP) {
+				nw = entry
+				break
+			}
+		}
+	}
+
+	ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
+	hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
+	if hip.IsGlobalUnicast() {
+		ipamV4Conf.Gateway = nw.IP.String()
+	}
+
+	if config.BridgeConfig.IP != "" {
+		ipamV4Conf.PreferredPool = config.BridgeConfig.IP
+		ip, _, err := net.ParseCIDR(config.BridgeConfig.IP)
+		if err != nil {
+			return err
+		}
+		ipamV4Conf.Gateway = ip.String()
+	} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
+		logrus.Infof("Default bridge (%s) is assigned the IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
+	}
+
+	if config.BridgeConfig.FixedCIDR != "" {
+		_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
+		if err != nil {
+			return err
+		}
+
+		ipamV4Conf.SubPool = fCIDR.String()
+	}
+
+	if config.BridgeConfig.DefaultGatewayIPv4 != nil {
+		ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
+	}
+
+	var deferIPv6Alloc bool
+	if config.BridgeConfig.FixedCIDRv6 != "" {
+		_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
+		if err != nil {
+			return err
+		}
+
+		// In case the user has specified the daemon flag --fixed-cidr-v6 and the passed network has
+		// at least 48 host bits, we need to guarantee the current behavior where the containers'
+		// IPv6 addresses will be constructed based on the containers' interface MAC address.
+		// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
+		// on this network until after the driver has created the endpoint and returned the
+		// constructed address. Libnetwork will then reserve this address with the ipam driver.
+		ones, _ := fCIDRv6.Mask.Size()
+		deferIPv6Alloc = ones <= 80
+
+		if ipamV6Conf == nil {
+			ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
+		}
+		ipamV6Conf.PreferredPool = fCIDRv6.String()
+
+		// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
+		// address belongs to the same network, we need to inform libnetwork about it, so
+		// that it can be reserved with IPAM and it will not be given away to somebody else
+		for _, nw6 := range nw6List {
+			if fCIDRv6.Contains(nw6.IP) {
+				ipamV6Conf.Gateway = nw6.IP.String()
+				break
+			}
+		}
+	}
+
+	if config.BridgeConfig.DefaultGatewayIPv6 != nil {
+		if ipamV6Conf == nil {
+			ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
+		}
+		ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
+	}
+
+	v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
+	v6Conf := []*libnetwork.IpamConf{}
+	if ipamV6Conf != nil {
+		v6Conf = append(v6Conf, ipamV6Conf)
+	}
+	// Initialize default network on "bridge" with the same name
+	_, err = controller.NewNetwork("bridge", "bridge", "",
+		libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
+		libnetwork.NetworkOptionDriverOpts(netOption),
+		libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
+		libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
+	if err != nil {
+		return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
+	}
+	return nil
+}
+
+// Remove the default bridge interface if present (--bridge=none use case)
+func removeDefaultBridgeInterface() {
+	if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
+		if err := netlink.LinkDel(lnk); err != nil {
+			logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
+		}
+	}
+}
+
+func (daemon *Daemon) getLayerInit() func(string) error {
+	return daemon.setupInitLayer
+}
+
+// Parse the remapped root (user namespace) option, which can be one of:
+//	username            - valid username from /etc/passwd
+//	username:groupname  - valid username; valid groupname from /etc/group
+//	uid                 - 32-bit unsigned int valid Linux UID value
+//	uid:gid             - uid value; 32-bit unsigned int Linux GID value
+//
+// If no groupname is specified, and a username is specified, an attempt
+// will be made to look up a gid for that username as a groupname
+//
+// If names are used, they are verified to exist in passwd/group
+func parseRemappedRoot(usergrp string) (string, string, error) {
+
+	var (
+		userID, groupID     int
+		username, groupname string
+	)
+
+	idparts := strings.Split(usergrp, ":")
+	if len(idparts) > 2 {
+		return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
+	}
+
+	if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
+		// must be a uid; take it as valid
+		userID = int(uid)
+		luser, err := idtools.LookupUID(userID)
+		if err != nil {
+			return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
+		}
+		username = luser.Name
+		if len(idparts) == 1 {
+			// if the uid was numeric and no gid was specified, take the uid as the gid
+			groupID = userID
+			lgrp, err := idtools.LookupGID(groupID)
+			if err != nil {
+				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
+			}
+			groupname = lgrp.Name
+		}
+	} else {
+		lookupName := idparts[0]
+		// special case: if the user specified "default", they want Docker to create or
+		// use (after creation) the "dockremap" user/group for root remapping
+		if lookupName == defaultIDSpecifier {
+			lookupName = defaultRemappedID
+		}
+		luser, err := idtools.LookupUser(lookupName)
+		if err != nil && idparts[0] != defaultIDSpecifier {
+			// error if the name requested isn't the special "dockremap" ID
+			return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
+		} else if err != nil {
+			// special case: if the username == "default", then we have been asked
+			// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
+			// ranges will be used for the user and group mappings in user namespaced containers
+			_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
+			if err == nil {
+				return defaultRemappedID, defaultRemappedID, nil
+			}
+			return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
+		}
+		username = luser.Name
+		if len(idparts) == 1 {
+			// we only have a string username, and no group specified; look up gid from username as group
+			group, err := idtools.LookupGroup(lookupName)
+			if err != nil {
+				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
+			}
+			groupname = group.Name
+		}
+	}
+
+	if len(idparts) == 2 {
+		// groupname or gid is separately specified and must be resolved
+		// to an unsigned 32-bit gid
+		if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
+			// must be a gid, take it as valid
+			groupID = int(gid)
+			lgrp, err := idtools.LookupGID(groupID)
+			if err != nil {
+				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
+			}
+			groupname = lgrp.Name
+		} else {
+			// not a number; attempt a lookup
+			if _, err := idtools.LookupGroup(idparts[1]); err != nil {
+				return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err)
+			}
+			groupname = idparts[1]
+		}
+	}
+	return username, groupname, nil
+}
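+
+// Example inputs (hypothetical users) for the parser above:
+//
+//	parseRemappedRoot("1000:1000") // resolves uid and gid 1000 to names via /etc/passwd and /etc/group
+//	parseRemappedRoot("someuser")  // also looks up a group named "someuser" for the gid
+//	parseRemappedRoot("default")   // creates/uses the special "dockremap" user and group
+//	parseRemappedRoot("a:b:c")     // error: at most one ':' separator is allowed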
+
+func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
+	if runtime.GOOS != "linux" && config.RemappedRoot != "" {
+		return nil, fmt.Errorf("User namespaces are only supported on Linux")
+	}
+
+	// if the daemon was started with the remapped root option, parse
+	// the config option to the int uid,gid values
+	if config.RemappedRoot != "" {
+		username, groupname, err := parseRemappedRoot(config.RemappedRoot)
+		if err != nil {
+			return nil, err
+		}
+		if username == "root" {
+			// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is effectively a no-op
+			logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
+			return &idtools.IDMappings{}, nil
+		}
+		logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
+		// update remapped root setting now that we have resolved them to actual names
+		config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
+
+		mappings, err := idtools.NewIDMappings(username, groupname)
+		if err != nil {
+			return nil, errors.Wrap(err, "Can't create ID mappings")
+		}
+		return mappings, nil
+	}
+	return &idtools.IDMappings{}, nil
+}
+
+func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
+	config.Root = rootDir
+	// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
+	// so that syscalls executing as non-root, operating on subdirectories of the graph root
+	// (e.g. mounted layers of a container) can traverse this path.
+	// The user namespace support will create subdirectories for the remapped root host uid:gid
+	// pair owned by that same uid:gid pair for proper write access to those needed metadata and
+	// layer content subtrees.
+	if _, err := os.Stat(rootDir); err == nil {
+		// root already exists; verify the access bits are correct by setting them
+		if err = os.Chmod(rootDir, 0711); err != nil {
+			return err
+		}
+	} else if os.IsNotExist(err) {
+		// no root exists yet, create it 0711 with root:root ownership
+		if err := os.MkdirAll(rootDir, 0711); err != nil {
+			return err
+		}
+	}
+
+	// if user namespaces are enabled we will create a subtree underneath the specified root
+	// with any/all specified remapped root uid/gid options on the daemon creating
+	// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
+	// `chdir()` to work for containers namespaced to that uid/gid)
+	if config.RemappedRoot != "" {
+		config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIDs.UID, rootIDs.GID))
+		logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
+		// Create the root directory if it doesn't exist
+		if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIDs); err != nil {
+			return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
+		}
+		// we also need to verify that any pre-existing directories in the path to
+		// the graphroot won't block access to remapped root--if any pre-existing directory
+		// has strict permissions that don't allow "x", container start will fail, so
+		// better to warn and fail now
+		dirPath := config.Root
+		for {
+			dirPath = filepath.Dir(dirPath)
+			if dirPath == "/" {
+				break
+			}
+			if !idtools.CanAccess(dirPath, rootIDs) {
+				return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root)
+			}
+		}
+	}
+	return nil
+}
+
+// registerLinks writes the links to a file.
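+// Legacy links arrive as "name:alias" pairs (e.g. --link db:database, an
+// illustrative value); the loop below resolves the named container, follows
+// "container:<id>" network-mode chains to the final target, and rejects links
+// to containers on the host network before persisting the host config.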
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
+		return nil
+	}
+
+	for _, l := range hostConfig.Links {
+		name, alias, err := opts.ParseLink(l)
+		if err != nil {
+			return err
+		}
+		child, err := daemon.GetContainer(name)
+		if err != nil {
+			return fmt.Errorf("Could not get container for %s", name)
+		}
+		for child.HostConfig.NetworkMode.IsContainer() {
+			parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
+			child, err = daemon.GetContainer(parts[1])
+			if err != nil {
+				return fmt.Errorf("Could not get container for %s", parts[1])
+			}
+		}
+		if child.HostConfig.NetworkMode.IsHost() {
+			return runconfig.ErrConflictHostNetworkAndLinks
+		}
+		if err := daemon.registerLink(container, child, alias); err != nil {
+			return err
+		}
+	}
+
+	// After we load all the links into the daemon
+	// set them to nil on the hostconfig
+	_, err := container.WriteHostConfig()
+	return err
+}
+
+// conditionalMountOnStart is a platform specific helper function during the
+// container start to call mount.
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
+	return daemon.Mount(container)
+}
+
+// conditionalUnmountOnCleanup is a platform specific helper function called
+// during the cleanup of a container to unmount.
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
+	return daemon.Unmount(container)
+}
+
+func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
+	if !c.IsRunning() {
+		return nil, errNotRunning{c.ID}
+	}
+	stats, err := daemon.containerd.Stats(c.ID)
+	if err != nil {
+		return nil, err
+	}
+	s := &types.StatsJSON{}
+	cgs := stats.CgroupStats
+	if cgs != nil {
+		s.BlkioStats = types.BlkioStats{
+			IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive),
+			IoServicedRecursive:     copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive),
+			IoQueuedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive),
+			IoServiceTimeRecursive:  copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive),
+			IoWaitTimeRecursive:     copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive),
+			IoMergedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive),
+			IoTimeRecursive:         copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive),
+			SectorsRecursive:        copyBlkioEntry(cgs.BlkioStats.SectorsRecursive),
+		}
+		cpu := cgs.CpuStats
+		s.CPUStats = types.CPUStats{
+			CPUUsage: types.CPUUsage{
+				TotalUsage:        cpu.CpuUsage.TotalUsage,
+				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
+				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
+				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
+			},
+			ThrottlingData: types.ThrottlingData{
+				Periods:          cpu.ThrottlingData.Periods,
+				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
+				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
+			},
+		}
+		mem := cgs.MemoryStats.Usage
+		s.MemoryStats = types.MemoryStats{
+			Usage:    mem.Usage,
+			MaxUsage: mem.MaxUsage,
+			Stats:    cgs.MemoryStats.Stats,
+			Failcnt:  mem.Failcnt,
+			Limit:    mem.Limit,
+		}
+		// if the container does not set a memory limit, use the machineMemory
+		if mem.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
+			s.MemoryStats.Limit = daemon.machineMemory
+		}
+		if cgs.PidsStats != nil {
+			s.PidsStats = types.PidsStats{
+				Current: cgs.PidsStats.Current,
+			}
+		}
+	}
+	s.Read, err = ptypes.Timestamp(stats.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// setDefaultIsolation determines the default isolation mode for the
+// daemon to run in. This is only applicable on Windows.
+func (daemon *Daemon) setDefaultIsolation() error {
+	return nil
+}
+
+func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
+	var layers []string
+	for _, l := range rootfs.DiffIDs {
+		layers = append(layers, l.String())
+	}
+	return types.RootFS{
+		Type:   rootfs.Type,
+		Layers: layers,
+	}
+}
+
+// setupDaemonProcess sets various settings for the daemon's process
+func setupDaemonProcess(config *config.Config) error {
+	// setup the daemon's oom_score_adj
+	return setupOOMScoreAdj(config.OOMScoreAdjust)
+}
+
+func setupOOMScoreAdj(score int) error {
+	f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	stringScore := strconv.Itoa(score)
+	_, err = f.WriteString(stringScore)
+	if os.IsPermission(err) {
+		// Setting oom_score_adj does not work in an
+		// unprivileged container. Ignore the error, but log
+		// it if we appear not to be in that situation.
+		if !rsystem.RunningInUserNS() {
+			logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
+		}
+		return nil
+	}
+
+	return err
+}
+
+func (daemon *Daemon) initCgroupsPath(path string) error {
+	if path == "/" || path == "." {
+		return nil
+	}
+
+	if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
+		return nil
+	}
+
+	// Recursively create the cgroup to ensure that the system and all parent cgroups have values set
+	// for the period and runtime, as this limits what the children can be set to.
+	daemon.initCgroupsPath(filepath.Dir(path))
+
+	mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu")
+	if err != nil {
+		return err
+	}
+	// When docker is run inside docker, the root is based on the host cgroup.
+	// Should this be handled in runc/libcontainer/cgroups?
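+	// (For instance, inside docker-in-docker the "cpu" cgroup root may read
+	// "/docker/<container-id>"; treating it as "/" keeps the joined path
+	// host-relative.)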
+ if strings.HasPrefix(root, "/docker/") { + root = "/" + } + + path = filepath.Join(mnt, root, path) + sysinfo := sysinfo.New(true) + if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { + return err + } + if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path); err != nil { + return err + } + return nil +} + +func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error { + if sysinfoPresent && configValue != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + if daemon.configStore.SeccompProfile != "" { + daemon.seccompProfilePath = daemon.configStore.SeccompProfile + b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile) + if err != nil { + return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err) + } + daemon.seccompProfile = b + } + return nil +} diff --git a/daemon/daemon_unix_test.go b/daemon/daemon_unix_test.go new file mode 100644 index 0000000..d6b70f0 --- /dev/null +++ b/daemon/daemon_unix_test.go @@ -0,0 +1,319 @@ +// +build !windows,!solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" +) + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUShares(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMinCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMaxCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUSharesNoAdjustment(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 
+
+// Unix test, as it uses settings which are not available on Windows
+func TestAdjustCPUSharesNoAdjustment(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	daemon := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+
+	hostConfig := &containertypes.HostConfig{
+		Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
+	}
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != linuxMinCPUShares-1 {
+		t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1)
+	}
+
+	hostConfig.CPUShares = linuxMaxCPUShares + 1
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != linuxMaxCPUShares+1 {
+		t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1)
+	}
+
+	hostConfig.CPUShares = 0
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != 0 {
+		t.Error("Expected CPUShares to be unchanged")
+	}
+
+	hostConfig.CPUShares = 1024
+	daemon.adaptContainerSettings(hostConfig, false)
+	if hostConfig.CPUShares != 1024 {
+		t.Error("Expected CPUShares to be unchanged")
+	}
+}
+
+// Unix test, as it uses settings which are not available on Windows
+func TestParseSecurityOptWithDeprecatedColon(t *testing.T) {
+	container := &container.Container{}
+	config := &containertypes.HostConfig{}
+
+	// test apparmor
+	config.SecurityOpt = []string{"apparmor:test_profile"}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+	if container.AppArmorProfile != "test_profile" {
+		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+	}
+
+	// test seccomp
+	sp := "/path/to/seccomp_test.json"
+	config.SecurityOpt = []string{"seccomp:" + sp}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+	if container.SeccompProfile != sp {
+		t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
+	}
+
+	// test valid label
+	config.SecurityOpt = []string{"label:user:USER"}
+	if err := parseSecurityOpt(container, config); err != nil {
+		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
+	}
+
+	// test invalid label
+	config.SecurityOpt = []string{"label"}
+	if err := parseSecurityOpt(container, config); err == nil {
+		t.Fatal("Expected parseSecurityOpt error, got nil")
+	}
+
+	// test invalid opt
+	config.SecurityOpt = []string{"test"}
+	if err := parseSecurityOpt(container, config); err == nil {
+		t.Fatal("Expected parseSecurityOpt error, got nil")
+	}
+}
got nil") + } +} + +func TestParseNNPSecurityOptions(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{NoNewPrivileges: true}, + } + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test NNP when "daemon:true" and "no-new-privileges=false"" + config.SecurityOpt = []string{"no-new-privileges=false"} + + if err := daemon.parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err) + } + if container.NoNewPrivileges { + t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges) + } + + // test NNP when "daemon:false" and "no-new-privileges=true"" + daemon.configStore.NoNewPrivileges = false + config.SecurityOpt = []string{"no-new-privileges=true"} + + if err := daemon.parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err) + } + if !container.NoNewPrivileges { + t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges) + } +} + +func TestNetworkOptions(t *testing.T) { + daemon := &Daemon{} + dconfigCorrect := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "consul://localhost:8500", + ClusterAdvertise: "192.168.0.1:8000", + }, + } + + if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil { + t.Fatalf("Expect networkOptions success, got error: %v", err) + } + + dconfigWrong := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "consul://localhost:8500://test://bbb", + }, + } + + if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil { + t.Fatal("Expected networkOptions error, got nil") + } +} + +func TestMigratePre17Volumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "test-daemon-volumes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + volumeRoot := filepath.Join(rootDir, "volumes") + err = os.MkdirAll(volumeRoot, 0755) + if err != nil { + t.Fatal(err) + } + + containerRoot := filepath.Join(rootDir, "containers") + cid := "1234" + err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) + + vid := "5678" + vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) + err = os.MkdirAll(vfsPath, 0755) + if err != nil { + t.Fatal(err) + } + + config := []byte(` + { + "Config": {}, + "ID": "` + cid + `", + "Volumes": { + "/foo": "` + vfsPath + `", + "/bar": "/foo", + "/quux": "/quux" + }, + "VolumesRW": { + "/foo": true, + "/bar": true, + "/quux": false + } + } + `) + + volStore, err := store.New(volumeRoot) + if err != nil { + t.Fatal(err) + } + drv, err := local.New(volumeRoot, idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + t.Fatal(err) + } + volumedrivers.Register(drv, volume.DefaultDriverName) + + daemon := &Daemon{ + root: rootDir, + repository: containerRoot, + volumes: volStore, + } + err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) + if err != nil { + t.Fatal(err) + } + c, err := daemon.load(cid) + if err != nil { + t.Fatal(err) + } + if err := daemon.verifyVolumesInfo(c); err != nil { + t.Fatal(err) + } + + expected := map[string]volume.MountPoint{ + "/foo": {Destination: "/foo", RW: true, Name: vid}, + "/bar": {Source: "/foo", Destination: "/bar", RW: true}, + "/quux": {Source: "/quux", Destination: "/quux", RW: false}, + } + for id, mp := range c.MountPoints { + x, exists := expected[id] + if !exists { + t.Fatal("volume not migrated") + } + if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != 
+
+func TestMigratePre17Volumes(t *testing.T) {
+	rootDir, err := ioutil.TempDir("", "test-daemon-volumes")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(rootDir)
+
+	volumeRoot := filepath.Join(rootDir, "volumes")
+	err = os.MkdirAll(volumeRoot, 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	containerRoot := filepath.Join(rootDir, "containers")
+	cid := "1234"
+	err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vid := "5678"
+	vfsPath := filepath.Join(rootDir, "vfs", "dir", vid)
+	err = os.MkdirAll(vfsPath, 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	config := []byte(`
+	{
+		"Config": {},
+		"ID": "` + cid + `",
+		"Volumes": {
+			"/foo": "` + vfsPath + `",
+			"/bar": "/foo",
+			"/quux": "/quux"
+		},
+		"VolumesRW": {
+			"/foo": true,
+			"/bar": true,
+			"/quux": false
+		}
+	}
+	`)
+
+	volStore, err := store.New(volumeRoot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	drv, err := local.New(volumeRoot, idtools.IDPair{UID: 0, GID: 0})
+	if err != nil {
+		t.Fatal(err)
+	}
+	volumedrivers.Register(drv, volume.DefaultDriverName)
+
+	daemon := &Daemon{
+		root:       rootDir,
+		repository: containerRoot,
+		volumes:    volStore,
+	}
+	err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	c, err := daemon.load(cid)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := daemon.verifyVolumesInfo(c); err != nil {
+		t.Fatal(err)
+	}
+
+	expected := map[string]volume.MountPoint{
+		"/foo":  {Destination: "/foo", RW: true, Name: vid},
+		"/bar":  {Source: "/foo", Destination: "/bar", RW: true},
+		"/quux": {Source: "/quux", Destination: "/quux", RW: false},
+	}
+	for id, mp := range c.MountPoints {
+		x, exists := expected[id]
+		if !exists {
+			t.Fatal("volume not migrated")
+		}
+		if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name {
+			t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp)
+		}
+	}
+}
diff --git a/daemon/daemon_unsupported.go b/daemon/daemon_unsupported.go
new file mode 100644
index 0000000..cb1acf6
--- /dev/null
+++ b/daemon/daemon_unsupported.go
@@ -0,0 +1,5 @@
+// +build !linux,!freebsd,!windows,!solaris
+
+package daemon
+
+const platformSupported = false
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
new file mode 100644
index 0000000..c77f32a
--- /dev/null
+++ b/daemon/daemon_windows.go
@@ -0,0 +1,654 @@
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/platform"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/libnetwork"
+	nwconfig "github.com/docker/libnetwork/config"
+	"github.com/docker/libnetwork/datastore"
+	winlibnetwork "github.com/docker/libnetwork/drivers/windows"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
+	blkiodev "github.com/opencontainers/runc/libcontainer/configs"
+	"golang.org/x/sys/windows"
+)
+
+const (
+	defaultNetworkSpace  = "172.16.0.0/12"
+	platformSupported    = true
+	windowsMinCPUShares  = 1
+	windowsMaxCPUShares  = 10000
+	windowsMinCPUPercent = 1
+	windowsMaxCPUPercent = 100
+	windowsMinCPUCount   = 1
+
+	errInvalidState = syscall.Errno(0x139F)
+)
+
+// Windows has no concept of an execution state directory. So use config.Root here.
+func getPluginExecRoot(root string) string {
+	return filepath.Join(root, "plugins")
+}
+
+func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) {
+	return nil, nil
+}
+
+func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	return parseSecurityOpt(container, hostConfig)
+}
+
+func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
+	return nil
+}
+
+func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) {
+	return nil, nil
+}
+
+func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) {
+	return nil, nil
+}
+
+func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) {
+	return nil, nil
+}
+
+func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) {
+	return nil, nil
+}
+
+func (daemon *Daemon) getLayerInit() func(string) error {
+	return nil
+}
+
+func checkKernel() error {
+	return nil
+}
+
+func (daemon *Daemon) getCgroupDriver() string {
+	return ""
+}
+
+// adaptContainerSettings is called during container creation to modify any
+// settings necessary in the HostConfig structure.
+func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
+	if hostConfig == nil {
+		return nil
+	}
+
+	return nil
+}
+
+func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) {
+	warnings := []string{}
+
+	if !isHyperv {
+		// The processor resource controls are mutually exclusive on
+		// Windows Server Containers; the order of precedence is
+		// CPUCount first, then CPUShares, and CPUPercent last.
+		if resources.CPUCount > 0 {
+			if resources.CPUShares > 0 {
+				warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded")
+				logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded")
+				resources.CPUShares = 0
+			}
+			if resources.CPUPercent > 0 {
+				warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
+				logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
+				resources.CPUPercent = 0
+			}
+		} else if resources.CPUShares > 0 {
+			if resources.CPUPercent > 0 {
+				warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
+				logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
+				resources.CPUPercent = 0
+			}
+		}
+	}
+
+	if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares {
+		return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares)
+	}
+	if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent {
+		return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent)
+	}
+	if resources.CPUCount < 0 {
+		return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative")
+	}
+
+	if resources.NanoCPUs > 0 && resources.CPUPercent > 0 {
+		return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set")
+	}
+	if resources.NanoCPUs > 0 && resources.CPUShares > 0 {
+		return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set")
+	}
+	// The precision we could get is 0.01, because on Windows we have to convert to CPUPercent.
+	// We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error.
+	if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
+		return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
+	}
+
+	osv := system.GetOSVersion()
+	if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 {
+		leftoverNanoCPUs := resources.NanoCPUs % 1e9
+		if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 {
+			resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9
+			warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. NanoCPUs rounded to %d", resources.NanoCPUs)
+			warnings = append(warnings, warningString)
+			logrus.Warn(warningString)
+		}
+	}
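+	// Worked example (hypothetical request) of the rounding above: asking a
+	// Hyper-V container for 2.5 CPUs (NanoCPUs = 2500000000) on an affected
+	// build yields ((2500000000 + 500000000) / 1000000000) * 1000000000 =
+	// 3000000000, i.e. the request is rounded to the nearest whole CPU.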
NanoCPUs rounded to %d", resources.NanoCPUs) + warnings = append(warnings, warningString) + logrus.Warn(warningString) + } + } + + if len(resources.BlkioDeviceReadBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") + } + if len(resources.BlkioDeviceReadIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") + } + if len(resources.BlkioDeviceWriteBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") + } + if len(resources.BlkioDeviceWriteIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") + } + if resources.BlkioWeight > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") + } + if len(resources.BlkioWeightDevice) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") + } + if resources.CgroupParent != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") + } + if resources.CPUPeriod != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") + } + if resources.CpusetCpus != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") + } + if resources.CpusetMems != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") + } + if resources.KernelMemory != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") + } + if resources.MemoryReservation != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") + } + if resources.MemorySwap != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") + } + if resources.OomKillDisable != nil && *resources.OomKillDisable { + return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") + } + if resources.PidsLimit != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") + } + if len(resources.Ulimits) != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") + } + return warnings, nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + + hyperv := daemon.runAsHyperVContainer(hostConfig) + if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + + w, err := verifyContainerResources(&hostConfig.Resources, hyperv) + warnings = append(warnings, w...) 
+ return warnings, err +} + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(config *config.Config, attributes map[string]string) { +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *config.Config) error { + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + // Validate the OS version. Note that docker.exe must be manifested for this + // call to return the correct version. + osv := system.GetOSVersion() + if osv.MajorVersion < 10 { + return fmt.Errorf("This version of Windows does not support the docker daemon") + } + if osv.Build < 14393 { + return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") + } + + vmcompute := windows.NewLazySystemDLL("vmcompute.dll") + if vmcompute.Load() != nil { + return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.") + } + + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *config.Config, driverNames []string) error { + return nil +} + +// configureMaxThreads sets the Go runtime max threads threshold +func configureMaxThreads(config *config.Config) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, nil, nil) + if err != nil { + return nil, err + } + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + + // Remove networks not present in HNS + for _, v := range controller.Networks() { + options := v.Info().DriverOptions() + hnsid := options[winlibnetwork.HNSID] + found := false + + for _, v := range hnsresponse { + if v.Id == hnsid { + found = true + break + } + } + + if !found { + // global networks should not be deleted by local HNS + if v.Info().Scope() != datastore.GlobalScope { + err = v.Delete() + if err != nil { + logrus.Errorf("Error occurred when removing network %v", err) + } + } + } + } + + _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) + if err != nil { + return nil, err + } + + defaultNetworkExists := false + + if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + options := network.Info().DriverOptions() + for _, v := range hnsresponse { + if options[winlibnetwork.HNSID] == v.Id { + defaultNetworkExists = true + break + } + } + } + + // discover and add HNS networks to windows + // network that exist are removed and added again + for _, v := range hnsresponse { + if strings.ToLower(v.Type) == "private" { + continue // workaround for HNS reporting unsupported networks + } + var n libnetwork.Network + s := func(current libnetwork.Network) bool { + options := current.Info().DriverOptions() + if options[winlibnetwork.HNSID] == v.Id { + n = current + return true + } + return false + } + + controller.WalkNetworks(s) + if n != nil { + // global networks should not be deleted by local HNS + if n.Info().Scope() == datastore.GlobalScope { + continue + } + v.Name = n.Name() + // This will not 
cause network delete from HNS as the network + // is not yet populated in the libnetwork windows driver + n.Delete() + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: v.Name, + winlibnetwork.HNSID: v.Id, + } + + v4Conf := []*libnetwork.IpamConf{} + for _, subnet := range v.Subnets { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnet.AddressPrefix + ipamV4Conf.Gateway = subnet.GatewayAddress + v4Conf = append(v4Conf, &ipamV4Conf) + } + + name := v.Name + + // If there is no nat network create one from the first NAT network + // encountered if it doesn't already exist + if !defaultNetworkExists && + runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && + n == nil { + name = runconfig.DefaultDaemonNetworkMode().NetworkName() + defaultNetworkExists = true + } + + v6Conf := []*libnetwork.IpamConf{} + _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + ) + + if err != nil { + logrus.Errorf("Error occurred when creating network %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { + if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + return nil + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), + } + + var ipamOption libnetwork.NetworkOption + var subnetPrefix string + + if config.BridgeConfig.FixedCIDR != "" { + subnetPrefix = config.BridgeConfig.FixedCIDR + } else { + // TP5 doesn't support properly detecting subnet + osv := system.GetOSVersion() + if osv.Build < 14360 { + subnetPrefix = defaultNetworkSpace + } + } + + if subnetPrefix != "" { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnetPrefix + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) + } + + _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + ipamOption, + ) + + if err != nil { + return fmt.Errorf("Error creating default network: %v", err) + } + + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. As of Windows TP4, links are not supported. 
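To make the flow of initBridgeDriver above concrete, here is a compilable fragment (the subnet value is invented) showing how a fixed CIDR becomes the IPAM pool handed to libnetwork when the default NAT network is created; IpamConf and NetworkOptionIpam are the same libnetwork types used above:

package main

import (
	"fmt"

	"github.com/docker/libnetwork"
)

func main() {
	// Stand-in for config.BridgeConfig.FixedCIDR (or defaultNetworkSpace
	// on pre-14360 builds); the value is an example.
	subnetPrefix := "172.16.0.0/12"

	ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix}
	v4Conf := []*libnetwork.IpamConf{&ipamV4Conf}
	v6Conf := []*libnetwork.IpamConf{}

	// Same option constructor the daemon passes to controller.NewNetwork.
	ipamOption := libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil)
	fmt.Printf("nat network would use pool %s (option %T)\n", subnetPrefix, ipamOption)
}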
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	return nil
+}
+
+func (daemon *Daemon) cleanupMountsByID(in string) error {
+	return nil
+}
+
+func (daemon *Daemon) cleanupMounts() error {
+	return nil
+}
+
+func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
+	return &idtools.IDMappings{}, nil
+}
+
+func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
+	config.Root = rootDir
+	// Create the root directory if it doesn't exist
+	if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
+// runAsHyperVContainer returns true if we are going to run as a Hyper-V container
+func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool {
+	if hostConfig.Isolation.IsDefault() {
+		// Container is set to use the default, so take the default from the daemon configuration
+		return daemon.defaultIsolation.IsHyperV()
+	}
+
+	// Container is requesting an isolation mode. Honour it.
+	return hostConfig.Isolation.IsHyperV()
+}
+
+// conditionalMountOnStart is a platform specific helper function during the
+// container start to call mount.
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
+	// Bail out now for Linux containers
+	if system.LCOWSupported() && container.Platform != "windows" {
+		return nil
+	}
+
+	// We do not mount if a Hyper-V container
+	if !daemon.runAsHyperVContainer(container.HostConfig) {
+		return daemon.Mount(container)
+	}
+	return nil
+}
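The two conditional helpers around this point reduce to a small decision table; a runnable sketch of just that predicate (the function name is mine, not the daemon's):

package main

import "fmt"

// shouldMount captures the rule used by conditionalMountOnStart and
// conditionalUnmountOnCleanup: skip LCOW Linux containers entirely, and
// only (un)mount when the container runs with process isolation.
func shouldMount(lcowLinux, hyperv bool) bool {
	if lcowLinux {
		return false
	}
	return !hyperv
}

func main() {
	fmt.Println(shouldMount(false, false)) // process isolation: daemon mounts
	fmt.Println(shouldMount(false, true))  // Hyper-V isolation: skip
	fmt.Println(shouldMount(true, false))  // Linux container under LCOW: skip
}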
+// conditionalUnmountOnCleanup is a platform specific helper function called
+// during the cleanup of a container to unmount.
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
+	// Bail out now for Linux containers
+	if system.LCOWSupported() && container.Platform != "windows" {
+		return nil
+	}
+
+	// We do not unmount if a Hyper-V container
+	if !daemon.runAsHyperVContainer(container.HostConfig) {
+		return daemon.Unmount(container)
+	}
+	return nil
+}
+
+func driverOptions(config *config.Config) []nwconfig.Option {
+	return []nwconfig.Option{}
+}
+
+func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
+	if !c.IsRunning() {
+		return nil, errNotRunning{c.ID}
+	}
+
+	// Obtain the stats from HCS via libcontainerd
+	stats, err := daemon.containerd.Stats(c.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start with an empty structure
+	s := &types.StatsJSON{}
+
+	// Populate the CPU/processor statistics
+	s.CPUStats = types.CPUStats{
+		CPUUsage: types.CPUUsage{
+			TotalUsage:        stats.Processor.TotalRuntime100ns,
+			UsageInKernelmode: stats.Processor.RuntimeKernel100ns,
+			UsageInUsermode:   stats.Processor.RuntimeUser100ns,
+		},
+	}
+
+	// Populate the memory statistics
+	s.MemoryStats = types.MemoryStats{
+		Commit:            stats.Memory.UsageCommitBytes,
+		CommitPeak:        stats.Memory.UsageCommitPeakBytes,
+		PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes,
+	}
+
+	// Populate the storage statistics
+	s.StorageStats = types.StorageStats{
+		ReadCountNormalized:  stats.Storage.ReadCountNormalized,
+		ReadSizeBytes:        stats.Storage.ReadSizeBytes,
+		WriteCountNormalized: stats.Storage.WriteCountNormalized,
+		WriteSizeBytes:       stats.Storage.WriteSizeBytes,
+	}
+
+	// Populate the network statistics
+	s.Networks = make(map[string]types.NetworkStats)
+
+	for _, nstats := range stats.Network {
+		s.Networks[nstats.EndpointId] = types.NetworkStats{
+			RxBytes:   nstats.BytesReceived,
+			RxPackets: nstats.PacketsReceived,
+			RxDropped: nstats.DroppedPacketsIncoming,
+			TxBytes:   nstats.BytesSent,
+			TxPackets: nstats.PacketsSent,
+			TxDropped: nstats.DroppedPacketsOutgoing,
+		}
+	}
+
+	// Set the timestamp
+	s.Stats.Read = stats.Timestamp
+	s.Stats.NumProcs = platform.NumProcs()
+
+	return s, nil
+}
+
+// setDefaultIsolation determines the default isolation mode for the
+// daemon to run in. This is only applicable on Windows.
+func (daemon *Daemon) setDefaultIsolation() error {
+	daemon.defaultIsolation = containertypes.Isolation("process")
+	// On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU
+	// but it should not be treated as such.
+	if system.IsWindowsClient() && !system.IsIoTCore() {
+		daemon.defaultIsolation = containertypes.Isolation("hyperv")
+	}
+	for _, option := range daemon.configStore.ExecOptions {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "isolation":
+			if !containertypes.Isolation(val).IsValid() {
+				return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val)
+			}
+			if containertypes.Isolation(val).IsHyperV() {
+				daemon.defaultIsolation = containertypes.Isolation("hyperv")
+			}
+			if containertypes.Isolation(val).IsProcess() {
+				if system.IsWindowsClient() && !system.IsIoTCore() {
+					// @engine maintainers. This block should not be removed. It partially enforces licensing
+					// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
+ return fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + daemon.defaultIsolation = containertypes.Isolation("process") + } + default: + return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) + } + } + + logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +func setupDaemonProcess(config *config.Config) error { + return nil +} + +// verifyVolumesInfo is a no-op on windows. +// This is called during daemon initialization to migrate volumes from pre-1.7. +// volumes were not supported on windows pre-1.7 +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} + +func getRealPath(path string) (string, error) { + if system.IsIoTCore() { + // Due to https://github.com/golang/go/issues/20506, path expansion + // does not work correctly on the default IoT Core configuration. + // TODO @darrenstahlmsft remove this once golang/go/20506 is fixed + return path, nil + } + return fileutils.ReadSymlinkedDirectory(path) +} diff --git a/daemon/debugtrap_unix.go b/daemon/debugtrap_unix.go new file mode 100644 index 0000000..8605d1d --- /dev/null +++ b/daemon/debugtrap_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + "github.com/Sirupsen/logrus" + stackdump "github.com/docker/docker/pkg/signal" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + path, err := stackdump.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + } + }() +} diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go new file mode 100644 index 0000000..f5b9170 --- /dev/null +++ b/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package daemon + +func (d *Daemon) setupDumpStackTrap(_ string) { + return +} diff --git a/daemon/debugtrap_windows.go b/daemon/debugtrap_windows.go new file mode 100644 index 0000000..d01f7f3 --- /dev/null +++ b/daemon/debugtrap_windows.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. 
ACL'd to builtin administrators and local system + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + return + } + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := system.CreateEvent(&sa, false, false, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + path, err := signal.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + } + }() +} diff --git a/daemon/delete.go b/daemon/delete.go new file mode 100644 index 0000000..2d3cd0f --- /dev/null +++ b/daemon/delete.go @@ -0,0 +1,172 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + volumestore "github.com/docker/docker/volume/store" + "github.com/pkg/errors" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. 
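The guard that follows is essentially a test-and-set: the first caller flips the flag and proceeds, while a concurrent rm sees it already set and gets a conflict error. A runnable, stand-alone analog of that guard (the real flag lives behind container.State's mutex, so this is only a sketch):

package main

import (
	"fmt"
	"sync/atomic"
)

type state struct{ removalInProgress int32 }

// setRemovalInProgress reports whether a removal was already running.
func (s *state) setRemovalInProgress() (alreadyInProgress bool) {
	return !atomic.CompareAndSwapInt32(&s.removalInProgress, 0, 1)
}

func (s *state) resetRemovalInProgress() {
	atomic.StoreInt32(&s.removalInProgress, 0)
}

func main() {
	var s state
	fmt.Println(s.setRemovalInProgress()) // false: first rm proceeds
	fmt.Println(s.setRemovalInProgress()) // true: concurrent rm is rejected
	s.resetRemovalInProgress()            // mirrors the deferred reset in ContainerRm
}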
+ if inProgress := container.SetRemovalInProgress(); inProgress { + err := fmt.Errorf("removal of container %s is already in progress", name) + return apierrors.NewBadRequestError(err) + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) + containerActions.WithValues("delete").UpdateSince(start) + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { + if container.IsRunning() { + if !forceRemove { + state := container.StateString() + procedure := "Stop the container before attempting removal or force remove" + if state == "paused" { + procedure = "Unpause and then " + strings.ToLower(procedure) + } + err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure) + return apierrors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.StopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.Lock() + container.Dead = true + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. 
+ if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + container.Unlock() + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.stores[container.Platform].layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.Platform), container.ID) + } + } + + if err := system.EnsureRemoveAll(container.Root); err != nil { + return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID) + } + + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + daemon.containersReplica.Delete(container) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) + } + container.SetRemoved() + stateCtr.del(container.ID) + daemon.LogContainerEvent(container, "destroy") + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the Engine API +func (daemon *Daemon) VolumeRm(name string, force bool) error { + err := daemon.volumeRm(name) + if err != nil && volumestore.IsInUse(err) { + return apierrors.NewRequestConflictError(err) + } + if err == nil || force { + daemon.volumes.Purge(name) + return nil + } + return err +} + +func (daemon *Daemon) volumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + return errors.Wrap(err, "unable to remove volume") + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/daemon/delete_test.go b/daemon/delete_test.go new file mode 100644 index 0000000..f1a9790 --- /dev/null +++ b/daemon/delete_test.go @@ -0,0 +1,96 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/testutil" + "github.com/stretchr/testify/require" +) + +func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + require.NoError(t, err) + d := &Daemon{ + repository: tmp, + root: tmp, + } + d.containers = container.NewMemoryStore() + return d, func() { os.RemoveAll(tmp) } +} + +func newContainerWithState(state *container.State) *container.Container { + return &container.Container{ + ID: "test", + State: state, + Config: &containertypes.Config{}, + } + +} + +// TestContainerDelete tests that a useful error message and instructions is +// given when attempting to remove a container (#30842) +func TestContainerDelete(t *testing.T) { + tt := []struct { + errMsg string + fixMsg string + initContainer func() *container.Container + }{ + // a paused container + { + errMsg: "cannot remove a paused container", + fixMsg: "Unpause and then stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return 
newContainerWithState(&container.State{Paused: true, Running: true}) + }}, + // a restarting container + { + errMsg: "cannot remove a restarting container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + c := newContainerWithState(container.NewState()) + c.SetRunning(0, true) + c.SetRestarting(&container.ExitStatus{}) + return c + }}, + // a running container + { + errMsg: "cannot remove a running container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Running: true}) + }}, + } + + for _, te := range tt { + c := te.initContainer() + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) + testutil.ErrorContains(t, err, te.errMsg) + testutil.ErrorContains(t, err, te.fixMsg) + } +} + +func TestContainerDoubleDelete(t *testing.T) { + c := newContainerWithState(container.NewState()) + + // Mark the container as having a delete in progress + c.SetRemovalInProgress() + + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + // Try to remove the container when its state is removalInProgress. + // It should return an error indicating it is under removal progress. + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true}) + testutil.ErrorContains(t, err, fmt.Sprintf("removal of container %s is already in progress", c.ID)) +} diff --git a/daemon/dependency.go b/daemon/dependency.go new file mode 100644 index 0000000..83144e6 --- /dev/null +++ b/daemon/dependency.go @@ -0,0 +1,17 @@ +package daemon + +import ( + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerDependencyStore sets the dependency store backend for the container +func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.DependencyStore = store + + return nil +} diff --git a/daemon/discovery/discovery.go b/daemon/discovery/discovery.go new file mode 100644 index 0000000..509155c --- /dev/null +++ b/daemon/discovery/discovery.go @@ -0,0 +1,202 @@ +package discovery + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + + // Register the libkv backends for discovery. + _ "github.com/docker/docker/pkg/discovery/kv" +) + +const ( + // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. 
+ defaultDiscoveryHeartbeat = 20 * time.Second + // defaultDiscoveryTTLFactor is the default TTL factor for discovery + defaultDiscoveryTTLFactor = 3 +) + +// ErrDiscoveryDisabled is an error returned if the discovery is disabled +var ErrDiscoveryDisabled = errors.New("discovery is disabled") + +// Reloader is the discovery reloader of the daemon +type Reloader interface { + discovery.Watcher + Stop() + Reload(backend, address string, clusterOpts map[string]string) error + ReadyCh() <-chan struct{} +} + +type daemonDiscoveryReloader struct { + backend discovery.Backend + ticker *time.Ticker + term chan bool + readyCh chan struct{} +} + +func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + return d.backend.Watch(stopCh) +} + +func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { + return d.readyCh +} + +func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { + var ( + heartbeat = defaultDiscoveryHeartbeat + ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat + ) + + if hb, ok := clusterOpts["discovery.heartbeat"]; ok { + h, err := strconv.Atoi(hb) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if h <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.heartbeat must be positive") + } + + heartbeat = time.Duration(h) * time.Second + ttl = defaultDiscoveryTTLFactor * heartbeat + } + + if tstr, ok := clusterOpts["discovery.ttl"]; ok { + t, err := strconv.Atoi(tstr) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if t <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl must be positive") + } + + ttl = time.Duration(t) * time.Second + + if _, ok := clusterOpts["discovery.heartbeat"]; !ok { + heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor) + } + + if ttl <= heartbeat { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") + } + } + + return heartbeat, ttl, nil +} + +// Init initializes the nodes discovery subsystem by connecting to the specified backend +// and starts a registration loop to advertise the current node under the specified address. +func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) { + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return nil, err + } + + reloader := &daemonDiscoveryReloader{ + backend: backend, + ticker: time.NewTicker(heartbeat), + term: make(chan bool), + readyCh: make(chan struct{}), + } + // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, + // but we never actually Watch() for nodes appearing and disappearing for the moment. + go reloader.advertiseHeartbeat(advertiseAddress) + return reloader, nil +} + +// advertiseHeartbeat registers the current node against the discovery backend using the specified +// address. The function never returns, as registration against the backend comes with a TTL and +// requires regular heartbeats. 
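A worked example of the rules implemented by discoveryOpts above: with only discovery.ttl set, the heartbeat is derived by dividing by the TTL factor, and the pair is only valid while ttl stays strictly greater than heartbeat.

package main

import (
	"fmt"
	"time"
)

const defaultDiscoveryTTLFactor = 3 // same factor as above

func main() {
	// "discovery.ttl": "30" with no explicit heartbeat, as handled by
	// discoveryOpts above: heartbeat is derived as ttl / factor.
	ttl := 30 * time.Second
	heartbeat := ttl / defaultDiscoveryTTLFactor

	fmt.Println(heartbeat, ttl)  // 10s 30s
	fmt.Println(ttl > heartbeat) // true, so the pair is accepted
}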
+func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
+	var ready bool
+	if err := d.initHeartbeat(address); err == nil {
+		ready = true
+		close(d.readyCh)
+	} else {
+		logrus.WithError(err).Debug("First discovery heartbeat failed")
+	}
+
+	for {
+		select {
+		case <-d.ticker.C:
+			if err := d.backend.Register(address); err != nil {
+				logrus.Warnf("Registering as %q in discovery failed: %v", address, err)
+			} else {
+				if !ready {
+					close(d.readyCh)
+					ready = true
+				}
+			}
+		case <-d.term:
+			return
+		}
+	}
+}
+
+// initHeartbeat performs the first heartbeat. It retries in a tight loop until
+// either the timeout period is reached or the heartbeat succeeds.
+func (d *daemonDiscoveryReloader) initHeartbeat(address string) error {
+	// Set up a short ticker until the first heartbeat has succeeded
+	t := time.NewTicker(500 * time.Millisecond)
+	defer t.Stop()
+	// timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service
+	timeout := time.After(60 * time.Second)
+
+	for {
+		select {
+		case <-timeout:
+			return errors.New("timeout waiting for initial discovery")
+		case <-d.term:
+			return errors.New("terminated")
+		case <-t.C:
+			if err := d.backend.Register(address); err == nil {
+				return nil
+			}
+		}
+	}
+}
+
+// Reload makes the watcher stop advertising and reconfigures it to advertise at a new address.
+func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
+	d.Stop()
+
+	heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
+	if err != nil {
+		return err
+	}
+
+	d.backend = backend
+	d.ticker = time.NewTicker(heartbeat)
+	d.readyCh = make(chan struct{})
+
+	go d.advertiseHeartbeat(advertiseAddress)
+	return nil
+}
+
+// Stop terminates the discovery advertising.
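initHeartbeat above is an instance of a reusable shape: poll on a short ticker until the first success or an overall deadline. Extracted into a generic helper (durations shortened for illustration):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntil runs op on a ticker until it succeeds or deadline expires,
// mirroring the select loop in initHeartbeat.
func retryUntil(op func() error, every, deadline time.Duration) error {
	t := time.NewTicker(every)
	defer t.Stop()
	timeout := time.After(deadline)
	for {
		select {
		case <-timeout:
			return errors.New("timeout waiting for first success")
		case <-t.C:
			if err := op(); err == nil {
				return nil
			}
		}
	}
}

func main() {
	attempts := 0
	err := retryUntil(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready")
		}
		return nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println(attempts, err) // 3 <nil>
}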
+func (d *daemonDiscoveryReloader) Stop() { + d.ticker.Stop() + d.term <- true +} + +func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err != nil { + return 0, nil, err + } + + backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) + if err != nil { + return 0, nil, err + } + return heartbeat, backend, nil +} diff --git a/daemon/discovery/discovery_test.go b/daemon/discovery/discovery_test.go new file mode 100644 index 0000000..7814429 --- /dev/null +++ b/daemon/discovery/discovery_test.go @@ -0,0 +1,111 @@ +package discovery + +import ( + "fmt" + "testing" + "time" +) + +func TestDiscoveryOpts(t *testing.T) { + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("discovery.ttl < discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("discovery.ttl == discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("negative discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("negative discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("invalid discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.ttl": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatal("invalid discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + if ttl != 20*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + expected := 10 * defaultDiscoveryTTLFactor * time.Second + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } + + clusterOpts = map[string]string{"discovery.ttl": "30"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if ttl != 30*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) + } + + expected = 30 * time.Second / defaultDiscoveryTTLFactor + if heartbeat != expected { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) + } + + discaveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1) + clusterOpts = map[string]string{"discovery.ttl": discaveryTTL} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil && heartbeat == 0 { + t.Fatal("discovery.heartbeat must be positive") + } + + clusterOpts = 
map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} diff --git a/daemon/disk_usage.go b/daemon/disk_usage.go new file mode 100644 index 0000000..c64a243 --- /dev/null +++ b/daemon/disk_usage.go @@ -0,0 +1,128 @@ +package daemon + +import ( + "fmt" + "sync/atomic" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" + "github.com/opencontainers/go-digest" +) + +func (daemon *Daemon) getLayerRefs(platform string) map[layer.ChainID]int { + tmpImages := daemon.stores[platform].imageStore.Map() + layerRefs := map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) { + if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) { + return nil, fmt.Errorf("a disk usage operation is already running") + } + defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) + + // Retrieve container list + allContainers, err := daemon.Containers(&types.ContainerListOptions{ + Size: true, + All: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve container list: %v", err) + } + + // Get all top images with extra attributes + // TODO @jhowardmsft LCOW. This may need revisiting + allImages, err := daemon.Images(filters.NewArgs(), false, true) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + + // Get all local volumes + allVolumes := []*types.Volume{} + getLocalVols := func(v volume.Volume) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if d, ok := v.(volume.DetailedVolume); ok { + // skip local volumes with mount options since these could have external + // mounted filesystems that will be slow to enumerate. 
+			if len(d.Options()) > 0 {
+				return nil
+			}
+		}
+		name := v.Name()
+		refs := daemon.volumes.Refs(v)
+
+		tv := volumeToAPIType(v)
+		sz, err := directory.Size(v.Path())
+		if err != nil {
+			logrus.Warnf("failed to determine size of volume %v", name)
+			sz = -1
+		}
+		tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))}
+		allVolumes = append(allVolumes, tv)
+	}
+
+	return nil
+	}
+
+	err = daemon.traverseLocalVolumes(getLocalVols)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get total layers size on disk
+	var allLayersSize int64
+	for platform := range daemon.stores {
+		layerRefs := daemon.getLayerRefs(platform)
+		allLayers := daemon.stores[platform].layerStore.Map()
+		for _, l := range allLayers {
+			select {
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			default:
+				size, err := l.DiffSize()
+				if err == nil {
+					if _, ok := layerRefs[l.ChainID()]; ok {
+						allLayersSize += size
+					} else {
+						logrus.Warnf("found leaked image layer %v platform %s", l.ChainID(), platform)
+					}
+				} else {
+					logrus.Warnf("failed to get diff size for layer %v %s", l.ChainID(), platform)
+				}
+			}
+		}
+	}
+
+	return &types.DiskUsage{
+		LayersSize: allLayersSize,
+		Containers: allContainers,
+		Volumes:    allVolumes,
+		Images:     allImages,
+	}, nil
+}
diff --git a/daemon/errors.go b/daemon/errors.go
new file mode 100644
index 0000000..96c30df
--- /dev/null
+++ b/daemon/errors.go
@@ -0,0 +1,41 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/errors"
+)
+
+func (d *Daemon) imageNotExistToErrcode(err error) error {
+	if dne, isDNE := err.(ErrImageDoesNotExist); isDNE {
+		return errors.NewRequestNotFoundError(dne)
+	}
+	return err
+}
+
+type errNotRunning struct {
+	containerID string
+}
+
+func (e errNotRunning) Error() string {
+	return fmt.Sprintf("Container %s is not running", e.containerID)
+}
+
+func (e errNotRunning) ContainerIsRunning() bool {
+	return false
+}
+
+func errContainerIsRestarting(containerID string) error {
+	err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID)
+	return errors.NewRequestConflictError(err)
+}
+
+func errExecNotFound(id string) error {
+	err := fmt.Errorf("No such exec instance '%s' found in daemon", id)
+	return errors.NewRequestNotFoundError(err)
+}
+
+func errExecPaused(id string) error {
+	err := fmt.Errorf("Container %s is paused, unpause the container before exec", id)
+	return errors.NewRequestConflictError(err)
+}
diff --git a/daemon/events.go b/daemon/events.go
new file mode 100644
index 0000000..d0b1a86
--- /dev/null
+++ b/daemon/events.go
@@ -0,0 +1,322 @@
+package daemon
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/container"
+	daemonevents "github.com/docker/docker/daemon/events"
+	"github.com/docker/libnetwork"
+	swarmapi "github.com/docker/swarmkit/api"
+	gogotypes "github.com/gogo/protobuf/types"
+)
+
+var (
+	clusterEventAction = map[swarmapi.WatchActionKind]string{
+		swarmapi.WatchActionKindCreate: "create",
+		swarmapi.WatchActionKindUpdate: "update",
+		swarmapi.WatchActionKindRemove: "remove",
+	}
+)
+
+// LogContainerEvent generates an event related to a container with only the default attributes.
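SystemDiskUsage above serializes itself with a single CompareAndSwap on an int32 flag rather than a mutex, so a second concurrent API call fails immediately instead of queueing behind an expensive filesystem walk. The pattern in isolation:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var running int32

// diskUsage fails fast if another invocation is in flight, like the
// guard at the top of SystemDiskUsage.
func diskUsage() (string, error) {
	if !atomic.CompareAndSwapInt32(&running, 0, 1) {
		return "", errors.New("a disk usage operation is already running")
	}
	defer atomic.StoreInt32(&running, 0)
	return "usage report", nil // stand-in for the real enumeration
}

func main() {
	r, err := diskUsage()
	fmt.Println(r, err)
}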
+func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogPluginEvent generates an event related to a plugin with only the default attributes. +func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { + daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) +} + +// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. +func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { + attributes["name"] = refName + actor := events.Actor{ + ID: pluginID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.PluginEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// LogNetworkEvent generates an event related to a network with only the default attributes. +func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { + daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) +} + +// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. +func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { + attributes["name"] = nw.Name() + attributes["type"] = nw.Type() + actor := events.Actor{ + ID: nw.ID(), + Attributes: attributes, + } + daemon.EventsService.Log(action, events.NetworkEventType, actor) +} + +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. 
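All of these helpers funnel into an events.Actor, an object ID plus a flat attribute map; copyAttributes (defined later in this file) copies labels into a fresh map so that callers' label maps are never mutated. A minimal illustration with a local stand-in type:

package main

import "fmt"

// actor is a stand-in for events.Actor: an object ID plus attributes.
type actor struct {
	ID         string
	Attributes map[string]string
}

func main() {
	labels := map[string]string{"com.example.team": "infra"}

	attributes := map[string]string{}
	for k, v := range labels { // what copyAttributes does
		attributes[k] = v
	}
	attributes["image"] = "ubuntu" // defaults added by the helpers
	attributes["name"] = "small_hoover"

	fmt.Println(actor{ID: "0b863f2a26c1", Attributes: attributes})
	fmt.Println(labels) // the caller's labels are untouched
}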
+func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + +// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. +func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { + ef := daemonevents.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, until, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. +func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} + +// ProcessClusterNotifications gets changes from store and add them to event list +func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) { + for { + select { + case <-ctx.Done(): + return + case message, ok := <-watchStream: + if !ok { + logrus.Debug("cluster event channel has stopped") + return + } + daemon.generateClusterEvent(message) + } + } +} + +func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) { + for _, event := range msg.Events { + if event.Object == nil { + logrus.Errorf("event without object: %v", event) + continue + } + switch v := event.Object.GetObject().(type) { + case *swarmapi.Object_Node: + daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode()) + case *swarmapi.Object_Service: + daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService()) + case *swarmapi.Object_Network: + daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork()) + case *swarmapi.Object_Secret: + daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret()) + default: + logrus.Warnf("unrecognized event: %v", event) + } + } +} + +func (daemon *Daemon) logNetworkEvent(action swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) { + attributes := map[string]string{ + "name": net.Spec.Annotations.Name, + } + eventTime := eventTimestamp(net.Meta, action) + daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime) +} + +func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) { + attributes := map[string]string{ + "name": secret.Spec.Annotations.Name, + } + eventTime := eventTimestamp(secret.Meta, action) + daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime) +} + +func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) { + name := node.Spec.Annotations.Name + if name == "" && node.Description != nil { + name = node.Description.Hostname + } + attributes := map[string]string{ + "name": name, + } + eventTime := eventTimestamp(node.Meta, action) + // In an update event, display the changes in attributes + if action == swarmapi.WatchActionKindUpdate && oldNode != nil { + if 
node.Spec.Availability != oldNode.Spec.Availability { + attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String()) + attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String()) + } + if node.Role != oldNode.Role { + attributes["role.old"] = strings.ToLower(oldNode.Role.String()) + attributes["role.new"] = strings.ToLower(node.Role.String()) + } + if node.Status.State != oldNode.Status.State { + attributes["state.old"] = strings.ToLower(oldNode.Status.State.String()) + attributes["state.new"] = strings.ToLower(node.Status.State.String()) + } + // This handles change within manager role + if node.ManagerStatus != nil && oldNode.ManagerStatus != nil { + // leader change + if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader { + if node.ManagerStatus.Leader { + attributes["leader.old"] = "false" + attributes["leader.new"] = "true" + } else { + attributes["leader.old"] = "true" + attributes["leader.new"] = "false" + } + } + if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability { + attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String()) + attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String()) + } + } + } + + daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime) +} + +func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) { + attributes := map[string]string{ + "name": service.Spec.Annotations.Name, + } + eventTime := eventTimestamp(service.Meta, action) + + if action == swarmapi.WatchActionKindUpdate && oldService != nil { + // check image + if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + containerSpec := x.Container + if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + oldContainerSpec := y.Container + if containerSpec.Image != oldContainerSpec.Image { + attributes["image.old"] = oldContainerSpec.Image + attributes["image.new"] = containerSpec.Image + } + } else { + // This should not happen. + logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) + } + } + // check replicated count change + if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + replicas := x.Replicated.Replicas + if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + oldReplicas := y.Replicated.Replicas + if replicas != oldReplicas { + attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10) + attributes["replicas.new"] = strconv.FormatUint(replicas, 10) + } + } else { + // This should not happen. 
+ logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) + } + } + if service.UpdateStatus != nil { + if oldService.UpdateStatus == nil { + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } else if service.UpdateStatus.State != oldService.UpdateStatus.State { + attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String()) + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } + } + } + daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime) +} + +func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) { + actor := events.Actor{ + ID: id, + Attributes: attributes, + } + + jm := events.Message{ + Action: clusterEventAction[action], + Type: eventType, + Actor: actor, + Scope: "swarm", + Time: eventTime.UTC().Unix(), + TimeNano: eventTime.UTC().UnixNano(), + } + daemon.EventsService.PublishMessage(jm) +} + +func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time { + var eventTime time.Time + switch action { + case swarmapi.WatchActionKindCreate: + eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt) + case swarmapi.WatchActionKindUpdate: + eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt) + case swarmapi.WatchActionKindRemove: + // There is no timestamp from store message for remove operations. + // Use current time. + eventTime = time.Now() + } + return eventTime +} diff --git a/daemon/events/events.go b/daemon/events/events.go new file mode 100644 index 0000000..d1529e1 --- /dev/null +++ b/daemon/events/events.go @@ -0,0 +1,165 @@ +package events + +import ( + "sync" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/pkg/pubsub" +) + +const ( + eventsLimit = 256 + bufferSize = 1024 +) + +// Events is pubsub channel for events generated by the engine. +type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. +func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + eventSubscribers.Inc() + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). 
+func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { + eventSubscribers.Inc() + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, until, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + eventSubscribers.Dec() + e.pub.Evict(l) +} + +// Log creates a local scope message and publishes it +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Scope: "local", + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.PublishMessage(jm) +} + +// PublishMessage broadcasts event to listeners. Each listener has 100 milliseconds to +// receive the event or it will be skipped. +func (e *Events) PublishMessage(jm eventtypes.Message) { + eventsCounter.Inc() + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted between two specific dates. +// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. +func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since.IsZero() && until.IsZero() { + return buffered + } + + var sinceNanoUnix int64 + if !since.IsZero() { + sinceNanoUnix = since.UnixNano() + } + + var untilNanoUnix int64 + if !until.IsZero() { + untilNanoUnix = until.UnixNano() + } + + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + + if ev.TimeNano < sinceNanoUnix { + break + } + + if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { + continue + } + + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) 
+ } + } + return buffered +} diff --git a/daemon/events/events_test.go b/daemon/events/events_test.go new file mode 100644 index 0000000..ffb4e50 --- /dev/null +++ b/daemon/events/events_test.go @@ -0,0 +1,275 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" + eventstestutils "github.com/docker/docker/daemon/events/testutils" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1, _ := e.Subscribe() + _, l2, _ := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + actor := events.Actor{ + ID: "cont", + Attributes: map[string]string{"image": "image"}, + } + e.Log("test", events.ContainerEventType, actor) + select { + case msg := <-l1: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + c := make(chan struct{}) + go func() { + actor := events.Actor{ + ID: "image", + } + e.Log("test", events.ImageEventType, actor) + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + time.Sleep(50 * time.Millisecond) + current, l, _ := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []events.Message + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] + if first.Status != "action_16" { + t.Fatalf("First action is %s, must be action_16", 
first.Status) + } + last := current[len(current)-1] + if last.Status != "action_271" { + t.Fatalf("Last action is %s, must be action_271", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_272" { + t.Fatalf("First action is %s, must be action_272", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status != "action_281" { + t.Fatalf("Last action is %s, must be action_281", lastC.Status) + } +} + +// https://github.com/docker/docker/issues/20999 +// Fixtures: +// +//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) +//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +func TestLoadBufferedEvents(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } +} + +func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + u, uNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } 
+
+	events := &Events{
+		events: []events.Message{*m1, *m2, *m3},
+	}
+
+	since := time.Unix(s, sNano)
+	until := time.Unix(u, uNano)
+
+	out := events.loadBufferedEvents(since, until, nil)
+	if len(out) != 1 {
+		t.Fatalf("expected 1 message, got %d: %v", len(out), out)
+	}
+
+	if out[0].Type != "network" {
+		t.Fatalf("expected network event, got %s", out[0].Type)
+	}
+}
+
+// #13753
+func TestIgnoreBufferedWhenNoTimes(t *testing.T) {
+	m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
+	if err != nil {
+		t.Fatal(err)
+	}
+	m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)")
+	if err != nil {
+		t.Fatal(err)
+	}
+	m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	events := &Events{
+		events: []events.Message{*m1, *m2, *m3},
+	}
+
+	since := time.Time{}
+	until := time.Time{}
+
+	out := events.loadBufferedEvents(since, until, nil)
+	if len(out) != 0 {
+		t.Fatalf("expected 0 buffered events, got %q", out)
+	}
+}
diff --git a/daemon/events/filter.go b/daemon/events/filter.go
new file mode 100644
index 0000000..7f1a5fd
--- /dev/null
+++ b/daemon/events/filter.go
@@ -0,0 +1,130 @@
+package events
+
+import (
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+)
+
+// Filter can filter out docker events from a stream
+type Filter struct {
+	filter filters.Args
+}
+
+// NewFilter creates a new Filter
+func NewFilter(filter filters.Args) *Filter {
+	return &Filter{filter: filter}
+}
+
+// Include returns true when the event ev is included by the filters
+func (ef *Filter) Include(ev events.Message) bool {
+	return ef.matchEvent(ev) &&
+		ef.filter.ExactMatch("type", ev.Type) &&
+		ef.matchScope(ev.Scope) &&
+		ef.matchDaemon(ev) &&
+		ef.matchContainer(ev) &&
+		ef.matchPlugin(ev) &&
+		ef.matchVolume(ev) &&
+		ef.matchNetwork(ev) &&
+		ef.matchImage(ev) &&
+		ef.matchLabels(ev.Actor.Attributes)
+}
+
+func (ef *Filter) matchEvent(ev events.Message) bool {
+	// #25798: if an event filter contains health_status, exec_create or
+	// exec_start without a colon, do a FuzzyMatch instead of an ExactMatch.
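+	// For example (editor's illustration): a CLI filter such as
+	// `--filter event=health_status` arrives without a colon, while the
+	// emitted action is "health_status: healthy"; only a fuzzy (prefix)
+	// match pairs the two. Filters that name the full action, such as
+	// `--filter event=die`, still go through the exact-match path below.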
+ if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) { + return ef.filter.FuzzyMatch("event", ev.Action) + } + return ef.filter.ExactMatch("event", ev.Action) +} + +func (ef *Filter) filterContains(field string, values map[string]struct{}) bool { + for _, v := range ef.filter.Get(field) { + if _, ok := values[v]; ok { + return true + } + } + return false +} + +func (ef *Filter) matchScope(scope string) bool { + if !ef.filter.Include("scope") { + return true + } + return ef.filter.ExactMatch("scope", scope) +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchDaemon(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.DaemonEventType) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchPlugin(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.PluginEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) matchService(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ServiceEventType) +} + +func (ef *Filter) matchNode(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NodeEventType) +} + +func (ef *Filter) matchSecret(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.SecretEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. 
+func (ef *Filter) matchImage(ev events.Message) bool {
+	id := ev.Actor.ID
+	nameAttr := "image"
+	var imageName string
+
+	if ev.Type == events.ImageEventType {
+		nameAttr = "name"
+	}
+
+	if n, ok := ev.Actor.Attributes[nameAttr]; ok {
+		imageName = n
+	}
+	return ef.filter.ExactMatch("image", id) ||
+		ef.filter.ExactMatch("image", imageName) ||
+		ef.filter.ExactMatch("image", stripTag(id)) ||
+		ef.filter.ExactMatch("image", stripTag(imageName))
+}
+
+func stripTag(image string) string {
+	ref, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return image
+	}
+	return reference.FamiliarName(ref)
+}
diff --git a/daemon/events/metrics.go b/daemon/events/metrics.go
new file mode 100644
index 0000000..c9a89ec
--- /dev/null
+++ b/daemon/events/metrics.go
@@ -0,0 +1,15 @@
+package events
+
+import "github.com/docker/go-metrics"
+
+var (
+	eventsCounter    metrics.Counter
+	eventSubscribers metrics.Gauge
+)
+
+func init() {
+	ns := metrics.NewNamespace("engine", "daemon", nil)
+	eventsCounter = ns.NewCounter("events", "The number of events logged")
+	eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total)
+	metrics.Register(ns)
+}
diff --git a/daemon/events/testutils/testutils.go b/daemon/events/testutils/testutils.go
new file mode 100644
index 0000000..3544446
--- /dev/null
+++ b/daemon/events/testutils/testutils.go
@@ -0,0 +1,76 @@
+package testutils
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/api/types/events"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+var (
+	reTimestamp  = `(?P<timestamp>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))`
+	reEventType  = `(?P<eventType>\w+)`
+	reAction     = `(?P<action>\w+)`
+	reID         = `(?P<id>[^\s]+)`
+	reAttributes = `(\s\((?P<attributes>[^\)]+)\))?`
+	reString     = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes)
+
+	// eventCliRegexp is a regular expression that matches all possible event outputs in the cli
+	eventCliRegexp = regexp.MustCompile(reString)
+)
+
+// ScanMap takes an event string formatted like the default CLI output
+// and turns it into a map.
+func ScanMap(text string) map[string]string {
+	matches := eventCliRegexp.FindAllStringSubmatch(text, -1)
+	md := map[string]string{}
+	if len(matches) == 0 {
+		return md
+	}
+
+	names := eventCliRegexp.SubexpNames()
+	for i, n := range matches[0] {
+		md[names[i]] = n
+	}
+	return md
+}
+
+// Scan takes an event string formatted like the default CLI output
+// and turns it into an event message.
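+//
+// For example (editor's note), scanning the line
+//
+//	2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c1... (image=ubuntu, name=small_hoover)
+//
+// is expected to yield Type "container", Action "die", the long ID in
+// Actor.ID, and Attributes{"image": "ubuntu", "name": "small_hoover"}.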
+func Scan(text string) (*events.Message, error) { + md := ScanMap(text) + if len(md) == 0 { + return nil, fmt.Errorf("text is not an event: %s", text) + } + + f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) + if err != nil { + return nil, err + } + + t, tn, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + return nil, err + } + + attrs := make(map[string]string) + for _, a := range strings.SplitN(md["attributes"], ", ", -1) { + kv := strings.SplitN(a, "=", 2) + attrs[kv[0]] = kv[1] + } + + tu := time.Unix(t, tn) + return &events.Message{ + Time: t, + TimeNano: tu.UnixNano(), + Type: md["eventType"], + Action: md["action"], + Actor: events.Actor{ + ID: md["id"], + Attributes: attrs, + }, + }, nil +} diff --git a/daemon/events_test.go b/daemon/events_test.go new file mode 100644 index 0000000..7048de2 --- /dev/null +++ b/daemon/events_test.go @@ -0,0 +1,90 @@ +package daemon + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func TestLogContainerEventCopyLabels(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + daemon.LogContainerEvent(container, "create") + + if _, mutated := container.Config.Labels["image"]; mutated { + t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) + } + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "os": "alpine", + }) +} + +func TestLogContainerEventWithAttributes(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + attributes := map[string]string{ + "node": "2", + "foo": "bar", + } + daemon.LogContainerEventWithAttributes(container, "create", attributes) + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "foo": "bar", + }) +} + +func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { + select { + case ev := <-l: + event, ok := ev.(eventtypes.Message) + if !ok { + t.Fatalf("Unexpected event message: %q", ev) + } + for key, expected := range expectedAttributesToTest { + actual, ok := event.Actor.Attributes[key] + if !ok || actual != expected { + t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) + } + } + case <-time.After(10 * time.Second): + t.Fatal("LogEvent test timed out") + } +} diff --git a/daemon/exec.go b/daemon/exec.go new file mode 100644 index 0000000..72d01c8 --- /dev/null +++ b/daemon/exec.go @@ -0,0 +1,299 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + 
"github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" +) + +// Seconds to wait after sending TERM before trying KILL +const termProcessTimeout = 10 + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via Engine API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. + + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. 
+func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { + cntr, err := d.getActiveContainer(name) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) + return "", err + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = cntr.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return "", err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + if len(execConfig.User) == 0 { + execConfig.User = cntr.Config.User + } + + d.registerExecCommand(cntr, execConfig) + + d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. +// If ctx is cancelled, the process is terminated. +func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + defer func() { + if err != nil { + ec.Lock() + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + if err := ec.CloseStreams(); err != nil { + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + } + ec.Unlock() + c.ExecCommands.Delete(ec.ID) + } + }() + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.StreamConfig.NewInputPipes() + } else { + ec.StreamConfig.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Env: ec.Env, + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachConfig := stream.AttachConfig{ + TTY: ec.Tty, + UseStdin: cStdin != nil, + UseStdout: cStdout != nil, + UseStderr: cStderr != nil, + Stdin: cStdin, + Stdout: cStdout, + Stderr: cStderr, + DetachKeys: ec.DetachKeys, + CloseStdin: true, + } + ec.StreamConfig.AttachStreams(&attachConfig) + attachErr := ec.StreamConfig.CopyStreams(ctx, 
&attachConfig)
+
+	systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio)
+	if err != nil {
+		return err
+	}
+	ec.Lock()
+	ec.Pid = systemPid
+	ec.Unlock()
+
+	select {
+	case <-ctx.Done():
+		logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
+		d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"]))
+		select {
+		case <-time.After(termProcessTimeout * time.Second):
+			logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout)
+			d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"]))
+		case <-attachErr:
+			// TERM signal worked
+		}
+		return fmt.Errorf("context cancelled")
+	case err := <-attachErr:
+		if err != nil {
+			if _, ok := err.(term.EscapeError); !ok {
+				return fmt.Errorf("exec attach failed with error: %v", err)
+			}
+			d.LogContainerEvent(c, "exec_detach")
+		}
+	}
+	return nil
+}
+
+// execCommandGC runs a ticker to clean up the daemon references
+// of exec configs that are no longer part of the container.
+func (d *Daemon) execCommandGC() {
+	for range time.Tick(5 * time.Minute) {
+		var (
+			cleaned          int
+			liveExecCommands = d.containerExecIds()
+		)
+		for id, config := range d.execCommands.Commands() {
+			if config.CanRemove {
+				cleaned++
+				d.execCommands.Delete(id)
+			} else {
+				if _, exists := liveExecCommands[id]; !exists {
+					config.CanRemove = true
+				}
+			}
+		}
+		if cleaned > 0 {
+			logrus.Debugf("clean %d unused exec commands", cleaned)
+		}
+	}
+}
+
+// containerExecIds returns a list of all the current exec ids that are in use
+// and running inside a container.
+func (d *Daemon) containerExecIds() map[string]struct{} {
+	ids := map[string]struct{}{}
+	for _, c := range d.containers.List() {
+		for _, id := range c.ExecCommands.List() {
+			ids[id] = struct{}{}
+		}
+	}
+	return ids
+}
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
new file mode 100644
index 0000000..933136f
--- /dev/null
+++ b/daemon/exec/exec.go
@@ -0,0 +1,118 @@
+package exec
+
+import (
+	"runtime"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container/stream"
+	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+// Config holds the configurations for execs. The Daemon keeps
+// track of both running and finished execs so that they can be
+// examined both during and after completion.
+type Config struct {
+	sync.Mutex
+	StreamConfig *stream.Config
+	ID           string
+	Running      bool
+	ExitCode     *int
+	OpenStdin    bool
+	OpenStderr   bool
+	OpenStdout   bool
+	CanRemove    bool
+	ContainerID  string
+	DetachKeys   []byte
+	Entrypoint   string
+	Args         []string
+	Tty          bool
+	Privileged   bool
+	User         string
+	Env          []string
+	Pid          int
+}
+
+// NewConfig initializes a new exec configuration
+func NewConfig() *Config {
+	return &Config{
+		ID:           stringid.GenerateNonCryptoID(),
+		StreamConfig: stream.NewConfig(),
+	}
+}
+
+// InitializeStdio is called by libcontainerd to connect the stdio.
+func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error {
+	c.StreamConfig.CopyToPipe(iop)
+
+	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
+		if iop.Stdin != nil {
+			if err := iop.Stdin.Close(); err != nil {
+				logrus.Errorf("error closing exec stdin: %+v", err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// CloseStreams closes the stdio streams for the exec
+func (c *Config) CloseStreams() error {
+	return c.StreamConfig.CloseStreams()
+}
+
+// Store keeps track of the exec configurations.
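+// All accessors take the embedded RWMutex, and Commands returns a copy of
+// the map, so callers such as the daemon's execCommandGC loop can iterate
+// a snapshot without holding the store lock (editor's note).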
+type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go new file mode 100644 index 0000000..bb11c11 --- /dev/null +++ b/daemon/exec_linux.go @@ -0,0 +1,50 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &specs.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. 
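+			// (Editor's note) ensureDefaultAppArmorProfile is expected to be
+			// cheap when the profile is already loaded, so re-checking on
+			// every exec into a docker-default container is acceptable.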
+ if err := ensureDefaultAppArmorProfile(); err != nil { + return err + } + } + } + return nil +} diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go new file mode 100644 index 0000000..7003355 --- /dev/null +++ b/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go new file mode 100644 index 0000000..b7b4514 --- /dev/null +++ b/daemon/exec_windows.go @@ -0,0 +1,16 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + // Process arguments need to be escaped before sending to OCI. + if c.Platform == "windows" { + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + } + return nil +} diff --git a/daemon/export.go b/daemon/export.go new file mode 100644 index 0000000..402e675 --- /dev/null +++ b/daemon/export.go @@ -0,0 +1,59 @@ +package daemon + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. +func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("the daemon on this platform does not support export of a container") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/daemon/getsize_unix.go b/daemon/getsize_unix.go new file mode 100644 index 0000000..434fa43 --- /dev/null +++ b/daemon/getsize_unix.go @@ -0,0 +1,43 @@ +// +build linux freebsd solaris + +package daemon + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +// getSize returns the real size & virtual size of the container. 
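+// (Editor's note) "real size" is the space consumed by the container's
+// writable layer (sizeRw); "virtual size" additionally counts the
+// read-only image layers (sizeRootfs), so a freshly created container
+// reports a near-zero sizeRw but a sizeRootfs close to the image size.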
+func (daemon *Daemon) getSize(containerID string) (int64, int64) {
+	var (
+		sizeRw, sizeRootfs int64
+		err                error
+	)
+
+	rwlayer, err := daemon.stores[runtime.GOOS].layerStore.GetRWLayer(containerID)
+	if err != nil {
+		logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err)
+		return sizeRw, sizeRootfs
+	}
+	defer daemon.stores[runtime.GOOS].layerStore.ReleaseRWLayer(rwlayer)
+
+	sizeRw, err = rwlayer.Size()
+	if err != nil {
+		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s",
+			daemon.GraphDriverName(runtime.GOOS), containerID, err)
+		// FIXME: GetSize should return an error. Not changing it now in case
+		// there is a side-effect.
+		sizeRw = -1
+	}
+
+	if parent := rwlayer.Parent(); parent != nil {
+		sizeRootfs, err = parent.Size()
+		if err != nil {
+			sizeRootfs = -1
+		} else if sizeRw != -1 {
+			sizeRootfs += sizeRw
+		}
+	}
+	return sizeRw, sizeRootfs
+}
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
new file mode 100644
index 0000000..15710e9
--- /dev/null
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -0,0 +1,642 @@
+// +build linux
+
+/*
+
+aufs driver directory structure
+
+  .
+  ├── layers // Metadata of layers
+  │   ├── 1
+  │   ├── 2
+  │   └── 3
+  ├── diff   // Content of the layer
+  │   ├── 1  // Contains layers that need to be mounted for the id
+  │   ├── 2
+  │   └── 3
+  └── mnt    // Mount points for the rw layers to be mounted
+      ├── 1
+      ├── 2
+      └── 3
+
+*/
+
+package aufs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/locker"
+	mountpk "github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/system"
+
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"github.com/opencontainers/selinux/go-selinux/label"
+)
+
+var (
+	// ErrAufsNotSupported is returned if aufs is not supported by the host.
+	ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
+	// ErrAufsNested means aufs cannot be used because we are in a non-init user namespace
+	ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
+	backingFs     = ""
+
+	enableDirpermLock sync.Once
+	enableDirperm     bool
+)
+
+func init() {
+	graphdriver.Register("aufs", Init)
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	sync.Mutex
+	root          string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
+	ctr           *graphdriver.RefCounter
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
+	naiveDiff     graphdriver.DiffDriver
+	locker        *locker.Locker
+}
+
+// Init returns a new AUFS driver.
+// An error is returned if AUFS is not supported.
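+//
+// The driver is looked up by the name registered in init above, so a
+// daemon started with `dockerd --storage-driver aufs` ends up here with
+// root pointing at the daemon's aufs home, typically /var/lib/docker/aufs
+// (editor's sketch of the call path, not part of the original change).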
+func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
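+//
+// For aufs this is simply Create (editor's note): writability is decided
+// at mount time, when the layer is stacked "=rw" on top of its "=ro+wh"
+// parents in aufsMount, not when the directories are created.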
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return a.Create(id, parent, opts)
+}
+
+// Create three folders for each id
+// mnt, layers, and diff
+func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+
+	if opts != nil && len(opts.StorageOpt) != 0 {
+		return fmt.Errorf("--storage-opt is not supported for aufs")
+	}
+
+	if err := a.createDirsFor(id); err != nil {
+		return err
+	}
+	// Write the layers metadata
+	f, err := os.Create(path.Join(a.rootPath(), "layers", id))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if parent != "" {
+		ids, err := getParentIDs(a.rootPath(), parent)
+		if err != nil {
+			return err
+		}
+
+		if _, err := fmt.Fprintln(f, parent); err != nil {
+			return err
+		}
+		for _, i := range ids {
+			if _, err := fmt.Fprintln(f, i); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// createDirsFor creates two directories for the given id.
+// mnt and diff
+func (a *Driver) createDirsFor(id string) error {
+	paths := []string{
+		"mnt",
+		"diff",
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps)
+	if err != nil {
+		return err
+	}
+	// Directory permission is 0755.
+	// The paths of the directories are <aufs_root_path>/mnt/<image_id>
+	// and <aufs_root_path>/diff/<image_id>
+	for _, p := range paths {
+		if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Remove will unmount and remove the given id.
+func (a *Driver) Remove(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
+	a.pathCacheLock.Lock()
+	mountpoint, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+	if !exists {
+		mountpoint = a.getMountpoint(id)
+	}
+
+	var retries int
+	for {
+		mounted, err := a.mounted(mountpoint)
+		if err != nil {
+			return err
+		}
+		if !mounted {
+			break
+		}
+
+		if err := a.unmount(mountpoint); err != nil {
+			if err != syscall.EBUSY {
+				return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err)
+			}
+			if retries >= 5 {
+				return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err)
+			}
+			// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+			retries++
+			logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+			time.Sleep(100 * time.Millisecond)
+			continue
+		}
+		break
+	}
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that docker doesn't find it anymore) before doing removal of
+	// the whole tree.
+	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
+		if err == syscall.EBUSY {
+			logrus.Warn("os.Rename err due to EBUSY")
+		}
+		return err
+	}
+	defer system.EnsureRemoveAll(tmpMntPath)
+
+	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	defer system.EnsureRemoveAll(tmpDiffpath)
+
+	// Remove the layers file for the id
+	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	a.pathCacheLock.Lock()
+	delete(a.pathCache, id)
+	a.pathCacheLock.Unlock()
+	return nil
+}
+
+// Get returns the rootfs path for the id.
+// This will mount the dir at its given path
+func (a *Driver) Get(id, mountLabel string) (string, error) {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
+	parents, err := a.getParentLayerPaths(id)
+	if err != nil && !os.IsNotExist(err) {
+		return "", err
+	}
+
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+
+	if !exists {
+		m = a.getDiffPath(id)
+		if len(parents) > 0 {
+			m = a.getMountpoint(id)
+		}
+	}
+	if count := a.ctr.Increment(m); count > 1 {
+		return m, nil
+	}
+
+	// If a dir does not have a parent (no layers), do not try to mount it;
+	// just return the diff path to the data.
+	if len(parents) > 0 {
+		if err := a.mount(id, m, mountLabel, parents); err != nil {
+			return "", err
+		}
+	}
+
+	a.pathCacheLock.Lock()
+	a.pathCache[id] = m
+	a.pathCacheLock.Unlock()
+	return m, nil
+}
+
+// Put unmounts and updates list of active mounts.
+func (a *Driver) Put(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	if !exists {
+		m = a.getMountpoint(id)
+		a.pathCache[id] = m
+	}
+	a.pathCacheLock.Unlock()
+	if count := a.ctr.Decrement(m); count > 0 {
+		return nil
+	}
+
+	err := a.unmount(m)
+	if err != nil {
+		logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
+	}
+	return err
+}
+
+// isParent returns whether the passed in parent is the direct parent of the passed in layer
+func (a *Driver) isParent(id, parent string) bool {
+	parents, _ := getParentIDs(a.rootPath(), id)
+	if parent == "" && len(parents) > 0 {
+		return false
+	}
+	return !(len(parents) > 0 && parent != parents[0])
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Diff(id, parent)
+	}
+
+	// AUFS doesn't need the parent layer to produce a diff.
+	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+		Compression:     archive.Uncompressed,
+		ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
+		UIDMaps:         a.uidMaps,
+		GIDMaps:         a.gidMaps,
+	})
+}
+
+type fileGetNilCloser struct {
+	storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+	return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	p := path.Join(a.rootPath(), "diff", id)
+	return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
+func (a *Driver) applyDiff(id string, diff io.Reader) error {
+	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+		UIDMaps: a.uidMaps,
+		GIDMaps: a.gidMaps,
+	})
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.DiffSize(id, parent)
+	}
+	// AUFS doesn't need the parent layer to calculate the diff size.
+	return directory.Size(path.Join(a.rootPath(), "diff", id))
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
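+//
+// (Editor's note) When parent is not the layer's direct parent, the call
+// is delegated to naiveDiff below, which applies the archive through the
+// generic graphdriver path instead of the aufs diff directory shortcut.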
+func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.ApplyDiff(id, parent, diff)
+	}
+
+	// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
+	if err = a.applyDiff(id, diff); err != nil {
+		return
+	}
+
+	// FIXME: Instead of syncing all the filesystems we should be fsyncing each
+	// file as the tar archive gets unpacked
+	syscall.Sync()
+
+	return a.DiffSize(id, parent)
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Changes(id, parent)
+	}
+
+	// AUFS doesn't have snapshots, so we need to get changes from all parent
+	// layers.
+	layers, err := a.getParentLayerPaths(id)
+	if err != nil {
+		return nil, err
+	}
+	return archive.Changes(layers, path.Join(a.rootPath(), "diff", id))
+}
+
+func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
+	parentIds, err := getParentIDs(a.rootPath(), id)
+	if err != nil {
+		return nil, err
+	}
+	layers := make([]string, len(parentIds))
+
+	// Get the diff paths for all the parent ids
+	for i, p := range parentIds {
+		layers[i] = path.Join(a.rootPath(), "diff", p)
+	}
+	return layers, nil
+}
+
+func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+	a.Lock()
+	defer a.Unlock()
+
+	// If the id is mounted or we get an error return
+	if mounted, err := a.mounted(target); err != nil || mounted {
+		return err
+	}
+
+	rw := a.getDiffPath(id)
+
+	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
+		return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
+	}
+	return nil
+}
+
+func (a *Driver) unmount(mountPath string) error {
+	a.Lock()
+	defer a.Unlock()
+
+	if mounted, err := a.mounted(mountPath); err != nil || !mounted {
+		return err
+	}
+	if err := Unmount(mountPath); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *Driver) mounted(mountpoint string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
+}
+
+// Cleanup aufs and unmount all mountpoints
+func (a *Driver) Cleanup() error {
+	var dirs []string
+	if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+		dirs = append(dirs, path)
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	for _, m := range dirs {
+		if err := a.unmount(m); err != nil {
+			logrus.Debugf("aufs error unmounting %s: %s", m, err)
+		}
+	}
+	return mountpk.Unmount(a.root)
+}
+
+func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
+	defer func() {
+		if err != nil {
+			Unmount(target)
+		}
+	}()
+
+	// Mount options are clipped to the page size (4096 bytes). If there are
+	// more layers, the remaining ones are remounted individually using append.
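+	//
+	// (Editor's note) Concretely: the buffer built below reserves
+	// pagesize minus the SELinux label minus a fixed 54-byte allowance
+	// for option boilerplate, packs "br:<rw>=rw" plus as many
+	// ":<layer>=ro+wh" branches as fit, and the loop at the end appends
+	// the rest one MS_REMOUNT at a time.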
+
+	offset := 54
+	if useDirperm() {
+		offset += len(",dirperm1")
+	}
+	b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
+
+	index := 0
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		if bp+len(layer) > len(b) {
+			break
+		}
+		bp += copy(b[bp:], layer)
+	}
+
+	opts := "dio,xino=/dev/shm/aufs.xino"
+	if useDirperm() {
+		opts += ",dirperm1"
+	}
+	data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+	if err = mount("none", target, "aufs", 0, data); err != nil {
+		return
+	}
+
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+		if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+// useDirperm checks whether the dirperm1 mount option can be used with the
+// current version of aufs.
+func useDirperm() bool {
+	enableDirpermLock.Do(func() {
+		base, err := ioutil.TempDir("", "docker-aufs-base")
+		if err != nil {
+			logrus.Errorf("error checking dirperm1: %v", err)
+			return
+		}
+		defer os.RemoveAll(base)
+
+		union, err := ioutil.TempDir("", "docker-aufs-union")
+		if err != nil {
+			logrus.Errorf("error checking dirperm1: %v", err)
+			return
+		}
+		defer os.RemoveAll(union)
+
+		opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
+		if err := mount("none", union, "aufs", 0, opts); err != nil {
+			return
+		}
+		enableDirperm = true
+		if err := Unmount(union); err != nil {
+			logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
+		}
+	})
+	return enableDirperm
+}
diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go
new file mode 100644
index 0000000..baf0fd8
--- /dev/null
+++ b/daemon/graphdriver/aufs/aufs_test.go
@@ -0,0 +1,802 @@
+// +build linux
+
+package aufs
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"sync"
+	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+var (
+	tmpOuter = path.Join(os.TempDir(), "aufs-tests")
+	tmp      = path.Join(tmpOuter, "aufs")
+)
+
+func init() {
+	reexec.Init()
+}
+
+func testInit(dir string, t testing.TB) graphdriver.Driver {
+	d, err := Init(dir, nil, nil, nil)
+	if err != nil {
+		if err == graphdriver.ErrNotSupported {
+			t.Skip(err)
+		} else {
+			t.Fatal(err)
+		}
+	}
+	return d
+}
+
+func newDriver(t testing.TB) *Driver {
+	if err := os.MkdirAll(tmp, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	d := testInit(tmp, t)
+	return d.(*Driver)
+}
+
+func TestNewDriver(t *testing.T) {
+	if err := os.MkdirAll(tmp, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	d := testInit(tmp, t)
+	defer os.RemoveAll(tmp)
+	if d == nil {
+		t.Fatal("Driver should not be nil")
+	}
+}
+
+func TestAufsString(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if d.String() != "aufs" {
+		t.Fatalf("Expected aufs got %s", d.String())
+	}
+}
+
+func TestCreateDirStructure(t *testing.T) {
+	newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	paths := []string{
+		"mnt",
+		"layers",
+		"diff",
+	}
+
+	for _, p := range paths {
+		if _, err := os.Stat(path.Join(tmp, p)); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+// We should be able to create two drivers with the same dir structure
+func TestNewDriverFromExistingDir(t *testing.T) {
+	if err := os.MkdirAll(tmp,
0755); err != nil {
+		t.Fatal(err)
+	}
+
+	testInit(tmp, t)
+	testInit(tmp, t)
+	os.RemoveAll(tmp)
+}
+
+func TestCreateNewDir(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCreateNewDirStructure(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	paths := []string{
+		"mnt",
+		"diff",
+		"layers",
+	}
+
+	for _, p := range paths {
+		if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestRemoveImage(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := d.Remove("1"); err != nil {
+		t.Fatal(err)
+	}
+
+	paths := []string{
+		"mnt",
+		"diff",
+		"layers",
+	}
+
+	for _, p := range paths {
+		if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil {
+			t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p)
+		}
+	}
+}
+
+func TestGetWithoutParent(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	diffPath, err := d.Get("1", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := path.Join(tmp, "diff", "1")
+	if diffPath != expected {
+		t.Fatalf("Expected path %s got %s", expected, diffPath)
+	}
+}
+
+func TestCleanupWithNoDirs(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestCleanupWithDir(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMountedFalseResponse(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	response, err := d.mounted(d.getDiffPath("1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if response != false {
+		t.Fatal("mounted should return false for dir id 1")
+	}
+}
+
+func TestMountedTrueResponse(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+	defer d.Cleanup()
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Create("2", "1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err := d.Get("2", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	response, err := d.mounted(d.pathCache["2"])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if response != true {
+		t.Fatal("mounted should return true for dir id 2")
+	}
+}
+
+func TestMountWithParent(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Create("2", "1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		if err := d.Cleanup(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	mntPath, err := d.Get("2", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if mntPath == "" {
+		t.Fatal("mntPath should not be empty string")
+	}
+
+	expected := path.Join(tmp, "mnt", "2")
+	if mntPath != expected {
+		t.Fatalf("Expected %s got %s", expected, mntPath)
+	}
+}
+
+func TestRemoveMountedDir(t *testing.T) {
+	d := newDriver(t)
+	defer os.RemoveAll(tmp)
+
+	if err := d.Create("1", "", nil); err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Create("2", "1", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		if err := d.Cleanup(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+ mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatal("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker", nil); err == nil { + t.Fatal("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatal("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.CreateReadWrite("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.CreateReadWrite("3", "2", nil); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "2") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != 
nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "1") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id none should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2", nil); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return 
hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.CreateReadWrite(current, parent, nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" + } +} + +func BenchmarkConcurrentAccess(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + d := newDriver(b) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + numConcurrent := 256 + // create a bunch of ids + var ids []string + for i := 0; i < numConcurrent; i++ { + ids = append(ids, stringid.GenerateNonCryptoID()) + } + + if err := d.Create(ids[0], "", nil); err != nil { + b.Fatal(err) + } + + if err := d.Create(ids[1], ids[0], nil); err != nil { + b.Fatal(err) + } + + parent := ids[1] + ids = append(ids[2:]) + + chErr := make(chan error, numConcurrent) + var outerGroup sync.WaitGroup + outerGroup.Add(len(ids)) + b.StartTimer() + + // here's the actual bench + for _, id := range ids { + go func(id string) { + defer outerGroup.Done() + if err := d.Create(id, parent, nil); err != nil { + b.Logf("Create %s failed", id) + chErr <- err + return + } + var innerGroup sync.WaitGroup + for i := 0; i < b.N; i++ { + innerGroup.Add(1) + go func() { + d.Get(id, "") + d.Put(id) + innerGroup.Done() + }() + } + innerGroup.Wait() + d.Remove(id) + }(id) + } + + outerGroup.Wait() + b.StopTimer() + close(chErr) + for err := range chErr { + if err != nil { + b.Log(err) + b.Fail() + } + } +} diff --git a/daemon/graphdriver/aufs/dirs.go b/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 0000000..d2325fc --- /dev/null +++ b/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the 
layer IDs under root, i.e. the names of the non-directory entries there.
+func loadIds(root string) ([]string, error) {
+	dirs, err := ioutil.ReadDir(root)
+	if err != nil {
+		return nil, err
+	}
+	out := []string{}
+	for _, d := range dirs {
+		if !d.IsDir() {
+			out = append(out, d.Name())
+		}
+	}
+	return out, nil
+}
+
+// Read the layers file for the current id and return all the
+// parent layers, listed one per line in the file.
+//
+// If there are no lines in the file then the id has no parent
+// and an empty slice is returned.
+func getParentIDs(root, id string) ([]string, error) {
+	f, err := os.Open(path.Join(root, "layers", id))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	out := []string{}
+	s := bufio.NewScanner(f)
+
+	for s.Scan() {
+		if t := s.Text(); t != "" {
+			out = append(out, t)
+		}
+	}
+	return out, s.Err()
+}
+
+func (a *Driver) getMountpoint(id string) string {
+	return path.Join(a.mntPath(), id)
+}
+
+func (a *Driver) mntPath() string {
+	return path.Join(a.rootPath(), "mnt")
+}
+
+func (a *Driver) getDiffPath(id string) string {
+	return path.Join(a.diffPath(), id)
+}
+
+func (a *Driver) diffPath() string {
+	return path.Join(a.rootPath(), "diff")
+}
diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go
new file mode 100644
index 0000000..da1e892
--- /dev/null
+++ b/daemon/graphdriver/aufs/mount.go
@@ -0,0 +1,21 @@
+// +build linux
+
+package aufs
+
+import (
+	"os/exec"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Unmount the target specified, running `auplink flush` first.
+func Unmount(target string) error {
+	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
+		logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
+	}
+	if err := syscall.Unmount(target, 0); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/daemon/graphdriver/aufs/mount_linux.go b/daemon/graphdriver/aufs/mount_linux.go
new file mode 100644
index 0000000..8062bae
--- /dev/null
+++ b/daemon/graphdriver/aufs/mount_linux.go
@@ -0,0 +1,7 @@
+package aufs
+
+import "syscall"
+
+func mount(source string, target string, fstype string, flags uintptr, data string) error {
+	return syscall.Mount(source, target, fstype, flags, data)
+}
diff --git a/daemon/graphdriver/aufs/mount_unsupported.go b/daemon/graphdriver/aufs/mount_unsupported.go
new file mode 100644
index 0000000..d030b06
--- /dev/null
+++ b/daemon/graphdriver/aufs/mount_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package aufs
+
+import "errors"
+
+// MsRemount is declared here so that code shared with the Linux build still compiles on other platforms.
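+//
+// (On Linux the corresponding value would be syscall.MS_REMOUNT; the zero
+// value here only satisfies references in code that is compiled, but never
+// executed, on these platforms.)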
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
new file mode 100644
index 0000000..3eb4ce8
--- /dev/null
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -0,0 +1,671 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+    snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/selinux/go-selinux/label"
+)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, graphdriver.ErrPrerequisites
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, userDiskQuota, err := parseOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps,
+		options: opt,
+	}
+
+	if userDiskQuota {
+		if err := driver.subvolEnableQuota(); err != nil {
+			return nil, err
+		}
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
+	var options btrfsOptions
+	userDiskQuota := false
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, userDiskQuota, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, userDiskQuota, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		default:
+			return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, userDiskQuota, nil
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	// root of the file system
+	home         string
+	uidMaps      []idtools.IDMap
+	gidMaps      []idtools.IDMap
+	options      btrfsOptions
+	quotaEnabled bool
+	once         sync.Once
+}
+
+// String prints the name of the driver (btrfs).
+func (d *Driver) String() string {
+	return "btrfs"
+}
+
+// Status returns current driver information in a two-dimensional string array.
+// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
+// Version information can be used to check compatibility with your kernel.
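+//
+// A minimal sketch of rendering this status (the variable names are
+// illustrative):
+//
+//	for _, kv := range d.Status() {
+//		fmt.Printf("%s: %s\n", kv[0], kv[1])
+//	}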
+func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if err := d.subvolDisableQuota(); err != nil { + return err + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string, quotaEnabled bool) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, 
dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := subvolQgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + +func (d *Driver) subvolEnableQuota() error { + d.updateQuotaStatus() + + if d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = true + + return nil +} + +func (d *Driver) subvolDisableQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = false + + return nil +} + +func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 
fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of BTRFS_QGROUP_STATUS_KEY. +// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. +// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func subvolQgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. 
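+//
+// A hypothetical call that creates a quota-limited layer on top of a parent
+// (the ids and the size are illustrative):
+//
+//	opts := &graphdriver.CreateOpts{StorageOpt: map[string]string{"size": "20G"}}
+//	err := d.Create("child-id", "parent-id", opts)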
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.subvolEnableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + return err + } + if err := system.EnsureRemoveAll(dir); err != nil { + return err + } + if err := d.subvolRescanQuota(); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. 
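+//
+// Callers are expected to pair Get with Put once they are done with the
+// returned path; a sketch (the id is illustrative):
+//
+//	dir, err := d.Get("child-id", "")
+//	if err == nil {
+//		defer d.Put("child-id")
+//		// ... read or write files under dir ...
+//	}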
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { + if err := d.subvolEnableQuota(); err != nil { + return "", err + } + if err := subvolLimitQgroup(dir, size); err != nil { + return "", err + } + } + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000..0038dbc --- /dev/null +++ b/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,63 @@ +// +build linux + +package btrfs + +import ( + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsSubvolDelete(t *testing.T) { + d := graphtest.GetDriver(t, "btrfs") + if err := d.CreateReadWrite("test", "", nil); err != nil { + t.Fatal(err) + } + defer graphtest.PutDriver(t) + + dir, err := d.Get("test", "") + if err != nil { + t.Fatal(err) + } + defer d.Put("test") + + if err := subvolCreate(dir, "subvoltest"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { + t.Fatal(err) + } + + if err := d.Remove("test"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { + t.Fatalf("expected not exist error on nested subvol, got: %v", err) + } +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/btrfs/dummy_unsupported.go b/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000..f070888 --- /dev/null +++ b/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/daemon/graphdriver/btrfs/version.go b/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000..73d90cd --- /dev/null +++ b/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return 
int(C.BTRFS_LIB_VERSION) +} diff --git a/daemon/graphdriver/btrfs/version_none.go b/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000..f802fbc --- /dev/null +++ b/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/daemon/graphdriver/btrfs/version_test.go b/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000..d78d577 --- /dev/null +++ b/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux,!btrfs_noversion + +package btrfs + +import ( + "testing" +) + +func TestLibVersion(t *testing.T) { + if btrfsLibVersion() <= 0 { + t.Error("expected output from btrfs lib version > 0") + } +} diff --git a/daemon/graphdriver/counter.go b/daemon/graphdriver/counter.go new file mode 100644 index 0000000..72551a3 --- /dev/null +++ b/daemon/graphdriver/counter.go @@ -0,0 +1,59 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000..6594fa6 --- /dev/null +++ b/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,98 @@ +# devicemapper - a storage backend based on Device Mapper + +## Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. The preferred model is +to have a thin pool reserved outside of Docker and passed to the +daemon via the `--storage-opt dm.thinpooldev` option. Alternatively, +the device mapper graphdriver can setup a block device to handle this +for you via the `--storage-opt dm.directlvm_device` option. + +As a fallback if no thin pool is provided, loopback files will be +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Docker daemon has a +`--storage-opt dm.thinpooldev` argument provided. 
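+
+For example, assuming a thin pool that was prepared outside of Docker and is
+exposed at the (illustrative) path `/dev/mapper/docker-thinpool`, the daemon
+would be started as:
+
+    $ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/docker-thinpool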
+
+In loopback, a thin pool is created at `/var/lib/docker/devicemapper`
+(devicemapper graph location) based on two block devices, one for
+data and one for metadata. By default these block devices are created
+automatically by using loopback mounts of automatically created sparse
+files.
+
+The default loopback files used are
+`/var/lib/docker/devicemapper/devicemapper/data` and
+`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata
+required to map from docker entities to the corresponding devicemapper
+volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json`
+file (encoded as JSON).
+
+In order to support multiple devicemapper graphs on a system, the thin
+pool will be named something like: `docker-0:33-19478248-pool`, where
+the `0:33` part is the major:minor device number and `19478248` is the
+inode number of the `/var/lib/docker/devicemapper` directory.
+
+On the thin pool, docker automatically creates a base thin device,
+called something like `docker-0:33-19478248-base`, of a fixed
+size. This is automatically formatted with an empty filesystem on
+creation. This device is the base of all docker images and
+containers. All base images are snapshots of this device and those
+images are then in turn used as snapshots for other images and
+eventually containers.
+
+## Information on `docker info`
+
+As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver
+will display something like:
+
+    $ sudo docker info
+    [...]
+    Storage Driver: devicemapper
+     Pool Name: docker-253:1-17538953-pool
+     Pool Blocksize: 65.54 kB
+     Base Device Size: 107.4 GB
+     Data file: /dev/loop4
+     Metadata file: /dev/loop4
+     Data Space Used: 2.536 GB
+     Data Space Total: 107.4 GB
+     Data Space Available: 104.8 GB
+     Metadata Space Used: 7.93 MB
+     Metadata Space Total: 2.147 GB
+     Metadata Space Available: 2.14 GB
+     Udev Sync Supported: true
+     Data loop file: /home/docker/devicemapper/devicemapper/data
+     Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
+     Library Version: 1.02.82-git (2013-10-04)
+    [...]
+
+### status items
+
+Each item in the indented section under `Storage Driver: devicemapper` is
+status information about the driver.
+ * `Pool Name` name of the devicemapper pool for this driver.
+ * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation.
+ * `Base Device Size` tells the maximum size of a container and image.
+ * `Data file` block device file used for the devicemapper data.
+ * `Metadata file` block device file used for the devicemapper metadata.
+ * `Data Space Used` tells how much of `Data file` is currently used.
+ * `Data Space Total` tells the maximum size of the `Data file`.
+ * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ * `Metadata Space Used` tells how much of `Metadata file` is currently used.
+ * `Metadata Space Total` tells the maximum size of the `Metadata file`.
+ * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
+ * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`.
+ * `Data loop file` file attached to `Data file`, if loopback device is used + * `Metadata loop file` file attached to `Metadata file`, if loopback device is used + * `Library Version` from the libdevmapper used + +## About the devicemapper options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `dockerd --storage-opt dm.foo=bar`. + +These options are currently documented both in [the man +page](../../../man/docker.1.md) and in [the online +documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#/storage-driver-options). +If you add an options, update both the `man` page and the documentation. diff --git a/daemon/graphdriver/devmapper/device_setup.go b/daemon/graphdriver/devmapper/device_setup.go new file mode 100644 index 0000000..31eeae7 --- /dev/null +++ b/daemon/graphdriver/devmapper/device_setup.go @@ -0,0 +1,247 @@ +package devmapper + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if reflect.DeepEqual(cfg, directLVMConfig{}) { + return nil + } + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find 
blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + if err := checkDevAvailable(dev); err != nil { + return err + } + if err := checkDevInVG(dev); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(dev); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + pvCreate, err := exec.LookPath("pvcreate") + if err != nil { + return errors.Wrap(err, "error lookuping up command `pvcreate` while setting up direct lvm") + } + + vgCreate, err := exec.LookPath("vgcreate") + if err != nil { + return errors.Wrap(err, "error lookuping up command `vgcreate` while setting up direct lvm") + } + + lvCreate, err := exec.LookPath("lvcreate") + if err != nil { + return errors.Wrap(err, "error lookuping up command `lvcreate` while setting up direct lvm") + } + + lvConvert, err := exec.LookPath("lvconvert") + if err != nil { + return errors.Wrap(err, "error lookuping up command `lvconvert` while setting up direct lvm") + } + + lvChange, err := exec.LookPath("lvchange") + if err != nil { + return errors.Wrap(err, "error lookuping up command `lvchange` while setting up direct lvm") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + + out, err := exec.Command(pvCreate, "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(vgCreate, "docker", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpool", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command(lvCreate, "--wipesignatures", "y", "-n", "thinpoolmeta", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, 
string(out)) + } + + out, err = exec.Command(lvConvert, "-y", "--zero", "n", "-c", "512K", "--thinpool", "docker/thinpool", "--poolmetadata", "docker/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = ioutil.WriteFile("/etc/lvm/profile/docker-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing docker thinp autoextend profile") + } + + out, err = exec.Command(lvChange, "--metadataprofile", "docker-thinpool", "docker/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000..c0116c7 --- /dev/null +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2812 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/loopback" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + units "github.com/docker/go-units" + "github.com/pkg/errors" + + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + // We retry device removal so many a times that even error messages + // will fill up console during normal operation. So only log Fatal + // messages by default. + logLevel = devicemapper.LogLevelFatal + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. 
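+	//
+	// A sketch of a safe acquisition order for two related devices
+	// (the variable names are illustrative):
+	//
+	//	parent.lock.Lock()
+	//	child.lock.Lock()
+	//	devices.Lock()
+	//	// ... call into libdevmapper ...
+	//	devices.Unlock()
+	//	child.lock.Unlock()
+	//	parent.lock.Unlock()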
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. 
+	Size uint64
+	// TransactionID is a unique integer per device set used to identify an operation on the file system; this number is incremental.
+	TransactionID uint64
+	// SizeInSectors indicates the size of the device, in sectors.
+	SizeInSectors uint64
+	// MappedSectors indicates number of mapped sectors.
+	MappedSectors uint64
+	// HighestMappedSector is the pointer to the highest mapped sector.
+	HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+	return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+	hash := info.Hash
+	if hash == "" {
+		hash = "base"
+	}
+	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+	return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+	return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+	return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+	return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+	return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+	return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+	dirname := devices.loopbackDir()
+	filename := path.Join(dirname, name)
+
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and the new size is larger than its current size,
+// it grows to the new size. Either way it returns the full path.
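+//
+// A hypothetical call that creates (or grows) the default 100 GB data file:
+//
+//	filename, err := devices.ensureImage("data", 100*1024*1024*1024)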
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
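+// A sketch of the call as made by createRegisterDevice further down, where
+// deviceID and hash belong to the currently open transaction:
+//
+//	info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID)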
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + args = append(args, devices.mkfsArgs...) 
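+	// With no extra dm.mkfsarg storage options configured, args ends up
+	// holding only the device name, so the xfs branch below effectively
+	// runs (device name illustrative):
+	//
+	//	mkfs.xfs /dev/mapper/docker-253:1-17538953-base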
+ + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
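+	// (Deferred deletion corresponds to the dm.use_deferred_deletion
+	// storage option; when it is enabled, the ticker loop below
+	// periodically retries devices that could not be deleted right away.)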
+ if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
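+ // (getNextFreeDeviceID, defined above, scans the deviceIDMap bitmap
+ // starting at NextDeviceID and wraps at the 24-bit boundary, so a
+ // replacement ID is found in at most maxDeviceID+1 probes.)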
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
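+ // As in createRegisterDevice above, the still-open transaction is
+ // refreshed with the replacement ID below, so a crash at this point
+ // can be rolled back by processPendingTransaction() on the next start.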
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
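+ // The stored UUID comes from blkid on the activated base device (see
+ // getDeviceUUID above); re-checking it on every start catches metadata
+ // directories that are reused against a different thin pool.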
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
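+ // checkThinPool (defined above) refuses a pool that has non-zero used
+ // data blocks or a non-zero transaction ID, i.e. one that some other
+ // deviceset has already written to.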
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: 
%s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
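+ // (Older daemons never wrote this file, so its absence simply leaves
+ // the in-memory defaults untouched.)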
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
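+// For illustration (hypothetical paths/values): a call such as
+//
+//   name, maj, min, err := getLoopFileDeviceMajMin("/var/lib/docker/devicemapper/devicemapper/data")
+//
+// would return something like ("/dev/loop0", 7, 0, nil) when /dev/loop0
+// is the loop device backed by that file.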
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
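+ // Two independent capabilities gate this: driverDeferredRemovalSupport
+ // is derived from the kernel dm driver version (>= 4.27.0, see
+ // determineDriverCapabilities above), while
+ // devicemapper.LibraryDeferredRemovalSupport reflects whether libdm was
+ // built with deferred-remove support.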
+ if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + if dockerversion.IAmStatic == "true" { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } else { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + } + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + //create the root dir of the devmapper driver ownership to match this + //daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + prevSetupConfig, err := readLVMConfig(devices.root) + if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "docker-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the docker root dir + st, 
err := os.Stat(devices.root)
+ if err != nil {
+ return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
+ }
+ sysSt := st.Sys().(*syscall.Stat_t)
+ // "reg-" stands for "regular file".
+ // In the future we might use "dev-" for "device file", etc.
+ // docker-maj,min[-inode] stands for:
+ // - Managed by docker
+ // - The target of this device is at major <maj> and minor <min>
+ // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
+ devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
+ logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
+
+ // Check for the existence of the thin-pool device
+ poolExists, err := devices.thinPoolExists(devices.getPoolName())
+ if err != nil {
+ return err
+ }
+
+ // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
+ // that are not Close-on-exec,
+ // so we add this badhack to make sure it closes itself
+ setCloseOnExec("/dev/mapper/control")
+
+ // Make sure the sparse images exist in /devicemapper/data and
+ // /devicemapper/metadata
+
+ createdLoopback := false
+
+ // If the pool doesn't exist, create it
+ if !poolExists && devices.thinPoolDevice == "" {
+ logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
+
+ var (
+ dataFile *os.File
+ metadataFile *os.File
+ )
+
+ if devices.dataDevice == "" {
+ // Make sure the sparse images exist in /devicemapper/data
+
+ hasData := devices.hasImage("data")
+
+ if !doInit && !hasData {
+ return errors.New("loopback data file not found")
+ }
+
+ if !hasData {
+ createdLoopback = true
+ }
+
+ data, err := devices.ensureImage("data", devices.dataLoopbackSize)
+ if err != nil {
+ logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
+ return err
+ }
+
+ dataFile, err = loopback.AttachLoopDevice(data)
+ if err != nil {
+ return err
+ }
+ devices.dataLoopFile = data
+ devices.dataDevice = dataFile.Name()
+ } else {
+ dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ defer dataFile.Close()
+
+ if devices.metadataDevice == "" {
+ // Make sure the sparse images exist in /devicemapper/metadata
+
+ hasMetadata := devices.hasImage("metadata")
+
+ if !doInit && !hasMetadata {
+ return errors.New("loopback metadata file not found")
+ }
+
+ if !hasMetadata {
+ createdLoopback = true
+ }
+
+ metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
+ if err != nil {
+ logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
+ return err
+ }
+
+ metadataFile, err = loopback.AttachLoopDevice(metadata)
+ if err != nil {
+ return err
+ }
+ devices.metadataLoopFile = metadata
+ devices.metadataDevice = metadataFile.Name()
+ } else {
+ metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ }
+ defer metadataFile.Close()
+
+ if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
+ return err
+ }
+ defer func() {
+ if retErr != nil {
+ err = devices.deactivatePool()
+ if err != nil {
+ logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
+ }
+ }
+ }()
+ }
+
+ // Pool already exists and caller did not pass us a pool. That means
+ // we probably created pool earlier and could not remove it as some
+ // containers were still using it. Detect some of the properties of
+ // pool, like is it using loop devices.
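+ // loadThinPoolLoopBackInfo (defined above) recovers dataDevice and
+ // metadataDevice by matching the pool's data/metadata major:minor
+ // numbers against the loop devices backing the files in the loopback
+ // directory.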
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. 
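+ // Note the rollback below: if the metadata write fails, the in-memory
+ // Deleted flag is cleared again so on-disk and in-memory state stay
+ // consistent.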
+ if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. If deferred + // deletion is not enabled, we return an error. If error is + // something other then EBUSY, return an error. + if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { + logrus.Debugf("devmapper: Error deleting device: %s", err) + return err + } + } + + if err == nil { + if err := devices.unregisterDevice(info.Hash); err != nil { + return err + } + // If device was already in deferred delete state that means + // deletion was being tried again later. Reduce the deleted + // device count. + if info.Deleted { + devices.nrDeletedDevices-- + } + devices.markDeviceIDFree(info.DeviceID) + } else { + if err := devices.markForDeferredDeletion(info); err != nil { + return err + } + } + + return nil +} + +// Issue discard only if device open count is zero. +func (devices *DeviceSet) issueDiscard(info *devInfo) error { + logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) + // This is a workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually. + // Even if device is deferred deleted, activate it and issue + // discards. + if err := devices.activateDeviceIfNeeded(info, true); err != nil { + return err + } + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.OpenCount != 0 { + logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) + return nil + } + + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { + logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) + } + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { + if devices.doBlkDiscard { + devices.issueDiscard(info) + } + + // Try to deactivate device in case it is active. + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Error deactivating device: %s", err) + return err + } + + if err := devices.deleteTransaction(info, syncDelete); err != nil { + return err + } + + return nil +} + +// DeleteDevice will return success if device has been marked for deferred +// removal. If one wants to override that and want DeleteDevice() to fail if +// device was busy and could not be deleted, set syncDelete=true. 
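+// For illustration (hypothetical hash), the two modes look like:
+//
+//   err := devices.DeleteDevice("abc123", false) // may defer the delete if busy and deferred deletion is enabled
+//   err = devices.DeleteDevice("abc123", true)   // surfaces the EBUSY error instead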
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { + logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info, syncDelete) +} + +func (devices *DeviceSet) deactivatePool() error { + logrus.Debug("devmapper: deactivatePool() START") + defer logrus.Debug("devmapper: deactivatePool() END") + devname := devices.getPoolDevName() + + devinfo, err := devicemapper.GetInfo(devname) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + if err := devicemapper.RemoveDevice(devname); err != nil { + return err + } + + if d, err := devicemapper.GetDeps(devname); err == nil { + logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *devInfo) error { + logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) + defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + + if devices.deferredRemove { + if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { + return err + } + } else { + if err := devices.removeDevice(info.Name()); err != nil { + return err + } + } + return nil +} + +// Issues the underlying dm remove operation. +func (devices *DeviceSet) removeDevice(devname string) error { + var err error + + logrus.Debugf("devmapper: removeDevice START(%s)", devname) + defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + + for i := 0; i < 200; i++ { + err = devicemapper.RemoveDevice(devname) + if err == nil { + break + } + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + + return err +} + +func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { + if !devices.deferredRemove { + return nil + } + + logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) + + devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + if err != nil { + return err + } + + if devinfo != nil && devinfo.DeferredRemove == 0 { + return nil + } + + // Cancel deferred remove + if err := devices.cancelDeferredRemoval(info); err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if err != devicemapper.ErrEnxio { + return err + } + } + return nil +} + +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + var err error + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err != nil { + if err == devicemapper.ErrBusy { + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. 
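+ // (Up to 100 attempts at 100ms apart give the cancellation roughly ten
+ // seconds to win the race; the device set lock is dropped around each
+ // sleep so other work can proceed.)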
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ continue
+ }
+ }
+ break
+ }
+ return err
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+ logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+ logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+ defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+ // Stop deletion worker. This should stop delivering new events to
+ // ticker channel. That means no new instance of cleanupDeletedDevice()
+ // will run after this call. If one instance is already running at
+ // the time of the call, it must be holding devices.Lock() and
+ // we will block on this lock till cleanup function exits.
+ devices.deletionWorkerTicker.Stop()
+
+ devices.Lock()
+ // Save DeviceSet Metadata first. Docker kills all threads if they
+ // don't finish in certain time. It is possible that Shutdown()
+ // routine does not finish in time as we loop trying to deactivate
+ // some devices while these are busy. In that case shutdown() routine
+ // will be killed and we will not get a chance to save deviceset
+ // metadata. Hence save this early before trying to deactivate devices.
+ devices.saveDeviceSetMetaData()
+
+ // ignore the error since it's just a best effort to not try to unmount something that's mounted
+ mounts, _ := mount.GetMounts()
+ mounted := make(map[string]bool, len(mounts))
+ for _, mnt := range mounts {
+ mounted[mnt.Mountpoint] = true
+ }
+
+ if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ return nil
+ }
+
+ if mounted[p] {
+ // We use MNT_DETACH here in case it is still busy in some running
+ // container. This means it'll go away from the global scope directly,
+ // and the device will be released when that container dies.
+ if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
+ logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
+ }
+ }
+
+ if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+ logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+ } else {
+ if err := devices.deactivateDevice(devInfo); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
+ }
+ }
+
+ return nil
+ }); err != nil && !os.IsNotExist(err) {
+ devices.Unlock()
+ return err
+ }
+
+ devices.Unlock()
+
+ info, _ := devices.lookupDeviceWithLock("")
+ if info != nil {
+ info.lock.Lock()
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
+ }
+ devices.Unlock()
+ info.lock.Unlock()
+ }
+
+ devices.Lock()
+ if devices.thinPoolDevice == "" {
+ if err := devices.deactivatePool(); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
+ }
+ }
+ devices.Unlock()
+
+ return nil
+}
+
+// Recent XFS changes allow changing behavior of filesystem in case of errors.
+// When thin pool gets full and XFS gets ENOSPC error, currently it tries
+// IO infinitely and sometimes it can block the container process
+// and process can't be killed. With 0 value, XFS will not retry upon error
+// and instead will shut down the filesystem.
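+//
+// The retry limit lives in sysfs. For a hypothetical mapped device dm-3
+// and dm.xfs_nospace_max_retries=0, the write performed below amounts to:
+//
+//   echo 0 > /sys/fs/xfs/dm-3/error/metadata/ENOSPC/max_retries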
+ +func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { + dmDevicePath, err := os.Readlink(info.DevName()) + if err != nil { + return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + } + + dmDeviceName := path.Base(dmDevicePath) + filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" + maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + } + defer maxRetriesFile.Close() + + // Set max retries to 0 + _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) + if err != nil { + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + } + return nil +} + +// MountDevice mounts the device if not already mounted. +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + if fstype == "xfs" && devices.xfsNospaceRetries != "" { + if err := devices.xfsSetNospaceRetries(info); err != nil { + syscall.Unmount(path, syscall.MNT_DETACH) + devices.deactivateDevice(info) + return err + } + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + return devices.deactivateDevice(info) +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. 
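+// (These are the hash keys of the Devices map, i.e. layer hashes, not
+// the numeric thin-pool device IDs.)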
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
+ status.BaseDeviceSize = devices.getBaseDeviceSize()
+ status.BaseDeviceFS = devices.getBaseDeviceFS()
+
+ totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+ if err == nil {
+ // Convert from blocks to bytes
+ blockSizeInSectors := totalSizeInSectors / dataTotal
+
+ status.Data.Used = dataUsed * blockSizeInSectors * 512
+ status.Data.Total = dataTotal * blockSizeInSectors * 512
+ status.Data.Available = status.Data.Total - status.Data.Used
+
+ // metadata blocks are always 4k
+ status.Metadata.Used = metadataUsed * 4096
+ status.Metadata.Total = metadataTotal * 4096
+ status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
+
+ status.SectorSize = blockSizeInSectors * 512
+
+ if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+ if err == nil && actualSpace < status.Data.Available {
+ status.Data.Available = actualSpace
+ }
+ }
+
+ if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+ actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+ if err == nil && actualSpace < status.Metadata.Available {
+ status.Metadata.Available = actualSpace
+ }
+ }
+
+ minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+ status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
+ }
+
+ return status
+}
+
+// exportDeviceMetadata exports the metadata (device id, size, name) for the given device hash.
+func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
+ return metadata, nil
+}
+
+// NewDeviceSet creates the device set based on the options provided.
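+// For illustration (hypothetical arguments), the graphdriver would call:
+//
+//   ds, err := NewDeviceSet("/var/lib/docker/devicemapper", true,
+//       []string{"dm.fs=xfs", "dm.thinpooldev=docker-thinpool"}, nil, nil)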
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) {
+	devicemapper.SetDevDir("/dev")
+
+	devices := &DeviceSet{
+		root:                  root,
+		metaData:              metaData{Devices: make(map[string]*devInfo)},
+		dataLoopbackSize:      defaultDataLoopbackSize,
+		metaDataLoopbackSize:  defaultMetaDataLoopbackSize,
+		baseFsSize:            defaultBaseFsSize,
+		overrideUdevSyncCheck: defaultUdevSyncOverride,
+		doBlkDiscard:          true,
+		thinpBlockSize:        defaultThinpBlockSize,
+		deviceIDMap:           make([]byte, deviceIDMapSz),
+		deletionWorkerTicker:  time.NewTicker(time.Second * 30),
+		uidMaps:               uidMaps,
+		gidMaps:               gidMaps,
+		minFreeSpacePercent:   defaultMinFreeSpacePercent,
+	}
+
+	version, err := devicemapper.GetDriverVersion()
+	if err != nil {
+		// Can't even get driver version, assume not supported
+		return nil, graphdriver.ErrNotSupported
+	}
+
+	if err := determineDriverCapabilities(version); err != nil {
+		return nil, graphdriver.ErrNotSupported
+	}
+
+	if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport {
+		// enable deferred removal and deferred deletion by default
+		enableDeferredDeletion = true
+		enableDeferredRemoval = true
+	}
+
+	foundBlkDiscard := false
+	var lvmSetupConfig directLVMConfig
+	for _, option := range options {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return nil, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "dm.basesize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			userBaseSize = true
+			devices.baseFsSize = uint64(size)
+		case "dm.loopdatasize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			devices.dataLoopbackSize = size
+		case "dm.loopmetadatasize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			devices.metaDataLoopbackSize = size
+		case "dm.fs":
+			if val != "ext4" && val != "xfs" {
+				return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val)
+			}
+			devices.filesystem = val
+		case "dm.mkfsarg":
+			devices.mkfsArgs = append(devices.mkfsArgs, val)
+		case "dm.mountopt":
+			devices.mountOptions = joinMountOptions(devices.mountOptions, val)
+		case "dm.metadatadev":
+			devices.metadataDevice = val
+		case "dm.datadev":
+			devices.dataDevice = val
+		case "dm.thinpooldev":
+			devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
+		case "dm.blkdiscard":
+			foundBlkDiscard = true
+			devices.doBlkDiscard, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+		case "dm.blocksize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			// convert to 512b sectors
+			devices.thinpBlockSize = uint32(size) >> 9
+		case "dm.override_udev_sync_check":
+			devices.overrideUdevSyncCheck, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+
+		case "dm.use_deferred_removal":
+			enableDeferredRemoval, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+
+		case "dm.use_deferred_deletion":
+			enableDeferredDeletion, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+
+		case "dm.min_free_space":
+			if !strings.HasSuffix(val, "%") {
+				return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")
+			}
+
+			valstring := strings.TrimSuffix(val, "%")
+			minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32)
+			if err != nil {
+				return nil, err
+			}
+
+			if minFreeSpacePercent >= 100 {
+				return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val)
+			}
+
+			devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
+		case "dm.xfs_nospace_max_retries":
+			_, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			devices.xfsNospaceRetries = val
+		case "dm.directlvm_device":
+			lvmSetupConfig.Device = val
+		case "dm.directlvm_device_force":
+			lvmSetupConfigForce, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+		case "dm.thinp_percent":
+			per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+			if err != nil {
+				return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
+			}
+			if per >= 100 {
+				return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
+			}
+			lvmSetupConfig.ThinpPercent = per
+		case "dm.thinp_metapercent":
+			per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+			if err != nil {
+				return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
+			}
+			if per >= 100 {
+				return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
+			}
+			lvmSetupConfig.ThinpMetaPercent = per
+		case "dm.thinp_autoextend_percent":
+			per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+			if err != nil {
+				return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
+			}
+			if per > 100 {
+				return nil, errors.New("dm.thinp_autoextend_percent must be between 0 and 100")
+			}
+			lvmSetupConfig.AutoExtendPercent = per
+		case "dm.thinp_autoextend_threshold":
+			per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
+			if err != nil {
+				return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
+			}
+			if per > 100 {
+				return nil, errors.New("dm.thinp_autoextend_threshold must be between 0 and 100")
+			}
+			lvmSetupConfig.AutoExtendThreshold = per
+		default:
+			return nil, fmt.Errorf("devmapper: Unknown option %s", key)
+		}
+	}
+
+	if err := validateLVMConfig(lvmSetupConfig); err != nil {
+		return nil, err
+	}
+
+	devices.lvmSetupConfig = lvmSetupConfig
+
+	// By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive
+	if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
+		devices.doBlkDiscard = false
+	}
+
+	if err := devices.initDevmapper(doInit); err != nil {
+		return nil, err
+	}
+
+	return devices, nil
+}
diff --git a/daemon/graphdriver/devmapper/devmapper_doc.go b/daemon/graphdriver/devmapper/devmapper_doc.go
new file mode 100644
index 0000000..9ab3e4f
--- /dev/null
+++ b/daemon/graphdriver/devmapper/devmapper_doc.go
@@ -0,0 +1,106 @@
+package devmapper
+
+// Definition of struct dm_task and sub structures (from lvm2)
+//
+// struct dm_ioctl {
+//	/*
+//	 * The version number is made up of three parts:
+//	 * major - no backward or forward compatibility,
+//	 * minor - only backwards compatible,
+//	 * patch - both backwards and forwards compatible.
+//	 *
+//	 * All clients of the ioctl interface should fill in the
+//	 * version number of the interface that they were
+//	 * compiled with.
+//	 *
+//	 * All recognized ioctl commands (ie. those that don't
+//	 * return -ENOTTY) fill out this field, even if the
+//	 * command failed.
+// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000..7501397 --- /dev/null +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,152 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "os" + "syscall" + "testing" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +func init() { + // Reduce the size of the base fs and loopback for the tests + defaultDataLoopbackSize = 300 * 1024 * 1024 + defaultMetaDataLoopbackSize = 200 * 1024 * 1024 + defaultBaseFsSize = 300 * 1024 * 1024 + defaultUdevSyncOverride = true + if err := initLoopbacks(); err != nil { + panic(err) + } +} + +// initLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. 
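+// Loop devices use major number 7 with the index as the minor number, so the
+// dev_t handed to mknod below is assembled as (7<<8)|(i&0xff)|((i&0xfff00)<<12),
+// the kernel's split encoding for minor numbers that exceed one byte.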
+func initLoopbacks() error {
+	statT, err := getBaseLoopStats()
+	if err != nil {
+		return err
+	}
+	// create at least 8 loopback files; that is more than enough for these tests
+	for i := 0; i < 8; i++ {
+		loopPath := fmt.Sprintf("/dev/loop%d", i)
+		// only create new loopback files if they don't exist
+		if _, err := os.Stat(loopPath); err != nil {
+			if mkerr := syscall.Mknod(loopPath,
+				uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+				return mkerr
+			}
+			os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
+		}
+	}
+	return nil
+}
+
+// getBaseLoopStats inspects /dev/loop0 to collect uid, gid, and mode for the
+// loop0 device on the system. If it does not exist, we assume 0, 0, 0660 for
+// the stat data.
+func getBaseLoopStats() (*syscall.Stat_t, error) {
+	loop0, err := os.Stat("/dev/loop0")
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &syscall.Stat_t{
+				Uid:  0,
+				Gid:  0,
+				Mode: 0660,
+			}, nil
+		}
+		return nil, err
+	}
+	return loop0.Sys().(*syscall.Stat_t), nil
+}
+
+// This avoids creating a new driver for each test if all tests are run.
+// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown.
+func TestDevmapperSetup(t *testing.T) {
+	graphtest.GetDriver(t, "devicemapper")
+}
+
+func TestDevmapperCreateEmpty(t *testing.T) {
+	graphtest.DriverTestCreateEmpty(t, "devicemapper")
+}
+
+func TestDevmapperCreateBase(t *testing.T) {
+	graphtest.DriverTestCreateBase(t, "devicemapper")
+}
+
+func TestDevmapperCreateSnap(t *testing.T) {
+	graphtest.DriverTestCreateSnap(t, "devicemapper")
+}
+
+func TestDevmapperTeardown(t *testing.T) {
+	graphtest.PutDriver(t)
+}
+
+func TestDevmapperReduceLoopBackSize(t *testing.T) {
+	tenMB := int64(10 * 1024 * 1024)
+	testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize)
+}
+
+func TestDevmapperIncreaseLoopBackSize(t *testing.T) {
+	tenMB := int64(10 * 1024 * 1024)
+	testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB)
+}
+
+func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) {
+	driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+	defer graphtest.PutDriver(t)
+	// make sure the data and metadata loopback sizes are the defaults
+	if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) {
+		t.Fatal("data or metadata loop back size is incorrect")
+	}
+	if err := driver.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+	// Reload
+	d, err := Init(driver.home, []string{
+		fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta),
+		fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta),
+	}, nil, nil)
+	if err != nil {
+		t.Fatalf("error creating devicemapper driver: %v", err)
+	}
+	driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+	if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) {
+		t.Fatal("data or metadata loop back size is incorrect")
+	}
+	if err := driver.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Make sure devices.Lock() has been released upon return from the
+// cleanupDeletedDevices() function.
+func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) {
+	driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
+	defer graphtest.PutDriver(t)
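+
+	// The type-assertion chain above unwraps graphtest's wrapper and the
+	// NaiveDiffDriver added by Init to reach the raw devmapper *Driver, which
+	// is what exposes cleanupDeletedDevices and the DeviceSet lock.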
+
+	// Call cleanupDeletedDevices(), then take and release the DeviceSet lock.
+	// If the lock was not released by that call, this test will hang.
+	driver.DeviceSet.cleanupDeletedDevices()
+
+	doneChan := make(chan bool)
+
+	go func() {
+		driver.DeviceSet.Lock()
+		defer driver.DeviceSet.Unlock()
+		doneChan <- true
+	}()
+
+	select {
+	case <-time.After(time.Second * 5):
+		// Timer expired. That means the lock was not released upon return
+		// from cleanupDeletedDevices() and we are deadlocked. Release the
+		// lock here so that cleanup can proceed, and fail the test.
+		driver.DeviceSet.Unlock()
+		t.Fatal("Could not acquire devices lock after call to cleanupDeletedDevices()")
+	case <-doneChan:
+	}
+}
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
new file mode 100644
index 0000000..243d88a
--- /dev/null
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -0,0 +1,241 @@
+// +build linux
+
+package devmapper
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strconv"
+
+	"github.com/Sirupsen/logrus"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/devicemapper"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/locker"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/system"
+	units "github.com/docker/go-units"
+)
+
+func init() {
+	graphdriver.Register("devicemapper", Init)
+}
+
+// Driver contains the device set mounted and the home directory
+type Driver struct {
+	*DeviceSet
+	home    string
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+	ctr     *graphdriver.RefCounter
+	locker  *locker.Locker
+}
+
+// Init creates a driver with the given home and the set of options.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+	deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	d := &Driver{
+		DeviceSet: deviceSet,
+		home:      home,
+		uidMaps:   uidMaps,
+		gidMaps:   gidMaps,
+		ctr:       graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
+		locker:    locker.New(),
+	}
+
+	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+}
+
+func (d *Driver) String() string {
+	return "devicemapper"
+}
+
+// Status returns the status of the driver in a printable format.
+// Information returned contains Pool Name, Data File, Metadata file, disk
+// usage by the data and metadata, etc.
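+// These are the pairs that `docker info` prints under its storage-driver
+// section; with a loopback setup they include, for example (illustrative
+// values only), "Data loop file: /var/lib/docker/devicemapper/devicemapper/data"
+// and "Udev Sync Supported: true".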
+func (d *Driver) Status() [][2]string {
+	s := d.DeviceSet.Status()
+
+	status := [][2]string{
+		{"Pool Name", s.PoolName},
+		{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
+		{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
+		{"Backing Filesystem", s.BaseDeviceFS},
+		{"Data file", s.DataFile},
+		{"Metadata file", s.MetadataFile},
+		{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
+		{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
+		{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
+		{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
+		{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
+		{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
+		{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
+		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
+		{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
+		{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
+		{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
+	}
+	if len(s.DataLoopback) > 0 {
+		status = append(status, [2]string{"Data loop file", s.DataLoopback})
+	}
+	if len(s.MetadataLoopback) > 0 {
+		status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
+	}
+	if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
+		status = append(status, [2]string{"Library Version", vStr})
+	}
+	return status
+}
+
+// GetMetadata returns a map of information about the device.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	m, err := d.DeviceSet.exportDeviceMetadata(id)
+
+	if err != nil {
+		return nil, err
+	}
+
+	metadata := make(map[string]string)
+	metadata["DeviceId"] = strconv.Itoa(m.deviceID)
+	metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
+	metadata["DeviceName"] = m.deviceName
+	return metadata, nil
+}
+
+// Cleanup shuts down the device set and unmounts the driver's home directory.
+func (d *Driver) Cleanup() error {
+	err := d.DeviceSet.Shutdown(d.home)
+
+	if err2 := mount.Unmount(d.home); err == nil {
+		err = err2
+	}
+
+	return err
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
+}
+
+// Create adds a device with a given id and the parent.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	var storageOpt map[string]string
+	if opts != nil {
+		storageOpt = opts.StorageOpt
+	}
+
+	if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Remove removes the device with the given id and unmounts its filesystem.
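+// Removing an id the device set does not know about is treated as a no-op,
+// so container removal can make progress even when the underlying device
+// disappeared due to earlier errors.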
+func (d *Driver) Remove(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	if !d.DeviceSet.HasDevice(id) {
+		// Consider removing a non-existing device a no-op
+		// This is useful to be able to progress on container removal
+		// if the underlying device has gone away due to earlier errors
+		return nil
+	}
+
+	// This assumes that all Get calls for the device have been matched with
+	// Put calls, so the device is no longer mounted
+	if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
+		return fmt.Errorf("failed to remove device %s: %v", id, err)
+	}
+
+	mp := path.Join(d.home, "mnt", id)
+	if err := system.EnsureRemoveAll(mp); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Get mounts a device with given id into the root filesystem
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	mp := path.Join(d.home, "mnt", id)
+	rootFs := path.Join(mp, "rootfs")
+	if count := d.ctr.Increment(mp); count > 1 {
+		return rootFs, nil
+	}
+
+	uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		d.ctr.Decrement(mp)
+		return "", err
+	}
+
+	// Create the target directories if they don't exist
+	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
+		d.ctr.Decrement(mp)
+		return "", err
+	}
+	if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
+		d.ctr.Decrement(mp)
+		return "", err
+	}
+
+	// Mount the device
+	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
+		d.ctr.Decrement(mp)
+		return "", err
+	}
+
+	if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
+		d.ctr.Decrement(mp)
+		d.DeviceSet.UnmountDevice(id, mp)
+		return "", err
+	}
+
+	idFile := path.Join(mp, "id")
+	if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
+		// Create an "id" file with the container/image id in it to help reconstruct this in case
+		// of later problems
+		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+			d.ctr.Decrement(mp)
+			d.DeviceSet.UnmountDevice(id, mp)
+			return "", err
+		}
+	}
+
+	return rootFs, nil
+}
+
+// Put unmounts a device and removes it.
+func (d *Driver) Put(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	mp := path.Join(d.home, "mnt", id)
+	if count := d.ctr.Decrement(mp); count > 0 {
+		return nil
+	}
+	err := d.DeviceSet.UnmountDevice(id, mp)
+	if err != nil {
+		logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
+	}
+	return err
+}
+
+// Exists checks to see if the device exists.
+func (d *Driver) Exists(id string) bool {
+	return d.DeviceSet.HasDevice(id)
+}
diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go
new file mode 100644
index 0000000..cca1fe1
--- /dev/null
+++ b/daemon/graphdriver/devmapper/mount.go
@@ -0,0 +1,89 @@
+// +build linux
+
+package devmapper
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// FIXME: this is copy-pasted from the aufs driver.
+// It should be moved into the core.
+
+// Mounted returns true if a mount point exists.
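+// It works by comparing the st_dev of the path with that of its parent
+// directory: crossing a mount point changes the device number, so differing
+// st_dev values mean something is mounted at the path.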
+func Mounted(mountpoint string) (bool, error) {
+	mntpoint, err := os.Stat(mountpoint)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	parent, err := os.Stat(filepath.Join(mountpoint, ".."))
+	if err != nil {
+		return false, err
+	}
+	mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
+	parentSt := parent.Sys().(*syscall.Stat_t)
+	return mntpointSt.Dev != parentSt.Dev, nil
+}
+
+type probeData struct {
+	fsName string
+	magic  string
+	offset uint64
+}
+
+// ProbeFsType returns the filesystem name for the given device id.
+func ProbeFsType(device string) (string, error) {
+	probes := []probeData{
+		{"btrfs", "_BHRfS_M", 0x10040},
+		{"ext4", "\123\357", 0x438},
+		{"xfs", "XFSB", 0},
+	}
+
+	maxLen := uint64(0)
+	for _, p := range probes {
+		l := p.offset + uint64(len(p.magic))
+		if l > maxLen {
+			maxLen = l
+		}
+	}
+
+	file, err := os.Open(device)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	buffer := make([]byte, maxLen)
+	l, err := file.Read(buffer)
+	if err != nil {
+		return "", err
+	}
+
+	if uint64(l) != maxLen {
+		return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
+	}
+
+	for _, p := range probes {
+		if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
+			return p.fsName, nil
+		}
+	}
+
+	return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
+}
+
+func joinMountOptions(a, b string) string {
+	if a == "" {
+		return b
+	}
+	if b == "" {
+		return a
+	}
+	return a + "," + b
+}
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
new file mode 100644
index 0000000..88f190d
--- /dev/null
+++ b/daemon/graphdriver/driver.go
@@ -0,0 +1,287 @@
+package graphdriver
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/plugingetter"
+)
+
+// FsMagic unsigned id of the filesystem in use.
+type FsMagic uint32
+
+const (
+	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	// All registered drivers
+	drivers map[string]InitFunc
+
+	// ErrNotSupported returned when driver is not supported.
+	ErrNotSupported = errors.New("driver not supported")
+	// ErrPrerequisites returned when driver does not meet prerequisites.
+	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	// ErrIncompatibleFS returned when file system is not supported.
+	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+)
+
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// methods.
+type CreateOpts struct {
+	MountLabel string
+	StorageOpt map[string]string
+}
+
+// InitFunc initializes the storage driver.
+type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
+
+// ProtoDriver defines the basic capabilities of a driver.
+// This interface exists solely to be a minimum set of methods
+// for client code that chooses not to implement the entire Driver
+// interface and instead uses the NaiveDiffDriver wrapper constructor.
+//
+// Use of ProtoDriver directly by client code is not recommended.
+type ProtoDriver interface {
+	// String returns a string representation of this driver.
+	String() string
+	// CreateReadWrite creates a new, empty filesystem layer that is ready
+	// to be used as the storage for a container. Additional options can
+	// be passed in opts. parent may be "" and opts may be nil.
+	CreateReadWrite(id, parent string, opts *CreateOpts) error
+	// Create creates a new, empty, filesystem layer with the
+	// specified id and parent and options passed in opts. Parent
+	// may be "" and opts may be nil.
+	Create(id, parent string, opts *CreateOpts) error
+	// Remove attempts to remove the filesystem layer with this id.
+	Remove(id string) error
+	// Get returns the mountpoint for the layered filesystem referred
+	// to by this id. You can optionally specify a mountLabel or "".
+	// Returns the absolute path to the mounted layered filesystem.
+	Get(id, mountLabel string) (dir string, err error)
+	// Put releases the system resources for the specified id,
+	// e.g., unmounting the layered filesystem.
+	Put(id string) error
+	// Exists returns whether a filesystem layer with the specified
+	// ID exists on this driver.
+	Exists(id string) bool
+	// Status returns a set of key-value pairs which give low
+	// level diagnostic status about this driver.
+	Status() [][2]string
+	// GetMetadata returns a set of key-value pairs which give low-level
+	// information about the image or container the driver is managing.
+	GetMetadata(id string) (map[string]string, error)
+	// Cleanup performs necessary tasks to release resources
+	// held by the driver, e.g., unmounting all layered filesystems
+	// known to this driver.
+	Cleanup() error
+}
+
+// DiffDriver is the interface to use to implement graph diffs
+type DiffDriver interface {
+	// Diff produces an archive of the changes between the specified
+	// layer and its parent layer which may be "".
+	Diff(id, parent string) (io.ReadCloser, error)
+	// Changes produces a list of changes between the specified layer
+	// and its parent layer. If parent is "", then all changes will be ADD changes.
+	Changes(id, parent string) ([]archive.Change, error)
+	// ApplyDiff extracts the changeset from the given diff into the
+	// layer with the specified id and parent, returning the size of the
+	// new layer in bytes.
+	// The archive.Reader must be an uncompressed stream.
+	ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+	// DiffSize calculates the changes between the specified id
+	// and its parent and returns the size in bytes of the changes
+	// relative to its base filesystem directory.
+	DiffSize(id, parent string) (size int64, err error)
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+	ProtoDriver
+	DiffDriver
+}
+
+// Capabilities defines a list of capabilities a driver may implement.
+// These capabilities are not required; however, they do determine how a
+// graphdriver can be used.
+type Capabilities struct {
+	// Flags that this driver is capable of reproducing exactly equivalent
+	// diffs for read-only layers. If set, clients can rely on the driver
+	// for consistent tar streams, and avoid extra processing to account
+	// for potential differences (e.g. the layer store's use of tar-split).
+	ReproducesExactDiffs bool
+}
+
+// CapabilityDriver is the interface for layered file system drivers that
+// can report on their Capabilities.
+type CapabilityDriver interface {
+	Capabilities() Capabilities
+}
+
+// DiffGetterDriver is the interface for layered file system drivers that
+// provide a specialized function for getting file contents for tar-split.
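+// (tar-split records only the tar layout metadata; the file payloads are
+// re-read through a storage.FileGetter when a layer tar is reassembled, and
+// DiffGetter is what supplies that getter.)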
+type DiffGetterDriver interface {
+	Driver
+	// DiffGetter returns an interface to efficiently retrieve the contents
+	// of files in a layer.
+	DiffGetter(id string) (FileGetCloser, error)
+}
+
+// FileGetCloser extends the storage.FileGetter interface with a Close method
+// for cleaning up.
+type FileGetCloser interface {
+	storage.FileGetter
+	// Close cleans up any resources associated with the FileGetCloser.
+	Close() error
+}
+
+// Checker makes checks on specified filesystems.
+type Checker interface {
+	// IsMounted returns true if the provided path is mounted for the specific checker
+	IsMounted(path string) bool
+}
+
+func init() {
+	drivers = make(map[string]InitFunc)
+}
+
+// Register registers an InitFunc for the driver.
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := drivers[name]; exists {
+		return fmt.Errorf("Name already registered %s", name)
+	}
+	drivers[name] = initFunc
+
+	return nil
+}
+
+// GetDriver initializes and returns the registered driver
+func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+	}
+
+	pluginDriver, err := lookupPlugin(name, pg, config)
+	if err == nil {
+		return pluginDriver, nil
+	}
+	logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to get graph driver")
+	return nil, ErrNotSupported
+}
+
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
+func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
+	}
+	logrus.Errorf("Failed to get built-in graph driver %s (home %s)", name, home)
+	return nil, ErrNotSupported
+}
+
+// Options is used to initialize a graphdriver
+type Options struct {
+	Root                string
+	DriverOptions       []string
+	UIDMaps             []idtools.IDMap
+	GIDMaps             []idtools.IDMap
+	ExperimentalEnabled bool
+}
+
+// New creates the driver and initializes it at the specified root.
+func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
+	if name != "" {
+		logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver
+		return GetDriver(name, pg, config)
+	}
+
+	// Guess the prior driver from the state left on disk
+	driversMap := scanPriorDrivers(config.Root)
+	for _, name := range priority {
+		if name == "vfs" {
+			// don't use vfs even if there is state present.
+			continue
+		}
+		if _, prior := driversMap[name]; prior {
+			// of the state found from prior drivers, check in order of our priority
+			// which we would prefer
+			driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+			if err != nil {
+				// unlike below, we will return the error here, because there is prior
+				// state, and now it is no longer supported/prereq/compatible, so
+				// something changed and needs attention. Otherwise the daemon's
+				// images would just "disappear".
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; please clean up or explicitly choose a storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range priority {
+		driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
diff --git a/daemon/graphdriver/driver_freebsd.go b/daemon/graphdriver/driver_freebsd.go
new file mode 100644
index 0000000..2891a84
--- /dev/null
+++ b/daemon/graphdriver/driver_freebsd.go
@@ -0,0 +1,19 @@
+package graphdriver
+
+import "syscall"
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"zfs",
+	}
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
new file mode 100644
index 0000000..5c8d0e2
--- /dev/null
+++ b/daemon/graphdriver/driver_linux.go
@@ -0,0 +1,135 @@
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem id for Jffs2Fs
+	FsMagicJffs2Fs =
FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"aufs",
+		"btrfs",
+		"zfs",
+		"overlay2",
+		"overlay",
+		"devicemapper",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a Checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
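+// This is the checker that graphdriver.NewRefCounter is typically seeded
+// with; the devicemapper driver's Init above does exactly that.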
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/daemon/graphdriver/driver_solaris.go b/daemon/graphdriver/driver_solaris.go
new file mode 100644
index 0000000..7daf01c
--- /dev/null
+++ b/daemon/graphdriver/driver_solaris.go
@@ -0,0 +1,97 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a Checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type.
+// Solaris supports only ZFS for now.
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		C.free(unsafe.Pointer(buf))
+		return false, ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return true, nil
+}
diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go
new file mode 100644
index 0000000..4a87560
--- /dev/null
+++ b/daemon/graphdriver/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"unsupported",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/daemon/graphdriver/driver_windows.go b/daemon/graphdriver/driver_windows.go new file mode 100644 index 0000000..ffd30c2 --- /dev/null +++ b/daemon/graphdriver/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000..20826cd --- /dev/null +++ b/daemon/graphdriver/fsdiff.go @@ -0,0 +1,169 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. 
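+		// The Sleep below parks the caller until the first whole second after
+		// startTime: Truncate rounds startTime down to the second, Add moves to
+		// the next second boundary, and Sub yields however much of it remains.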
+		// We need this hack to make sure that calls made within the same
+		// second receive the correct result.
+		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+		return err
+	}), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
+	driver := gdw.ProtoDriver
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return nil, err
+	}
+	defer driver.Put(id)
+
+	parentFs := ""
+
+	if parent != "" {
+		parentFs, err = driver.Get(parent, "")
+		if err != nil {
+			return nil, err
+		}
+		defer driver.Put(parent)
+	}
+
+	return archive.ChangesDirs(layerFs, parentFs)
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	// Mount the root filesystem so we can apply the diff/layer.
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
+		GIDMaps: gdw.gidMaps}
+	start := time.Now().UTC()
+	logrus.Debug("Start untar layer")
+	if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
+		return
+	}
+	logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+
+	return
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	changes, err := gdw.Changes(id, parent)
+	if err != nil {
+		return
+	}
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/daemon/graphdriver/graphtest/graphbench_unix.go b/daemon/graphdriver/graphtest/graphbench_unix.go
new file mode 100644
index 0000000..def822b
--- /dev/null
+++ b/daemon/graphdriver/graphtest/graphbench_unix.go
@@ -0,0 +1,259 @@
+// +build linux freebsd
+
+package graphtest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/pkg/stringid"
+)
+
+// DriverBenchExists benchmarks calls to exist
+func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) {
+	driver := GetDriver(b, drivername, driveroptions...)
+	defer PutDriver(b)
+
+	base := stringid.GenerateRandomID()
+
+	if err := driver.Create(base, "", nil); err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if !driver.Exists(base) {
+			b.Fatal("Newly created image doesn't exist")
+		}
+	}
+}
+
+// DriverBenchGetEmpty benchmarks calls to get on an empty layer
+func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) {
+	driver := GetDriver(b, drivername, driveroptions...)
+ defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := driver.Get(base, "") + b.StopTimer() + if err != nil { + b.Fatalf("Error getting mount: %s", err) + } + if err := driver.Put(base); err != nil { + b.Fatalf("Error putting mount: %s", err) + } + b.StartTimer() + } +} + +// DriverBenchDiffBase benchmarks calls to diff on a root layer +func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 3); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(base, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffN benchmarks calls to diff on two layers with +// a provided number of files on the lower and upper layers. +func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, bottom, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, top, 6); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffApplyN benchmarks calls to diff and apply together +func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + b.Fatal(err) + } + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + b.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + b.Fatal(err) + } + + b.StartTimer() + + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, "", arch) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + arch.Close() + + if applyDiffSize != diffSize { + // TODO: enforce this + //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) + } + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + b.Fatal(err) + } + } +} + +// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. +func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 50); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(topLayer, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. +func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+	defer PutDriver(b)
+
+	base := stringid.GenerateRandomID()
+	if err := driver.Create(base, "", nil); err != nil {
+		b.Fatal(err)
+	}
+
+	content := []byte("test content")
+	if err := addFile(driver, base, "testfile.txt", content); err != nil {
+		b.Fatal(err)
+	}
+
+	topLayer, err := addManyLayers(driver, base, layerCount)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	root, err := driver.Get(topLayer, "")
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer driver.Put(topLayer)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+
+		// Read content
+		c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		b.StopTimer()
+		if !bytes.Equal(c, content) {
+			b.Fatalf("Wrong content in file %v, expected %v", c, content)
+		}
+		b.StartTimer()
+	}
+}
diff --git a/daemon/graphdriver/graphtest/graphtest_unix.go b/daemon/graphdriver/graphtest/graphtest_unix.go
new file mode 100644
index 0000000..6852ca9
--- /dev/null
+++ b/daemon/graphdriver/graphtest/graphtest_unix.go
@@ -0,0 +1,336 @@
+// +build linux freebsd solaris
+
+package graphtest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path"
+	"reflect"
+	"syscall"
+	"testing"
+	"unsafe"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/go-units"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	drv *Driver
+)
+
+// Driver conforms to graphdriver.Driver interface and
+// contains information such as root and reference count of the number of clients using it.
+// This helps in testing drivers added into the framework.
+type Driver struct {
+	graphdriver.Driver
+	root     string
+	refCount int
+}
+
+func newDriver(t testing.TB, name string, options []string) *Driver {
+	root, err := ioutil.TempDir("", "docker-graphtest-")
+	require.NoError(t, err)
+
+	require.NoError(t, os.MkdirAll(root, 0755))
+	d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root})
+	if err != nil {
+		t.Logf("graphdriver: %v\n", err)
+		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
+			t.Skipf("Driver %s not supported", name)
+		}
+		t.Fatal(err)
+	}
+	return &Driver{d, root, 1}
+}
+
+func cleanup(t testing.TB, d *Driver) {
+	if err := d.Cleanup(); err != nil {
+		t.Fatal(err)
+	}
+	os.RemoveAll(d.root)
+}
+
+// GetDriver creates a new driver with the given name, or returns the existing
+// driver with that name and increments its reference count.
+func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver {
+	if drv == nil {
+		drv = newDriver(t, name, options)
+	} else {
+		drv.refCount++
+	}
+	return drv
+}
+
+// PutDriver decrements the reference count and removes the driver once it is
+// no longer used.
+func PutDriver(t testing.TB) {
+	if drv == nil {
+		t.Skip("No driver to put!")
+	}
+	drv.refCount--
+	if drv.refCount == 0 {
+		cleanup(t, drv)
+		drv = nil
+	}
+}
+
+// DriverTestCreateEmpty creates a new image and verifies that it is empty and
+// has the right metadata.
+func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	err := driver.Create("empty", "", nil)
+	require.NoError(t, err)
+
+	defer func() {
+		require.NoError(t, driver.Remove("empty"))
+	}()
+
+	if !driver.Exists("empty") {
+		t.Fatal("Newly created image doesn't exist")
+	}
+
+	dir, err := driver.Get("empty", "")
+	require.NoError(t, err)
+
+	verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
+
+	// Verify that the directory is empty
+	fis, err := readDir(dir)
+	require.NoError(t, err)
+	assert.Len(t, fis, 0)
+
+	driver.Put("empty")
+}
+
+// DriverTestCreateBase creates a base driver and verifies it.
+func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	createBase(t, driver, "Base")
+	defer func() {
+		require.NoError(t, driver.Remove("Base"))
+	}()
+	verifyBase(t, driver, "Base")
+}
+
+// DriverTestCreateSnap creates a base layer and a snapshot of it, and verifies the snapshot.
+func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	createBase(t, driver, "Base")
+	defer func() {
+		require.NoError(t, driver.Remove("Base"))
+	}()
+
+	err := driver.Create("Snap", "Base", nil)
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, driver.Remove("Snap"))
+	}()
+
+	verifyBase(t, driver, "Snap")
+}
+
+// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers
+func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+	defer PutDriver(t)
+
+	base := stringid.GenerateRandomID()
+	if err := driver.Create(base, "", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	content := []byte("test content")
+	if err := addFile(driver, base, "testfile.txt", content); err != nil {
+		t.Fatal(err)
+	}
+
+	topLayer, err := addManyLayers(driver, base, layerCount)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = checkManyLayers(driver, topLayer, layerCount)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// DriverTestDiffApply tests that diffing a layer and applying the diff
+// produces the same layer.
+func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) {
+	driver := GetDriver(t, drivername, driverOptions...)
+ defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + deleteFile := "file-remove.txt" + deleteFileContent := []byte("This file should get removed in upper!") + deleteDir := "var/lib" + + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + if err := addDirectory(driver, base, deleteDir); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { + t.Fatal(err) + } + + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + t.Fatal(err) + } + + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + t.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + arch, err := driver.Diff(upper, base) + if err != nil { + t.Fatal(err) + } + + buf := bytes.NewBuffer(nil) + if _, err := buf.ReadFrom(arch); err != nil { + t.Fatal(err) + } + if err := arch.Close(); err != nil { + t.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes())) + if err != nil { + t.Fatal(err) + } + + if applyDiffSize != diffSize { + t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize) + } + + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteFile); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteDir); err != nil { + t.Fatal(err) + } +} + +// DriverTestChanges tests computed changes on a layer matches changes made +func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, 20, 3); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + expectedChanges, err := changeManyFiles(driver, upper, 20, 6) + if err != nil { + t.Fatal(err) + } + + changes, err := driver.Changes(upper, base) + if err != nil { + t.Fatal(err) + } + + if err = checkChanges(expectedChanges, changes); err != nil { + t.Fatal(err) + } +} + +func writeRandomFile(path string, size uint64) error { + buf := make([]int64, size/8) + + r := rand.NewSource(0) + for i := range buf { + buf[i] = r.Int63() + } + + // Cast to []byte + header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf)) + header.Len *= 8 + header.Cap *= 8 + data := *(*[]byte)(unsafe.Pointer(&header)) + + return ioutil.WriteFile(path, data, 0700) +} + +// DriverTestSetQuota Create a driver and test setting quota. 
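+// A minimal usage sketch from a driver's test file (test name hypothetical;
+// only drivers that honour the "size" storage option, such as zfs, are
+// expected to pass):
+//
+//	func TestMyDriverSetQuota(t *testing.T) {
+//		graphtest.DriverTestSetQuota(t, "mydriver")
+//	}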
+func DriverTestSetQuota(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + createOpts := &graphdriver.CreateOpts{} + createOpts.StorageOpt = make(map[string]string, 1) + createOpts.StorageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + t.Fatal(err) + } + + mountPath, err := driver.Get("zfsTest", "") + if err != nil { + t.Fatal(err) + } + + quota := uint64(50 * units.MiB) + err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) + } + +} diff --git a/daemon/graphdriver/graphtest/graphtest_windows.go b/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 0000000..a50c521 --- /dev/null +++ b/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/daemon/graphdriver/graphtest/testutil.go b/daemon/graphdriver/graphtest/testutil.go new file mode 100644 index 0000000..35bf6d1 --- /dev/null +++ b/daemon/graphdriver/graphtest/testutil.go @@ -0,0 +1,342 @@ +package graphtest + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +func randomContent(size int, seed int64) []byte { + s := rand.NewSource(seed) + content := make([]byte, size) + + for i := 0; i < len(content); i += 7 { + val := s.Int63() + for j := 0; i+j < len(content) && j < 7; j++ { + content[i+j] = byte(val) + val >>= 8 + } + } + + return content +} + +func addFiles(drv graphdriver.Driver, layer string, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + return err + } + if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + return err + } + + return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) +} + +func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + if err != nil { + return err + } + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + + return nil +} + +func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return ioutil.WriteFile(path.Join(root, filename), content, 0755) +} + +func addDirectory(drv graphdriver.Driver, layer, dir string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return os.MkdirAll(path.Join(root, dir), 0755) +} + +func removeAll(drv graphdriver.Driver, layer string, names ...string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for _, filename := range names { + if err := os.RemoveAll(path.Join(root, filename)); err != nil { + return err + } + } + 
return nil +} + +func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if _, err := os.Stat(path.Join(root, filename)); err == nil { + return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + } else if !os.IsNotExist(err) { + return err + } + + return nil +} + +func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + return err + } + } + } + + return nil +} + +func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { + root, err := drv.Get(layer, "") + if err != nil { + return nil, err + } + defer drv.Put(layer) + + changes := []archive.Change{} + for i := 0; i < count; i += 100 { + archiveRoot := fmt.Sprintf("/directory-%d", i) + if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + return nil, err + } + for j := 0; i+j < count && j < 100; j++ { + if j == 0 { + changes = append(changes, archive.Change{ + Path: archiveRoot, + Kind: archive.ChangeModify, + }) + } + var change archive.Change + switch j % 3 { + // Update file + case 0: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeModify + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Add file + case 1: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Kind = archive.ChangeAdd + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Remove file + case 2: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeDelete + if err := os.Remove(path.Join(root, change.Path)); err != nil { + return nil, err + } + } + changes = append(changes, change) + } + } + + return changes, nil +} + +func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + content := randomContent(64, seed+int64(i+j)) + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + } + } + + return nil +} + +type changeList []archive.Change + +func (c changeList) Less(i, j int) bool { + if c[i].Path == c[j].Path { + return c[i].Kind < c[j].Kind + } + return c[i].Path < c[j].Path +} +func (c changeList) Len() int { return len(c) } +func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +func checkChanges(expected, actual []archive.Change) error { + if len(expected) != len(actual) { + return fmt.Errorf("unexpected number of changes, 
expected %d, got %d", len(expected), len(actual)) + } + sort.Sort(changeList(expected)) + sort.Sort(changeList(actual)) + + for i := range expected { + if expected[i] != actual[i] { + return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i]) + } + } + + return nil +} + +func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + return err + } + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + if err := os.MkdirAll(layerDir, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { + return err + } + + return nil +} + +func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { + lastLayer := baseLayer + for i := 1; i <= count; i++ { + nextLayer := stringid.GenerateRandomID() + if err := drv.Create(nextLayer, lastLayer, nil); err != nil { + return "", err + } + if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { + return "", err + } + + lastLayer = nextLayer + + } + return lastLayer, nil +} + +func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + if err != nil { + return err + } + + if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) + } + + for i := count; i > 0; i-- { + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + + thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + if err != nil { + return err + } + if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) + } + layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + if err != nil { + return err + } + } + return nil +} + +// readDir reads a directory just like ioutil.ReadDir() +// then hides specific files (currently "lost+found") +// so the tests don't "see" it +func readDir(dir string) ([]os.FileInfo, error) { + a, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + b := a[:0] + for _, x := range a { + if x.Name() != "lost+found" { // ext4 always have this dir + b = append(b, x) + } + } + + return b, nil +} diff --git a/daemon/graphdriver/graphtest/testutil_unix.go b/daemon/graphdriver/graphtest/testutil_unix.go new file mode 100644 index 0000000..63a9341 --- /dev/null +++ b/daemon/graphdriver/graphtest/testutil_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package graphtest + +import ( + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + require.NoError(t, err) + + actual := fi.Mode() + assert.Equal(t, mode&os.ModeType, actual&os.ModeType, path) + assert.Equal(t, mode&os.ModePerm, actual&os.ModePerm, path) + assert.Equal(t, mode&os.ModeSticky, actual&os.ModeSticky, path) + 
assert.Equal(t, mode&os.ModeSetuid, actual&os.ModeSetuid, path) + assert.Equal(t, mode&os.ModeSetgid, actual&os.ModeSetgid, path) + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + assert.Equal(t, uid, stat.Uid, path) + assert.Equal(t, gid, stat.Gid, path) + } +} + +func createBase(t testing.TB, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + err := driver.CreateReadWrite(name, "", nil) + require.NoError(t, err) + + dir, err := driver.Get(name, "") + require.NoError(t, err) + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky)) + require.NoError(t, os.Chown(subdir, 1, 2)) + + file := path.Join(dir, "a file") + err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid) + require.NoError(t, err) +} + +func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + require.NoError(t, err) + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + files, err := readDir(dir) + require.NoError(t, err) + assert.Len(t, files, 2) +} diff --git a/daemon/graphdriver/lcow/lcow.go b/daemon/graphdriver/lcow/lcow.go new file mode 100644 index 0000000..079252e --- /dev/null +++ b/daemon/graphdriver/lcow/lcow.go @@ -0,0 +1,539 @@ +// +build windows + +package lcow + +// Maintainer: @jhowardmsft +// Graph-driver for Linux Containers On Windows (LCOW) + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/jhowardmsft/opengcs/gogcs/client" +) + +// init registers the LCOW driver to the register. +func init() { + graphdriver.Register("lcow", InitLCOW) +} + +const ( + // sandboxFilename is the name of the file containing a layers sandbox (read-write layer) + sandboxFilename = "sandbox.vhdx" + + // svmScratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory + svmScratchFilename = "scratch.vhdx" +) + +// cacheType is our internal structure representing an item in our local cache +// of things that have been mounted. +type cacheType struct { + uvmPath string // Path in utility VM + hostPath string // Path on host + refCount int // How many times its been mounted + isSandbox bool // True if a sandbox +} + +// Driver represents an LCOW graph driver. +type Driver struct { + // homeDir is the hostpath where we're storing everything + homeDir string + // cachedSandboxFile is the location of the local default-sized cached sandbox + cachedSandboxFile string + // options are the graphdriver options we are initialised with + options []string + // config is the representation of the SVM. 
+	// @jhowardmsft LIFETIME TODO - For now, a global service utility-VM
+	config client.Config
+	// svmScratchSpaceFile is a host location for a dedicated scratch space
+	// that the SVM utilities can use as a scratch-space to avoid OOMs
+	// @jhowardmsft LIFETIME TODO - For now, a global service utility-VM
+	svmScratchSpaceFile string
+
+	// it is safe for Windows to use a cache here because it does not support
+	// restoring containers when the daemon dies.
+
+	// cacheMu is the mutex protecting adds/updates/deletes to our cache
+	cacheMu sync.Mutex
+	// cache is the cache of all the IDs we've mounted/unmounted.
+	cache map[string]cacheType
+}
+
+// InitLCOW returns a new LCOW storage driver.
+func InitLCOW(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+	title := "lcowdriver: init:"
+	logrus.Debugf("%s %s", title, home)
+
+	d := &Driver{
+		homeDir:             home,
+		options:             options,
+		cachedSandboxFile:   filepath.Join(home, "cache", sandboxFilename),
+		svmScratchSpaceFile: filepath.Join(home, "svmscratch", svmScratchFilename),
+		cache:               make(map[string]cacheType),
+	}
+
+	if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("%s failed to create '%s': %v", title, home, err)
+	}
+
+	// Cache directory for the blank sandbox so we don't have to pull it from the service VM each time
+	if err := idtools.MkdirAllAs(filepath.Dir(d.cachedSandboxFile), 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("%s failed to create '%s': %v", title, filepath.Dir(d.cachedSandboxFile), err)
+	}
+
+	// Location for the SVM scratch
+	if err := idtools.MkdirAllAs(filepath.Dir(d.svmScratchSpaceFile), 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("%s failed to create '%s': %v", title, filepath.Dir(d.svmScratchSpaceFile), err)
+	}
+
+	return d, nil
+}
+
+// startUvm starts the service utility VM if it isn't running.
+// TODO @jhowardmsft. This will change before RS3 ships as we move from a model
+// of one global service VM to a service VM per container (or offline operation).
+// However, for the initial bring-up of LCOW, this is acceptable.
+func (d *Driver) startUvm(context string) error {
+	const toolsScratchPath = "/mnt/gcs/LinuxServiceVM/scratch"
+
+	// Nothing to do if it's already running
+	if d.config.Uvm != nil {
+		return nil
+	}
+
+	// So we need to start it. Generate a default configuration
+	if err := d.config.GenerateDefault(d.options); err != nil {
+		return fmt.Errorf("failed to generate default gogcs configuration (%s): %s", context, err)
+	}
+
+	scratchAttached := false
+	if _, err := os.Stat(d.svmScratchSpaceFile); err == nil {
+		// We have a scratch space already, so just attach it as a mapped virtual disk
+		logrus.Debugf("lcowdriver: startuvm: (%s) attaching pre-existing scratch", context)
+		mvd := hcsshim.MappedVirtualDisk{
+			HostPath:          d.svmScratchSpaceFile,
+			ContainerPath:     toolsScratchPath,
+			CreateInUtilityVM: true,
+		}
+		d.config.MappedVirtualDisks = append(d.config.MappedVirtualDisks, mvd)
+		scratchAttached = true
+	}
+
+	d.config.Name = "LinuxServiceVM" // TODO @jhowardmsft - This requires an in-flight platform change; the name can't be hard-coded longer term
+	if err := d.config.Create(); err != nil {
+		return fmt.Errorf("failed to start utility VM (%s): %s", context, err)
+	}
+
+	// Hot-add the scratch-space if not already attached
+	if !scratchAttached {
+		logrus.Debugf("lcowdriver: startuvm: (%s) creating an SVM scratch", context)
+		if err := d.config.CreateSandbox(d.svmScratchSpaceFile, client.DefaultSandboxSizeMB, d.cachedSandboxFile); err != nil {
+			return fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
+		}
+		logrus.Debugf("lcowdriver: startuvm: (%s) hot-adding an SVM scratch", context)
+		if err := d.config.HotAddVhd(d.svmScratchSpaceFile, toolsScratchPath); err != nil {
+			return fmt.Errorf("failed to hot-add %s: %s", d.svmScratchSpaceFile, err)
+		}
+	}
+	logrus.Debugf("lcowdriver: startuvm: (%s) successful", context)
+	return nil
+}
+
+// terminateUvm terminates the service utility VM if it's running.
+func (d *Driver) terminateUvm(context string) error {
+	// Nothing to do if it's not running
+	if d.config.Uvm == nil {
+		return nil
+	}
+
+	// FIXME: @jhowardmsft
+	// This isn't thread-safe yet, but will change anyway with the lifetime
+	// changes and multiple instances. Deferring that work for now.
+	uvm := d.config.Uvm
+	d.config.Uvm = nil
+
+	if err := uvm.Terminate(); err != nil {
+		return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
+	}
+
+	if err := uvm.WaitTimeout(time.Duration(d.config.UvmTimeoutSeconds) * time.Second); err != nil {
+		return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
+	}
+
+	return nil
+}
+
+// String returns the string representation of a driver. This should match
+// the name the graph driver has been registered with.
+func (d *Driver) String() string {
+	return "lcow"
+}
+
+// Status returns the status of the driver.
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"LCOW", ""},
+	}
+}
+
+// Exists returns true if the given id is registered with this driver.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Lstat(d.dir(id))
+	logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil)
+	return err == nil
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system. That equates to creating a sandbox VHDx.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	logrus.Debugf("lcowdriver: createreadwrite: id %s", id)
+
+	if err := d.startUvm("createreadwrite"); err != nil {
+		return err
+	}
+
+	if err := d.Create(id, parent, opts); err != nil {
+		return err
+	}
+
+	return d.config.CreateSandbox(filepath.Join(d.dir(id), sandboxFilename), client.DefaultSandboxSizeMB, d.cachedSandboxFile)
+}
+
+// Create creates a new read-only layer with the given id.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent)
+
+	parentChain, err := d.getLayerChain(parent)
+	if err != nil {
+		return err
+	}
+
+	var layerChain []string
+	if parent != "" {
+		if !d.Exists(parent) {
+			return fmt.Errorf("lcowdriver: cannot create read-only layer with missing parent %s", parent)
+		}
+		layerChain = []string{d.dir(parent)}
+	}
+	layerChain = append(layerChain, parentChain...)
+
+	// Make sure layers are created with the correct ACL so that VMs can access them.
+	layerPath := d.dir(id)
+	logrus.Debugf("lcowdriver: create: id %s: creating layerPath %s", id, layerPath)
+	if err := system.MkdirAllWithACL(layerPath, 0755, system.SddlNtvmAdministratorsLocalSystem); err != nil {
+		return err
+	}
+
+	if err := d.setLayerChain(id, layerChain); err != nil {
+		if err2 := os.RemoveAll(layerPath); err2 != nil {
+			logrus.Warnf("Failed to remove layer %s: %s", layerPath, err2)
+		}
+		return err
+	}
+	logrus.Debugf("lcowdriver: create: id %s: success", id)
+
+	return nil
+}
+
+// Remove unmounts and removes the dir information.
+func (d *Driver) Remove(id string) error {
+	logrus.Debugf("lcowdriver: remove: id %s", id)
+	tmpID := fmt.Sprintf("%s-removing", id)
+	tmpLayerPath := d.dir(tmpID)
+	layerPath := d.dir(id)
+
+	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
+	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	if err := os.RemoveAll(tmpLayerPath); err != nil {
+		return err
+	}
+
+	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath)
+	return nil
+}
+
+// Get returns the rootfs path for the id. It is reference counted and
+// can effectively be thought of as "mount the layer into the utility
+// VM if it isn't already".
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+	dir, _, _, err := d.getEx(id)
+	return dir, err
+}
+
+// getEx is Get, but also returns the cache-entry and the size of the VHD
+func (d *Driver) getEx(id string) (string, cacheType, int64, error) {
+	title := "lcowdriver: getEx"
+	logrus.Debugf("%s %s", title, id)
+
+	if err := d.startUvm(fmt.Sprintf("getex %s", id)); err != nil {
+		logrus.Debugf("%s failed to start utility vm: %s", title, err)
+		return "", cacheType{}, 0, err
+	}
+
+	// Work out what we are working on
+	vhdFilename, vhdSize, isSandbox, err := client.LayerVhdDetails(d.dir(id))
+	if err != nil {
+		logrus.Debugf("%s failed to get LayerVhdDetails from %s: %s", title, d.dir(id), err)
+		return "", cacheType{}, 0, fmt.Errorf("%s failed to open layer or sandbox VHD in %s: %s", title, d.dir(id), err)
+	}
+	logrus.Debugf("%s %s, size %d, isSandbox %t", title, vhdFilename, vhdSize, isSandbox)
+
+	hotAddRequired := false
+	d.cacheMu.Lock()
+	var cacheEntry cacheType
+	if _, ok := d.cache[id]; !ok {
+		// The item is not currently in the cache.
+		//
+		// Sandboxes need hot-adding in the case that there is a single global utility VM
+		// This will change for multiple instances with the lifetime changes.
+		if isSandbox {
+			hotAddRequired = true
+		}
+		d.cache[id] = cacheType{
+			uvmPath:   fmt.Sprintf("/mnt/%s", id),
+			refCount:  1,
+			isSandbox: isSandbox,
+			hostPath:  vhdFilename,
+		}
+	} else {
+		// Increment the reference counter in the cache.
+		cacheEntry = d.cache[id]
+		cacheEntry.refCount++
+		d.cache[id] = cacheEntry
+	}
+
+	cacheEntry = d.cache[id]
+	logrus.Debugf("%s %s: isSandbox %t, refCount %d", title, id, cacheEntry.isSandbox, cacheEntry.refCount)
+	d.cacheMu.Unlock()
+
+	if hotAddRequired {
+		logrus.Debugf("%s %s: Hot-Adding %s", title, id, vhdFilename)
+		if err := d.config.HotAddVhd(vhdFilename, cacheEntry.uvmPath); err != nil {
+			return "", cacheType{}, 0, fmt.Errorf("%s hot add %s failed: %s", title, vhdFilename, err)
+		}
+	}
+
+	logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), cacheEntry, vhdSize)
+	return d.dir(id), cacheEntry, vhdSize, nil
+}
+
+// Put does the reverse of Get. If there are no more references to
+// the layer, it unmounts it from the utility VM.
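+// Callers are expected to balance every successful Get with exactly one Put,
+// e.g. (sketch):
+//
+//	dir, err := d.Get(id, "")
+//	if err != nil {
+//		return err
+//	}
+//	defer d.Put(id)
+//	// ... use dir ...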
+func (d *Driver) Put(id string) error {
+	title := "lcowdriver: put"
+	logrus.Debugf("%s %s", title, id)
+
+	if err := d.startUvm(fmt.Sprintf("put %s", id)); err != nil {
+		return err
+	}
+
+	d.cacheMu.Lock()
+	// Bad news if we are unmounting something that isn't in the cache.
+	entry, ok := d.cache[id]
+	if !ok {
+		d.cacheMu.Unlock()
+		return fmt.Errorf("%s possible ref-count error, or an invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
+	}
+
+	// Are we just decrementing the reference count?
+	if entry.refCount > 1 {
+		entry.refCount--
+		d.cache[id] = entry
+		logrus.Debugf("%s %s: refCount decremented to %d", title, id, entry.refCount)
+		d.cacheMu.Unlock()
+		return nil
+	}
+
+	// No more references, so tear it down if previously hot-added
+	if entry.isSandbox {
+		logrus.Debugf("%s %s: Hot-Removing %s", title, id, entry.hostPath)
+		if err := d.config.HotRemoveVhd(entry.hostPath); err != nil {
+			d.cacheMu.Unlock()
+			return fmt.Errorf("%s failed to hot-remove %s from service utility VM: %s", title, entry.hostPath, err)
+		}
+	}
+
+	// @jhowardmsft TEMPORARY FIX WHILE WAITING FOR HOT-REMOVE TO BE FIXED IN PLATFORM
+	//d.terminateUvm(fmt.Sprintf("put %s", id))
+
+	// Remove from the cache map.
+	delete(d.cache, id)
+	d.cacheMu.Unlock()
+
+	logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, entry.hostPath, entry.uvmPath)
+	return nil
+}
+
+// Cleanup ensures the information the driver stores is properly removed.
+// We use this opportunity to clean up any -removing folders which may
+// still be left if the daemon was killed while it was removing a layer.
+func (d *Driver) Cleanup() error {
+	title := "lcowdriver: cleanup"
+	logrus.Debugf(title)
+
+	d.cacheMu.Lock()
+	for k, v := range d.cache {
+		logrus.Debugf("%s cache entry: %s: %+v", title, k, v)
+		if v.refCount > 0 {
+			logrus.Warnf("%s leaked %s: %+v", title, k, v)
+		}
+	}
+	d.cacheMu.Unlock()
+
+	items, err := ioutil.ReadDir(d.homeDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	// Note we don't return an error below - it's possible the files
+	// are locked. However, next time around after the daemon exits,
+	// we will likely be able to clean up successfully. Instead we log
+	// warnings if there are errors.
+	for _, item := range items {
+		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
+			if err := os.RemoveAll(filepath.Join(d.homeDir, item.Name())); err != nil {
+				logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err)
+			} else {
+				logrus.Infof("%s cleaned up %s", title, item.Name())
+			}
+		}
+	}
+	return nil
+}
+
+// Diff takes a layer (and its parent layer which may be null, but
+// is ignored by this implementation below) and returns a reader for
+// a tarstream representing the layer's contents. The id could be
+// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics
+// of this function dictate that the layer is already mounted.
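+// In other words, the caller must hold a reference from a prior Get; Diff
+// returns an error if the id is not present in the cache.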
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + title := "lcowdriver: diff:" + logrus.Debugf("%s id %s", title, id) + + if err := d.startUvm(fmt.Sprintf("diff %s", id)); err != nil { + return nil, err + } + + d.cacheMu.Lock() + if _, ok := d.cache[id]; !ok { + d.cacheMu.Unlock() + return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id) + } + cacheEntry := d.cache[id] + d.cacheMu.Unlock() + + // Stat to get size + fileInfo, err := os.Stat(cacheEntry.hostPath) + if err != nil { + return nil, fmt.Errorf("%s failed to stat %s: %s", title, cacheEntry.hostPath, err) + } + + // Then obtain the tar stream for it + logrus.Debugf("%s %s, size %d, isSandbox %t", title, cacheEntry.hostPath, fileInfo.Size(), cacheEntry.isSandbox) + tarReadCloser, err := d.config.VhdToTar(cacheEntry.hostPath, cacheEntry.uvmPath, cacheEntry.isSandbox, fileInfo.Size()) + if err != nil { + return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err) + } + logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent) + return tarReadCloser, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. The layer should not be mounted when calling +// this function. Another way of describing this is that ApplyDiff writes +// to a new layer (a VHD in LCOW) the contents of a tarstream it's given. +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + logrus.Debugf("lcowdriver: applydiff: id %s", id) + + if err := d.startUvm(fmt.Sprintf("applydiff %s", id)); err != nil { + return 0, err + } + + return d.config.TarToVhd(filepath.Join(d.homeDir, id, "layer.vhd"), diff) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent) + // TODO @gupta-ak. Needs implementation with assistance from service VM + return nil, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + logrus.Debugf("lcowdriver: diffsize: id %s", id) + // TODO @gupta-ak. Needs implementation with assistance from service VM + return 0, nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + logrus.Debugf("lcowdriver: getmetadata: id %s", id) + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.homeDir, filepath.Base(id)) +} + +// getLayerChain returns the layer chain information. 
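+// The chain is persisted as a JSON array of parent layer directories in a
+// "layerchain.json" file inside the layer directory, e.g. (paths purely
+// illustrative):
+//
+//	["C:\\ProgramData\\docker\\lcow\\<parent>", "C:\\ProgramData\\docker\\lcow\\<grandparent>"]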
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath)
+	content, err := ioutil.ReadFile(jPath)
+	if os.IsNotExist(err) {
+		return nil, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err)
+	}
+
+	var layerChain []string
+	err = json.Unmarshal(content, &layerChain)
+	if err != nil {
+		return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshal layerchain file %s: %s", id, jPath, err)
+	}
+	return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+	content, err := json.Marshal(&chain)
+	if err != nil {
+		return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshal layerchain json: %s", id, err)
+	}
+
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath)
+	err = ioutil.WriteFile(jPath, content, 0600)
+	if err != nil {
+		return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err)
+	}
+	return nil
+}
diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go
new file mode 100644
index 0000000..666a5c0
--- /dev/null
+++ b/daemon/graphdriver/overlay/copy.go
@@ -0,0 +1,174 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type copyFlags int
+
+const (
+	copyHardlink copyFlags = 1 << iota
+)
+
+func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func copyDir(srcDir, dstDir string, flags copyFlags) error {
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		isHardlink := false
+
+		switch f.Mode() & os.ModeType {
+		case 0: // Regular file
+			if flags&copyHardlink != 0 {
+				isHardlink = true
+				if err := os.Link(srcPath, dstPath); err != nil {
+					return err
+				}
+			} else {
+				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
+					return err
+				}
+			}
+
+		case os.ModeDir:
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case os.ModeSymlink:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case os.ModeNamedPipe:
+			fallthrough
+		case os.ModeSocket:
+			if rsystem.RunningInUserNS() {
+				// cannot create fifos or devices when running in a user namespace
+				return nil
+			}
+			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case os.ModeDevice:
+			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unknown file type for %s", srcPath)
+		}
+
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
+		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+			return err
+		}
+
+		// We need to copy this attribute if it appears in an overlay upper layer, as
+		// this function is used to copy those. It is set by overlay if a directory
+		// is removed and then re-created and should not inherit anything from the
+		// same dir in the lower dir.
+		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+			return err
+		}
+
+		isSymlink := f.Mode()&os.ModeSymlink != 0
+
+		// There is no LChmod, so ignore mode for symlink. Also, this
+		// must happen after chown, as that can modify the file mode
+		if !isSymlink {
+			if err := os.Chmod(dstPath, f.Mode()); err != nil {
+				return err
+			}
+		}
+
+		// system.Chtimes doesn't support a NOFOLLOW flag atm
+		if !isSymlink {
+			aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+			mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
+			if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
+				return err
+			}
+		} else {
+			ts := []syscall.Timespec{stat.Atim, stat.Mtim}
+			if err := system.LUtimesNano(dstPath, ts); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return err
+}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
new file mode 100644
index 0000000..7f849c9
--- /dev/null
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -0,0 +1,469 @@
+// +build linux
+
+package overlay
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/overlayutils"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/fsutils"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/locker"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/selinux/go-selinux/label"
+)
+
+// This is a small wrapper over the NaiveDiffWriter that lets us have a custom
+// implementation of ApplyDiff()
+
+var (
+	// ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer.
+	ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
+	backingFs            = ""
+)
+
+// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with the ApplyDiff method.
+type ApplyDiffProtoDriver interface {
+	graphdriver.ProtoDriver
+	// ApplyDiff writes the diff to the archive for the given id and parent id.
+	// It returns the size in bytes written if successful; otherwise the error ErrApplyDiffFallback is returned.
+	ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+}
+
+type naiveDiffDriverWithApply struct {
+	graphdriver.Driver
+	applyDiff ApplyDiffProtoDriver
+}
+
+// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff.
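+// A sketch of how this wrapper is assembled (mirroring Init below):
+//
+//	d := &Driver{home: home /* ... */}                // implements ApplyDiffProtoDriver
+//	drv := NaiveDiffDriverWithApply(d, uidMaps, gidMaps)
+//	// drv.ApplyDiff tries d.ApplyDiff first, and transparently falls back
+//	// to the naive diff implementation when it returns ErrApplyDiffFallback.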
+func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + supportsDType bool + locker *locker.Locker +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. 
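+// Init is normally reached through the graphdriver registry rather than being
+// called directly; a sketch of that path (the root path is illustrative):
+//
+//	d, err := graphdriver.GetDriver("overlay", nil,
+//		graphdriver.Options{Root: "/var/lib/docker"})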
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	if err := supportsOverlay(); err != nil {
+		return nil, graphdriver.ErrNotSupported
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
+	}
+
+	switch fsMagic {
+	case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs:
+		logrus.Errorf("'overlay' is not supported over %s", backingFs)
+		return nil, graphdriver.ErrIncompatibleFS
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	// Create the driver home dir
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	supportsDType, err := fsutils.SupportsDType(home)
+	if err != nil {
+		return nil, err
+	}
+	if !supportsDType {
+		// not a fatal error until v17.12 (#27443)
+		logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
+	}
+
+	d := &Driver{
+		home:          home,
+		uidMaps:       uidMaps,
+		gidMaps:       gidMaps,
+		ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
+		supportsDType: supportsDType,
+		locker:        locker.New(),
+	}
+
+	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
+}
+
+func supportsOverlay() error {
+	// Try to load the overlay module first before looking at
+	// /proc/filesystems to see whether overlay is supported
+	exec.Command("modprobe", "overlay").Run()
+
+	f, err := os.Open("/proc/filesystems")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if s.Text() == "nodev\toverlay" {
+			return nil
+		}
+	}
+	logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure the kernel is new enough and has overlay support loaded.")
+	return graphdriver.ErrNotSupported
+}
+
+func (d *Driver) String() string {
+	return "overlay"
+}
+
+// Status returns current driver information in a two-dimensional string array.
+// Output contains "Backing Filesystem" used in this implementation.
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+		{"Supports d_type", strconv.FormatBool(d.supportsDType)},
+	}
+}
+
+// GetMetadata returns metadata about the overlay driver such as the RootDir,
+// LowerDir, UpperDir, WorkDir and MergedDir used to store data.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := make(map[string]string)
+
+	// If id has a root, it is an image
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		metadata["RootDir"] = rootDir
+		return metadata, nil
+	}
+
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return nil, err
+	}
+
+	metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
+	metadata["UpperDir"] = path.Join(dir, "upper")
+	metadata["WorkDir"] = path.Join(dir, "work")
+	metadata["MergedDir"] = path.Join(dir, "merged")
+
+	return metadata, nil
+}
+
+// Cleanup any state created by overlay which should be cleaned when the daemon
+// is shut down. For now, we just have to unmount the bind mount of the home
+// directory that Init created (via mount.MakePrivate).
+func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + return system.EnsureRemoveAll(d.dir(id)) +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
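+// The mount performed here is equivalent to the following (sketch; paths
+// shown relative to the driver home):
+//
+//	mount -t overlay overlay \
+//	  -o lowerdir=<lower-id>/root,upperdir=<id>/upper,workdir=<id>/work <id>/merged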
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + return rootDir, nil + } + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + var ( + lowerDir = path.Join(d.dir(string(lowerID)), "root") + upperDir = path.Join(dir, "upper") + workDir = path.Join(dir, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + ) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + // If id has a root, just return + if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil { + return nil + } + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return nil +} + +// ApplyDiff applies the new layer on top of the root, if parent does not exist with will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. 
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/overlay/overlay_test.go b/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 0000000..34b6d80 --- /dev/null +++ b/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,93 @@ +// +build linux + +package overlay + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlay50LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 50, "overlay") +} + +// Fails due to bug in calculating changes after apply +// likely related to https://github.com/docker/docker/issues/21555 +func TestOverlayDiffApply10Files(t *testing.T) { + t.Skipf("Fails to compute changes after apply intermittently") + graphtest.DriverTestDiffApply(t, 10, "overlay") +} + +func TestOverlayChanges(t *testing.T) { + t.Skipf("Fails to compute changes intermittently") + graphtest.DriverTestChanges(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, "overlay") +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, "overlay") +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, "overlay") +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, "overlay") +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, "overlay") +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + 
graphtest.DriverBenchDiffN(b, 10000, 10, "overlay") +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, "overlay") +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay") +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, "overlay") +} diff --git a/daemon/graphdriver/overlay/overlay_unsupported.go b/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 0000000..3dbb4de --- /dev/null +++ b/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/daemon/graphdriver/overlay2/check.go b/daemon/graphdriver/overlay2/check.go new file mode 100644 index 0000000..53a7199 --- /dev/null +++ b/daemon/graphdriver/overlay2/check.go @@ -0,0 +1,79 @@ +// +build linux + +package overlay2 + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory. When this bug exists naive diff should be used. +func hasOpaqueCopyUpBug(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + return nil +} diff --git a/daemon/graphdriver/overlay2/mount.go b/daemon/graphdriver/overlay2/mount.go new file mode 100644 index 0000000..60e248b --- /dev/null +++ 
diff --git a/daemon/graphdriver/overlay2/mount.go b/daemon/graphdriver/overlay2/mount.go
new file mode 100644
index 0000000..60e248b
--- /dev/null
+++ b/daemon/graphdriver/overlay2/mount.go
@@ -0,0 +1,88 @@
+// +build linux
+
+package overlay2
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"os"
+	"runtime"
+	"syscall"
+
+	"github.com/docker/docker/pkg/reexec"
+)
+
+func init() {
+	reexec.Register("docker-mountfrom", mountFromMain)
+}
+
+func fatal(err error) {
+	fmt.Fprint(os.Stderr, err)
+	os.Exit(1)
+}
+
+type mountOptions struct {
+	Device string
+	Target string
+	Type   string
+	Label  string
+	Flag   uint32
+}
+
+func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
+	options := &mountOptions{
+		Device: device,
+		Target: target,
+		Type:   mType,
+		Flag:   uint32(flags),
+		Label:  label,
+	}
+
+	cmd := reexec.Command("docker-mountfrom", dir)
+	w, err := cmd.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+	}
+
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+	}
+	// write the options to the pipe for the mount re-exec child to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
+	}
+	return nil
+}
+
+// mountFromMain is the entry-point for docker-mountfrom on re-exec.
+func mountFromMain() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *mountOptions
+
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	if err := os.Chdir(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
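
Editorial sketch of how the helper above is meant to be called (the real call site is the page-size fallback in Get() in overlay.go below; the layout here is illustrative). The re-exec child chdirs into dir before calling mount(2), which is what lets every path in the mount data stay relative and short:

func exampleMountFrom(home string) error {
	// e.g. home = "/var/lib/docker/overlay2"; ids and link names illustrative
	opts := "lowerdir=l/AAAA:l/BBBB,upperdir=container1/diff,workdir=container1/work"
	// all paths in opts are resolved relative to home by the re-exec child
	return mountFrom(home, "overlay", "container1/merged", "overlay", 0, opts)
}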
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
new file mode 100644
index 0000000..5af0e90
--- /dev/null
+++ b/daemon/graphdriver/overlay2/overlay.go
@@ -0,0 +1,728 @@
+// +build linux
+
+package overlay2
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/overlayutils"
+	"github.com/docker/docker/daemon/graphdriver/quota"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/fsutils"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/locker"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/system"
+	units "github.com/docker/go-units"
+
+	"github.com/opencontainers/selinux/go-selinux/label"
+)
+
+var (
+	// untar defines the untar method
+	untar = chrootarchive.UntarUncompressed
+)
+
+// This backend uses the overlay union filesystem for containers
+// with diff directories for each layer.
+
+// This version of the overlay driver requires at least kernel
+// 4.0.0 in order to support mounting multiple diff directories.
+
+// Each container/image has at least a "diff" directory and "link" file.
+// If there are lower layers, there is also a "lower" file, as well as
+// "merged" and "work" directories. The "diff" directory
+// has the upper layer of the overlay and is used to capture any
+// changes to the layer. The "lower" file contains all the lower layer
+// mounts separated by ":" and ordered from uppermost to lowermost
+// layers. The overlay itself is mounted in the "merged" directory,
+// and the "work" dir is needed for overlay to work.
+
+// The "link" file for each layer contains a unique string for the layer.
+// Under the "l" directory at the root there will be a symbolic link
+// with that unique string pointing to the "diff" directory for the layer.
+// The symbolic links are used to reference lower layers in the "lower"
+// file and on mount. The links are used to shorten the total length
+// of a layer reference without requiring changes to the layer identifier
+// or root directory. Mounts are always done relative to root and
+// referencing the symbolic links in order to ensure the number of
+// lower directories can fit in a single page for making the mount
+// syscall. A hard upper limit of 128 lower layers is enforced to ensure
+// that mounts do not fail due to length.
+
+const (
+	driverName = "overlay2"
+	linkDir    = "l"
+	lowerFile  = "lower"
+	maxDepth   = 128
+
+	// idLength represents the number of random characters
+	// which can be used to create the unique link identifier
+	// for every layer. If this value is too long then the
+	// page size limit for the mount command may be exceeded.
+	// The idLength should be selected such that the following equation
+	// is true (512 is a buffer for label metadata).
+	// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+	idLength = 26
+)
+
+type overlayOptions struct {
+	overrideKernelCheck bool
+	quota               quota.Quota
+}
+
+// Driver contains information about the home directory and the list of
+// active mounts that are created using this driver.
+type Driver struct {
+	home          string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
+	ctr           *graphdriver.RefCounter
+	quotaCtl      *quota.Control
+	options       overlayOptions
+	naiveDiff     graphdriver.DiffDriver
+	supportsDType bool
+	locker        *locker.Locker
+}
+
+var (
+	backingFs             = ""
+	projectQuotaSupported = false
+
+	useNaiveDiffLock sync.Once
+	useNaiveDiffOnly bool
+)
+
+func init() {
+	graphdriver.Register(driverName, Init)
+}
+
+// Init returns a native diff driver for the overlay filesystem.
+// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned.
+// If overlay is not supported over the backing filesystem, graphdriver.ErrIncompatibleFS is returned.
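
Editorial aside: a quick sanity check of the idLength bound stated in the const block above. Each lower entry costs idLength + len(linkDir) + 1 bytes, so with the values used here (26 + 1 + 1) * 128 = 3584, which is exactly 4096 - 512; the bound holds with zero slack for a 4096-byte page (the page size Get() assumes below):

func checkLinkBound() bool {
	const pageSize = 4096 // assumed, matching the cap applied in Get() below
	// (26 + 1 + 1) * 128 = 3584 <= 4096 - 512 = 3584
	return (idLength+len(linkDir)+1)*maxDepth <= pageSize-512
}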
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicBtrfs: + // Support for OverlayFS on BTRFS was added in kernel 4.7 + // See https://btrfs.wiki.kernel.org/index.php/Changelog + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + logrus.Errorf("'overlay2' requires kernel 4.7 to use on %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + logrus.Warn("Using pre-4.7.0 kernel for overlay2 on btrfs, may require kernel update") + } + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v17.12 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + locker: locker.New(), + options: *opts, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 { + return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) + } + } else if opts.quota.Size > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. + return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. 
Found %v", backingFs)
+	}
+
+	logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
+
+	return d, nil
+}
+
+func parseOptions(options []string) (*overlayOptions, error) {
+	o := &overlayOptions{}
+	for _, option := range options {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return nil, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "overlay2.override_kernel_check":
+			o.overrideKernelCheck, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+		case "overlay2.size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			o.quota.Size = uint64(size)
+		default:
+			return nil, fmt.Errorf("overlay2: unknown option %s", key)
+		}
+	}
+	return o, nil
+}
+
+func supportsOverlay() error {
+	// Try to modprobe overlay first before looking at
+	// /proc/filesystems to check whether overlay is supported
+	exec.Command("modprobe", "overlay").Run()
+
+	f, err := os.Open("/proc/filesystems")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if s.Text() == "nodev\toverlay" {
+			return nil
+		}
+	}
+	logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+	return graphdriver.ErrNotSupported
+}
+
+func useNaiveDiff(home string) bool {
+	useNaiveDiffLock.Do(func() {
+		if err := hasOpaqueCopyUpBug(home); err != nil {
+			logrus.Warnf("Not using native diff for overlay2: %v", err)
+			useNaiveDiffOnly = true
+		}
+	})
+	return useNaiveDiffOnly
+}
+
+func (d *Driver) String() string {
+	return driverName
+}
+
+// Status returns current driver information in a two-dimensional string array.
+// Output contains the "Backing Filesystem" used in this implementation.
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+		{"Supports d_type", strconv.FormatBool(d.supportsDType)},
+		{"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))},
+	}
+}
+
+// GetMetadata returns metadata about the overlay driver such as the
+// LowerDir, UpperDir, WorkDir, and MergedDir used to store data.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := map[string]string{
+		"WorkDir":   path.Join(dir, "work"),
+		"MergedDir": path.Join(dir, "merged"),
+		"UpperDir":  path.Join(dir, "diff"),
+	}
+
+	lowerDirs, err := d.getLowerDirs(id)
+	if err != nil {
+		return nil, err
+	}
+	if len(lowerDirs) > 0 {
+		metadata["LowerDir"] = strings.Join(lowerDirs, ":")
+	}
+
+	return metadata, nil
+}
+
+// Cleanup any state created by overlay which should be cleaned when the
+// daemon is being shut down. For now, we just have to unmount the bind
+// mount we created.
+func (d *Driver) Cleanup() error {
+	return mount.Unmount(d.home)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
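
For reference, a hedged sketch of how daemon --storage-opt values would reach parseOptions above; the two keys are the only ones its switch accepts, and the values are illustrative:

func exampleParseOptions() error {
	opts, err := parseOptions([]string{
		"overlay2.override_kernel_check=true",
		"overlay2.size=20GB", // parsed by units.RAMInBytes, i.e. binary multiples
	})
	if err != nil {
		return err
	}
	_ = opts.overrideKernelCheck // true
	_ = opts.quota.Size          // 20 * 1024 * 1024 * 1024
	return nil
}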
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + return d.create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } + } + return d.create(id, parent, opts) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + 
parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + splitLowers := strings.Split(string(lowers), ":") + absLowers := make([]string, len(splitLowers)) + for i, s := range splitLowers { + absLowers[i] = path.Join(d.home, s) + } + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) + mountData := label.FormatMountLabel(opts, mountLabel) + mount := syscall.Mount + mountTarget := mergedDir + + pageSize := syscall.Getpagesize() + + // Go can return a larger page size than supported by the system + // as of go 1.7. This will be fixed in 1.8 and this block can be + // removed when building with 1.8. + // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 + // See https://github.com/docker/docker/issues/27384 + if pageSize > 4096 { + pageSize = 4096 + } + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. 
+	if len(mountData) > pageSize {
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
+		mountData = label.FormatMountLabel(opts, mountLabel)
+		if len(mountData) > pageSize {
+			return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
+		}
+
+		mount = func(source string, target string, mType string, flags uintptr, label string) error {
+			return mountFrom(d.home, source, target, mType, flags, label)
+		}
+		mountTarget = path.Join(id, "merged")
+	}
+
+	if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+	}
+
+	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return "", err
+	}
+
+	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		return "", err
+	}
+
+	return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	dir := d.dir(id)
+	_, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+	if err != nil {
+		// If no lower, no mount happened and just return directly
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	mountpoint := path.Join(dir, "merged")
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
+		return nil
+	}
+	if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil {
+		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+	}
+	return nil
+}
+
+// Exists checks to see if the layer directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}
+
+// isParent reports whether the passed-in parent is the direct parent of the passed-in layer.
+func (d *Driver) isParent(id, parent string) bool {
+	lowers, err := d.getLowerDirs(id)
+	if err != nil {
+		return false
+	}
+	if parent == "" && len(lowers) > 0 {
+		return false
+	}
+
+	parentDir := d.dir(parent)
+	var ld string
+	if len(lowers) > 0 {
+		ld = filepath.Dir(lowers[0])
+	}
+	if ld == "" && parent == "" {
+		return true
+	}
+	return ld == parentDir
+}
+
+// ApplyDiff applies the new layer diff into the layer's diff directory.
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+	if !d.isParent(id, parent) {
+		return d.naiveDiff.ApplyDiff(id, parent, diff)
+	}
+
+	applyDir := d.getDiffPath(id)
+
+	logrus.Debugf("Applying tar in %s", applyDir)
+	// Overlay doesn't need the parent id to apply the diff
+	if err := untar(diff, applyDir, &archive.TarOptions{
+		UIDMaps:        d.uidMaps,
+		GIDMaps:        d.gidMaps,
+		WhiteoutFormat: archive.OverlayWhiteoutFormat,
+	}); err != nil {
+		return 0, err
+	}
+
+	// FIXME: Instead of syncing all the filesystems we should be fsyncing each
+	// file as the tar archive gets unpacked
+	syscall.Sync()
+
+	return directory.Size(applyDir)
+}
+
+func (d *Driver) getDiffPath(id string) string {
+	dir := d.dir(id)
+
+	return path.Join(dir, "diff")
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
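
The diff-related methods here share one fast-path guard: useNaiveDiff(d.home) || !d.isParent(id, parent). A hedged sketch of the native path under that guard (layer ids illustrative; Diff is defined just below): the layer's own diff directory is tarred and untarred directly, with overlay whiteouts preserved, and no merged mount is needed:

func exampleNativeDiff(d *Driver) error {
	// tars <home>/layerA/diff, uncompressed, keeping overlay whiteouts
	rc, err := d.Diff("layerA", "parentID")
	if err != nil {
		return err
	}
	defer rc.Close()
	// untars the stream into <home>/layerB/diff without mounting anything
	_, err = d.ApplyDiff("layerB", "parentID", rc)
	return err
}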
+func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+		return d.naiveDiff.DiffSize(id, parent)
+	}
+	return directory.Size(d.getDiffPath(id))
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+		return d.naiveDiff.Diff(id, parent)
+	}
+
+	diffPath := d.getDiffPath(id)
+	logrus.Debugf("Tar with options on %s", diffPath)
+	return archive.TarWithOptions(diffPath, &archive.TarOptions{
+		Compression:    archive.Uncompressed,
+		UIDMaps:        d.uidMaps,
+		GIDMaps:        d.gidMaps,
+		WhiteoutFormat: archive.OverlayWhiteoutFormat,
+	})
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+		return d.naiveDiff.Changes(id, parent)
+	}
+	// Overlay doesn't have snapshots, so we need to get changes from all parent
+	// layers.
+	diffPath := d.getDiffPath(id)
+	layers, err := d.getLowerDirs(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return archive.OverlayChanges(layers, diffPath)
+}
diff --git a/daemon/graphdriver/overlay2/overlay_test.go b/daemon/graphdriver/overlay2/overlay_test.go
new file mode 100644
index 0000000..cf77ff2
--- /dev/null
+++ b/daemon/graphdriver/overlay2/overlay_test.go
@@ -0,0 +1,121 @@
+// +build linux
+
+package overlay2
+
+import (
+	"io/ioutil"
+	"os"
+	"syscall"
+	"testing"
+
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+func init() {
+	// Do not use chroot, to speed up run time and to allow archive
+	// errors or hangs to be debugged directly from the test process.
+ untar = archive.UntarUncompressed + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer + + reexec.Init() +} + +func cdMountFrom(dir, device, target, mType, label string) error { + wd, err := os.Getwd() + if err != nil { + return err + } + os.Chdir(dir) + defer os.Chdir(wd) + + return syscall.Mount(device, target, mType, 0, label) +} + +func skipIfNaive(t *testing.T) { + td, err := ioutil.TempDir("", "naive-check-") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(td) + + if useNaiveDiff(td) { + t.Skipf("Cannot run test with naive diff") + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, driverName) +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, driverName) +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, driverName) +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, driverName) +} + +func TestOverlay128LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 128, driverName) +} + +func TestOverlayDiffApply10Files(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestDiffApply(t, 10, driverName) +} + +func TestOverlayChanges(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestChanges(t, driverName) +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, driverName) +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, driverName) +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, driverName) +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, driverName) +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, driverName) +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, driverName) +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, driverName) +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, driverName) +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, driverName) +} diff --git a/daemon/graphdriver/overlay2/overlay_unsupported.go b/daemon/graphdriver/overlay2/overlay_unsupported.go new file mode 100644 index 0000000..e5ac4ca --- /dev/null +++ b/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/daemon/graphdriver/overlay2/randomid.go b/daemon/graphdriver/overlay2/randomid.go new file mode 100644 index 0000000..af5cb65 --- /dev/null +++ b/daemon/graphdriver/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. 
Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/daemon/graphdriver/overlayutils/overlayutils.go b/daemon/graphdriver/overlayutils/overlayutils.go new file mode 100644 index 0000000..7491c34 --- /dev/null +++ b/daemon/graphdriver/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type support will no longer be supported in Docker 17.12." + return errors.New(msg) +} diff --git a/daemon/graphdriver/plugin.go b/daemon/graphdriver/plugin.go new file mode 100644 index 0000000..f6852f0 --- /dev/null +++ b/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. 
+ Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl, Capabilities{}} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/daemon/graphdriver/proxy.go b/daemon/graphdriver/proxy.go new file mode 100644 index 0000000..120afad --- /dev/null +++ b/daemon/graphdriver/proxy.go @@ -0,0 +1,263 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Capabilities() 
Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, 
errors.New(ret.Err)
+	}
+
+	return ret.Changes, nil
+}
+
+func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
+	var ret graphDriverResponse
+	if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
+		return -1, err
+	}
+	if ret.Err != "" {
+		return -1, errors.New(ret.Err)
+	}
+	return ret.Size, nil
+}
+
+func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
+	args := &graphDriverRequest{
+		ID:     id,
+		Parent: parent,
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil {
+		return -1, err
+	}
+	if ret.Err != "" {
+		return -1, errors.New(ret.Err)
+	}
+	return ret.Size, nil
+}
diff --git a/daemon/graphdriver/quota/projectquota.go b/daemon/graphdriver/quota/projectquota.go
new file mode 100644
index 0000000..e408d5f
--- /dev/null
+++ b/daemon/graphdriver/quota/projectquota.go
@@ -0,0 +1,339 @@
+// +build linux
+
+//
+// projectquota.go - implements XFS project quota controls
+// for setting quota limits on a newly created directory.
+// It currently supports the legacy XFS specific ioctls.
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32		fsx_xflags;
+	__u32		fsx_extsize;
+	__u32		fsx_nextents;
+	__u32		fsx_projid;
+	unsigned char	fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT	0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR	_IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR	_IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA	2
+#endif
+#ifndef XFS_PROJ_QUOTA
+#define XFS_PROJ_QUOTA	2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"syscall"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Quota limit params - currently we only control the blocks hard limit
+type Quota struct {
+	Size uint64
+}
+
+// Control - Context to be used by a storage driver (e.g. overlay)
+// that wants to apply project quotas to container dirs
+type Control struct {
+	backingFsBlockDev string
+	nextProjectID     uint32
+	quotas            map[string]uint32
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and an error) if project quota is not supported.
+//
+// First get the project id of the home directory.
+// This test will fail if the backing fs is not xfs.
+//
+// The xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+//    echo 999:/var/lib/docker/overlay2 >> /etc/projects
+//    echo docker:999 >> /etc/projid
+//    xfs_quota -x -c 'project -s docker' /<xfs mount point>
+//
+// In that case, the home directory project id will be used as a "start offset"
+// and all containers will be assigned larger project ids (e.g. >= 1000).
+// This is a way to prevent xfs_quota management from conflicting with docker.
+//
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
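
For orientation, a hedged sketch of how the overlay2 driver consumes this package (paths hypothetical): Init builds one Control per driver home, then each writable layer directory gets its own limit via SetQuota:

func exampleProjectQuota() error {
	// hypothetical driver home on an xfs filesystem mounted with pquota
	ctl, err := NewControl("/var/lib/docker/overlay2")
	if err != nil {
		return err // this is also how callers detect missing quota support
	}
	// cap one container's upper layer at 10 GiB
	return ctl.SetQuota("/var/lib/docker/overlay2/someid", Quota{Size: 10 * 1024 * 1024 * 1024})
}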
+// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer 
closeDir(dir)
+
+	var fsx C.struct_fsxattr
+	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+		uintptr(unsafe.Pointer(&fsx)))
+	if errno != 0 {
+		return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+	}
+
+	return uint32(fsx.fsx_projid), nil
+}
+
+// setProjectID - set the project id of path on xfs
+func setProjectID(targetPath string, projectID uint32) error {
+	dir, err := openDir(targetPath)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var fsx C.struct_fsxattr
+	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+		uintptr(unsafe.Pointer(&fsx)))
+	if errno != 0 {
+		return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
+	}
+	fsx.fsx_projid = C.__u32(projectID)
+	fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
+	_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+		uintptr(unsafe.Pointer(&fsx)))
+	if errno != 0 {
+		return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
+	}
+
+	return nil
+}
+
+// findNextProjectID - find the next project id to be used for containers
+// by scanning the driver home directory to find used project ids
+func (q *Control) findNextProjectID(home string) error {
+	files, err := ioutil.ReadDir(home)
+	if err != nil {
+		return fmt.Errorf("read directory failed: %s", home)
+	}
+	for _, file := range files {
+		if !file.IsDir() {
+			continue
+		}
+		path := filepath.Join(home, file.Name())
+		projid, err := getProjectID(path)
+		if err != nil {
+			return err
+		}
+		if projid > 0 {
+			q.quotas[path] = projid
+		}
+		if q.nextProjectID <= projid {
+			q.nextProjectID = projid + 1
+		}
+	}
+
+	return nil
+}
+
+func free(p *C.char) {
+	C.free(unsafe.Pointer(p))
+}
+
+func openDir(path string) (*C.DIR, error) {
+	Cpath := C.CString(path)
+	defer free(Cpath)
+
+	dir := C.opendir(Cpath)
+	if dir == nil {
+		return nil, fmt.Errorf("Can't open dir")
+	}
+	return dir, nil
+}
+
+func closeDir(dir *C.DIR) {
+	if dir != nil {
+		C.closedir(dir)
+	}
+}
+
+func getDirFd(dir *C.DIR) uintptr {
+	return uintptr(C.dirfd(dir))
+}
+
+// Get the backing block device of the driver home directory
+// and create a block device node under the home directory
+// to be used by quotactl commands
+func makeBackingFsDev(home string) (string, error) {
+	fileinfo, err := os.Stat(home)
+	if err != nil {
+		return "", err
+	}
+
+	backingFsBlockDev := path.Join(home, "backingFsBlockDev")
+	// Re-create just in case someone copied the home directory over to a new device
+	syscall.Unlink(backingFsBlockDev)
+	stat := fileinfo.Sys().(*syscall.Stat_t)
+	if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil {
+		return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
+	}
+
+	return backingFsBlockDev, nil
+}
diff --git a/daemon/graphdriver/register/register_aufs.go b/daemon/graphdriver/register/register_aufs.go
new file mode 100644
index 0000000..262954d
--- /dev/null
+++ b/daemon/graphdriver/register/register_aufs.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_aufs,linux
+
+package register
+
+import (
+	// register the aufs graphdriver
+	_ "github.com/docker/docker/daemon/graphdriver/aufs"
+)
diff --git a/daemon/graphdriver/register/register_btrfs.go b/daemon/graphdriver/register/register_btrfs.go
new file mode 100644
index 0000000..f456cc5
--- /dev/null
+++ b/daemon/graphdriver/register/register_btrfs.go
@@ -0,0 +1,8 @@
+// +build !exclude_graphdriver_btrfs,linux
+
+package
register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/daemon/graphdriver/register/register_devicemapper.go b/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 0000000..bb2e9ef --- /dev/null +++ b/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git a/daemon/graphdriver/register/register_overlay.go b/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 0000000..9ba849c --- /dev/null +++ b/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/daemon/graphdriver/register/register_vfs.go b/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 0000000..98fad23 --- /dev/null +++ b/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/daemon/graphdriver/register/register_windows.go b/daemon/graphdriver/register/register_windows.go new file mode 100644 index 0000000..5bb1fd6 --- /dev/null +++ b/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,7 @@ +package register + +import ( + // register the windows graph drivers + _ "github.com/docker/docker/daemon/graphdriver/lcow" + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/daemon/graphdriver/register/register_zfs.go b/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 0000000..8f34e35 --- /dev/null +++ b/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go new file mode 100644 index 0000000..15a4de3 --- /dev/null +++ b/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,134 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), + } + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. 
It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + idMappings *idtools.IDMappings +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + return err + } + if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + return CopyWithTar(parentDir, dir) +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := system.EnsureRemoveAll(d.dir(id)); err != nil { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. 
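
Since Create copies the whole parent layer with CopyWithTar, vfs layering is simple but O(parent size) per new layer. A hedged sketch of the sequence the graphdriver stack performs (ids illustrative):

func exampleVfsLayers(d *Driver) error {
	if err := d.Create("base", "", nil); err != nil {
		return err
	}
	// "child" starts as a full copy of "base"; later writes are plain writes,
	// there is no copy-on-write sharing between the two directories
	if err := d.Create("child", "base", nil); err != nil {
		return err
	}
	dir, err := d.Get("child", "") // resolves to <home>/dir/child
	_ = dir
	return err
}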
+func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000..9ecf21d --- /dev/null +++ b/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go new file mode 100644 index 0000000..514650a --- /dev/null +++ b/daemon/graphdriver/windows/windows.go @@ -0,0 +1,960 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + units "github.com/docker/go-units" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. 
+ cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on a ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + size = syscall.MAX_PATH + 1 + // the buffer must be at least as long as the size reported to Windows below + buf = make([]uint16, size) + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = syscall.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id.
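To make win32FromHresult concrete: HRESULTs that wrap Win32 errors carry facility 7 (FACILITY_WIN32) in bits 16-28, so the helper unwraps those to the original Win32 error code and passes anything else through unchanged. A small worked example:

package main

import "fmt"

func win32FromHresult(hr uintptr) uintptr {
	if hr&0x1fff0000 == 0x00070000 {
		return hr & 0xffff
	}
	return hr
}

func main() {
	// 0x80070005 is HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED): the 0x0007xxxx
	// facility marker matches, so the low word (5) comes back.
	fmt.Printf("%#x -> %d\n", 0x80070005, win32FromHresult(0x80070005)) // 0x80070005 -> 5
	// A non-Win32 HRESULT such as E_FAIL is returned as-is.
	fmt.Printf("%#x -> %#x\n", 0x80004005, win32FromHresult(0x80004005))
}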
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. + // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. 
Also + consider enlightening the Remove() interface to have context of why + the remove is being called - that could improve efficiency by not + enumerating compute systems during a remove of a container as it's + not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id, mountLabel string) (string, error) { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + var dir string + + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err2) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", id, err) + } + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err2) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put releases a reference to the layer. When the last reference is released, +// the layer is unprepared and deactivated.
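Get and Put bracket every layer activation with a reference count so that concurrent users share a single prepared mount: only the first Get per layer activates and prepares it, and only the Put that drops the count back to zero tears it down. A stripped-down sketch of what graphdriver.RefCounter provides (simplified; the real type also consults a mount checker):

package main

import "sync"

// refCounter is an illustrative reduction of graphdriver.RefCounter.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (c *refCounter) Increment(id string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[id]++
	return c.counts[id]
}

func (c *refCounter) Decrement(id string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.counts[id] > 0 {
		c.counts[id]--
	}
	return c.counts[id]
}

func main() {
	c := &refCounter{counts: map[string]int{}}
	if c.Increment("layer1") == 1 {
		// first user: this is where ActivateLayer + PrepareLayer would run
	}
	if c.Decrement("layer1") == 0 {
		// last user: this is where UnprepareLayer + DeactivateLayer would run
	}
}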
+func (d *Driver) Put(id string) error { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + _, exists := d.cache[rID] + delete(d.cache, rID) + d.cacheMu.Unlock() + + // If the cache was not populated, then the layer was left unprepared and deactivated + if !exists { + return nil + } + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + items, err := ioutil.ReadDir(d.info.HomeDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to clean up successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { + logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) + } else { + logrus.Infof("Cleaned up %s", item.Name()) + } + } + } + + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +// The layer should be mounted when calling this function +func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return + } + + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + // Unprepare the layer: it must not be prepared (mounted) while it is exported. + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return nil, err + } + prepare := func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err) + } + } + + arch, err := d.exportLayer(rID, layerChain) + if err != nil { + prepare() + return + } + return ioutils.NewReadCloserWrapper(arch, func() error { + err := arch.Close() + prepare() + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function.
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + panicIfUsedByLcow() + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + panicIfUsedByLcow() + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + panicIfUsedByLcow() + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. 
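	// (Illustrative aside: archive.WhiteoutPrefix is ".wh.", so a deletion of
	// "Files/app/config.ini" is represented in the exported tar stream as an
	// empty entry named "Files/app/.wh.config.ini"; writeLayerFromTar below
	// reverses the mapping into a w.Remove() call on import.)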
+ hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) 
+ output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. + // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. + panic("Failed to disable process privileges while in non re-exec mode") + } + }() + } + + info := hcsshim.DriverInfo{ + Flavour: filterDriver, + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) + if err != nil { + return 0, err + } + + size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) + if err != nil { + return 0, err + } + + err = w.Close() + if err != nil { + return 0, err + } + + return size, nil +} + +// resolveID computes the layerID information based on the given id. +func (d *Driver) resolveID(id string) (string, error) { + content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + if os.IsNotExist(err) { + return id, nil + } else if err != nil { + return "", err + } + return string(content), nil +} + +// setID stores the layerId on disk. +func (d *Driver) setID(id, altID string) error { + return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) +} + +// getLayerChain returns the layer chain information. +func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err) + } + + return layerChain, nil +} + +// setLayerChain stores the layer chain information on disk.
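getLayerChain and setLayerChain persist the parent chain beside each layer as layerchain.json: a JSON array of absolute parent-layer directories, nearest parent first. A sketch of what gets written (the paths are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical chain for a layer whose parents are B (immediate) and A (base).
	chain := []string{
		`C:\ProgramData\docker\windowsfilter\<id-of-B>`,
		`C:\ProgramData\docker\windowsfilter\<id-of-A>`,
	}
	content, err := json.Marshal(chain)
	if err != nil {
		panic(err)
	}
	// This is the payload setLayerChain would write to layerchain.json.
	fmt.Println(string(content))
}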
+func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("Failed to marshall layerchain json - %s", err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("Unable to write layerchain file - %s", err) + } + + return nil +} + +type fileGetCloserWithBackupPrivileges struct { + path string +} + +func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { + if backupPath, ok := mutatedFiles[filename]; ok { + return os.Open(filepath.Join(fg.path, backupPath)) + } + + var f *os.File + // Open the file while holding the Windows backup privilege. This ensures that the + // file can be opened even if the caller does not actually have access to it according + // to the security descriptor. Also use sequential file access to avoid depleting the + // standby list - Microsoft VSO Bug Tracker #9900466 + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + path := longpath.AddPrefix(filepath.Join(fg.path, filename)) + p, err := syscall.UTF16FromString(path) + if err != nil { + return err + } + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) + if err != nil { + return &os.PathError{Op: "open", Path: path, Err: err} + } + f = os.NewFile(uintptr(h), path) + return nil + }) + return f, err +} + +func (fg *fileGetCloserWithBackupPrivileges) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + panicIfUsedByLcow() + id, err := d.resolveID(id) + if err != nil { + return nil, err + } + + return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil +} + +type storageOptions struct { + size uint64 +} + +func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { + options := storageOptions{} + + // Read size to change the block device size per container. 
+ for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + options.size = uint64(size) + default: + return nil, fmt.Errorf("Unknown storage option: %s", key) + } + } + return &options, nil +} diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 0000000..9c270c5 --- /dev/null +++ b/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 0000000..bc2b419 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,420 @@ +// +build linux freebsd solaris + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/selinux/go-selinux/label" +) + +type zfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. +// It takes base mount path and an array of options which are represented as key value pairs. +// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options. +func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/gid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + if err := mount.MakePrivate(base); err != nil { + return
nil, err + } + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. +type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to graph driver +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return map[string]string{ + "Mountpoint": d.mountPath(id), + "Dataset": d.zfsPath(id), + }, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
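cloneFilesystem above is where ZFS layering earns its keep: instead of copying data, it snapshots the parent dataset, clones the snapshot (with mountpoint=legacy so the driver controls mounting itself), and defer-destroys the snapshot so it is reclaimed automatically once no clone references it. The rough CLI equivalent, with hypothetical dataset names:

    zfs snapshot pool/docker/<parent-id>@<nanos>
    zfs clone -o mountpoint=legacy pool/docker/<parent-id>@<nanos> pool/docker/<new-id>
    zfs destroy -d pool/docker/<parent-id>@<nanos>   # deferred destroy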
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. 
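Tying create and setQuota together: a container created with --storage-opt size=10G has that value passed through verbatim as the dataset's quota property (ZFS itself parses the human-readable size). A minimal sketch of the same operation against the go-zfs bindings, with a hypothetical dataset name:

package main

import (
	"log"

	zfs "github.com/mistifyio/go-zfs"
)

func main() {
	// Hypothetical dataset name; in the driver this is d.zfsPath(id).
	fs, err := zfs.GetDataset("pool/docker/<container-id>")
	if err != nil {
		log.Fatal(err)
	}
	// ZFS accepts human-readable sizes for the quota property.
	if err := fs.SetProperty("quota", "10G"); err != nil {
		log.Fatal(err)
	}
}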
+func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/daemon/graphdriver/zfs/zfs_freebsd.go b/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 0000000..1c05fa7 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/daemon/graphdriver/zfs/zfs_linux.go b/daemon/graphdriver/zfs/zfs_linux.go new file mode 100644 index 0000000..52ed516 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/daemon/graphdriver/zfs/zfs_solaris.go b/daemon/graphdriver/zfs/zfs_solaris.go new file mode 100644 index 0000000..bb4a85b --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_solaris.go @@ -0,0 +1,59 @@ +// +build solaris,cgo + +package zfs + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "strings" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + + cs := C.CString(filepath.Dir(rootdir)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + C.free(unsafe.Pointer(buf)) + return graphdriver.ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return nil +} + +/* rootfs is introduced to comply with the OCI spec +which states that root filesystem must be mounted at /rootfs/ instead of / +*/ +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") + } + + return filepath.Join(id[:maxlen], "rootfs", "root") +} diff --git a/daemon/graphdriver/zfs/zfs_test.go b/daemon/graphdriver/zfs/zfs_test.go new file mode 100644 index 0000000..3e22928 --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_test.go @@ -0,0 +1,35 @@ +// +build linux + +package zfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestZfsSetup and TestZfsTeardown +func TestZfsSetup(t *testing.T) { + graphtest.GetDriver(t, "zfs") +} + +func TestZfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "zfs") +} + +func TestZfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "zfs") +} + +func TestZfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "zfs") +} + +func TestZfsSetQuota(t *testing.T) { + graphtest.DriverTestSetQuota(t, "zfs") +} + +func TestZfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go new file mode 100644 index 0000000..ce8daad --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/daemon/health.go b/daemon/health.go new file mode 100644 index 0000000..ffae746 --- /dev/null +++ b/daemon/health.go @@ -0,0 +1,395 @@ +package daemon + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/restartmanager" +) + +const ( + // Longest healthcheck probe output message to store. Longer messages will be truncated. + maxOutputLen = 4096 + + // Default interval between probe runs (from the end of the first to the start of the second). + // Also the time before the first probe. + defaultProbeInterval = 30 * time.Second + + // The maximum length of time a single probe run should take. If the probe takes longer + // than this, the check is considered to have failed. + defaultProbeTimeout = 30 * time.Second + + // The time given for the container to start before the health check starts considering + // the container unstable. Defaults to none. + defaultStartPeriod = 0 * time.Second + + // Default number of consecutive failures of the health check + // for the container to be considered unhealthy. 
+ defaultProbeRetries = 3 + + // Maximum number of entries to record + maxLogEntries = 5 +) + +const ( + // Exit status codes that can be returned by the probe command. + + exitStatusHealthy = 0 // Container is healthy + exitStatusUnhealthy = 1 // Container is unhealthy +) + +// probe implementations know how to run a particular type of probe. +type probe interface { + // Perform one run of the check. Returns the exit code and an optional + // short diagnostic string. + run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) +} + +// cmdProbe implements the "CMD" probe type. +type cmdProbe struct { + // Run the command with the system's default shell instead of execing it directly. + shell bool +} + +// exec the healthcheck command in the container. +// Returns the exit code and probe output (if any) +func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container) (*types.HealthcheckResult, error) { + cmdSlice := strslice.StrSlice(cntr.Config.Healthcheck.Test)[1:] + if p.shell { + cmdSlice = append(getShell(cntr.Config), cmdSlice...) + } + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) + execConfig := exec.NewConfig() + execConfig.OpenStdin = false + execConfig.OpenStdout = true + execConfig.OpenStderr = true + execConfig.ContainerID = cntr.ID + execConfig.DetachKeys = []byte{} + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = false + execConfig.Privileged = false + execConfig.User = cntr.Config.User + + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return nil, err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(execConfig.Tty, linkedEnv), execConfig.Env) + + d.registerExecCommand(cntr, execConfig) + d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + output := &limitedBuffer{} + err = d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + if err != nil { + return nil, err + } + info, err := d.getExecConfig(execConfig.ID) + if err != nil { + return nil, err + } + if info.ExitCode == nil { + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", cntr.ID) + } + // Note: Go's json package will handle invalid UTF-8 for us + out := output.String() + return &types.HealthcheckResult{ + End: time.Now(), + ExitCode: *info.ExitCode, + Output: out, + }, nil +} + +// Update the container's Status.Health struct based on the latest probe's result. +func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { + c.Lock() + defer c.Unlock() + + // probe may have been cancelled while waiting on lock. Ignore result then + select { + case <-done: + return + default: + } + + retries := c.Config.Healthcheck.Retries + if retries <= 0 { + retries = defaultProbeRetries + } + + h := c.State.Health + oldStatus := h.Status + + if len(h.Log) >= maxLogEntries { + h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) + } else { + h.Log = append(h.Log, result) + } + + if result.ExitCode == exitStatusHealthy { + h.FailingStreak = 0 + h.Status = types.Healthy + } else { // Failure (including invalid exit code) + shouldIncrementStreak := true + + // If the container is starting (i.e. we never had a successful health check) + // then we check if we are within the start period of the container in which + // case we do not increment the failure streak. 
+ if h.Status == types.Starting { + startPeriod := timeoutWithDefault(c.Config.Healthcheck.StartPeriod, defaultStartPeriod) + timeSinceStart := result.Start.Sub(c.State.StartedAt) + + // If still within the start period, then don't increment failing streak. + if timeSinceStart < startPeriod { + shouldIncrementStreak = false + } + } + + if shouldIncrementStreak { + h.FailingStreak++ + + if h.FailingStreak >= retries { + h.Status = types.Unhealthy + } + } + // Else we're starting or healthy. Stay in that state. + } + + // replicate Health status changes + if err := c.CheckpointTo(d.containersReplica); err != nil { + // queries will be inconsistent until the next probe runs or other state mutations + // checkpoint the container + logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err) + } + + if oldStatus != h.Status { + d.LogContainerEvent(c, "health_status: "+h.Status) + + if d.HasExperimental() && h.Status == types.Unhealthy { + restart, wait, err := c.RestartManager().ShouldRestart(0, false, time.Since(c.StartedAt), c.Health.Health) + if err == nil && restart { + logrus.Infof("Unhealthy container %v: restarting...", c.ID) + go func() { + err := <-wait + if err == nil { + d.stopHealthchecks(c) + if err := d.containerRestart(c, c.StopTimeout()); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } + } else if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } + }() + } + } + } +} + +// Run the container's monitoring thread until notified via "stop". +// There is never more than one monitor thread running per container at a time. +func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { + probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) + probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) + for { + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + return + case <-time.After(probeInterval): + logrus.Debugf("Running health check for container %s ...", c.ID) + startTime := time.Now() + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + results := make(chan *types.HealthcheckResult, 1) + go func() { + healthChecksCounter.Inc() + result, err := probe.run(ctx, d, c) + if err != nil { + healthChecksFailedCounter.Inc() + logrus.Warnf("Health check for container %s error: %v", c.ID, err) + results <- &types.HealthcheckResult{ + ExitCode: -1, + Output: err.Error(), + Start: startTime, + End: time.Now(), + } + } else { + result.Start = startTime + logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) + results <- result + } + close(results) + }() + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). 
+ <-results + return + case result := <-results: + handleProbeResult(d, c, result, stop) + // Stop timeout + cancelProbe() + case <-ctx.Done(): + logrus.Debugf("Health check for container %s taking too long", c.ID) + handleProbeResult(d, c, &types.HealthcheckResult{ + ExitCode: -1, + Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), + Start: startTime, + End: time.Now(), + }, stop) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + } + } + } +} + +// Get a suitable probe implementation for the container's healthcheck configuration. +// Nil will be returned if no healthcheck was configured or NONE was set. +func getProbe(c *container.Container) probe { + config := c.Config.Healthcheck + if config == nil || len(config.Test) == 0 { + return nil + } + switch config.Test[0] { + case "CMD": + return &cmdProbe{shell: false} + case "CMD-SHELL": + return &cmdProbe{shell: true} + default: + logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + return nil + } +} + +// Ensure the health-check monitor is running or not, depending on the current +// state of the container. +// Called from monitor.go, with c locked. +func (d *Daemon) updateHealthMonitor(c *container.Container) { + h := c.State.Health + if h == nil { + return // No healthcheck configured + } + + probe := getProbe(c) + wantRunning := c.Running && !c.Paused && probe != nil + if wantRunning { + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor(d, c, stop, probe) + } + } else { + h.CloseMonitorChannel() + } +} + +// Reset the health state for a newly-started, restarted or restored container. +// initHealthMonitor is called from monitor.go and we should never be running +// two instances at once. +// Called with c locked. +func (d *Daemon) initHealthMonitor(c *container.Container) { + // If no healthcheck is setup then don't init the monitor + if getProbe(c) == nil { + return + } + + // This is needed in case we're auto-restarting + d.stopHealthchecks(c) + + if h := c.State.Health; h != nil { + h.Status = types.Starting + h.FailingStreak = 0 + } else { + h := &container.Health{} + h.Status = types.Starting + c.State.Health = h + } + + d.updateHealthMonitor(c) +} + +// Called when the container is being stopped (whether because the health check is +// failing or for any other reason). +func (d *Daemon) stopHealthchecks(c *container.Container) { + h := c.State.Health + if h != nil { + h.CloseMonitorChannel() + } +} + +// Buffer up to maxOutputLen bytes. Further data is discarded. +type limitedBuffer struct { + buf bytes.Buffer + mu sync.Mutex + truncated bool // indicates that data has been lost +} + +// Append to limitedBuffer while there is room. +func (b *limitedBuffer) Write(data []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + bufLen := b.buf.Len() + dataLen := len(data) + keep := min(maxOutputLen-bufLen, dataLen) + if keep > 0 { + b.buf.Write(data[:keep]) + } + if keep < dataLen { + b.truncated = true + } + return dataLen, nil +} + +// The contents of the buffer, with "..." appended if it overflowed. +func (b *limitedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + + out := b.buf.String() + if b.truncated { + out = out + "..." + } + return out +} + +// If configuredValue is zero, use defaultValue instead. 
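The defaults above (and timeoutWithDefault, next) only apply when the corresponding HealthConfig field is zero. For orientation, the configuration that feeds this monitor, whether it came from a Dockerfile HEALTHCHECK instruction or directly from the API, looks like this (the probe command and timings are chosen for the example):

package main

import (
	"fmt"
	"time"

	containertypes "github.com/docker/docker/api/types/container"
)

func main() {
	// Equivalent to the Dockerfile instruction:
	//   HEALTHCHECK --interval=10s --timeout=5s --retries=3 \
	//     CMD curl -f http://localhost/ || exit 1
	hc := &containertypes.HealthConfig{
		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval: 10 * time.Second, // 0 falls back to defaultProbeInterval (30s)
		Timeout:  5 * time.Second,  // 0 falls back to defaultProbeTimeout (30s)
		Retries:  3,                // <=0 falls back to defaultProbeRetries (3)
	}
	fmt.Println(hc.Test[0]) // "CMD-SHELL" selects the shell-wrapped probe
}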
+func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { + if configuredValue == 0 { + return defaultValue + } + return configuredValue +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func getShell(config *containertypes.Config) []string { + if len(config.Shell) != 0 { + return config.Shell + } + if runtime.GOOS != "windows" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/daemon/health_test.go b/daemon/health_test.go new file mode 100644 index 0000000..4fd8914 --- /dev/null +++ b/daemon/health_test.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func reset(c *container.Container) { + c.State = &container.State{} + c.State.Health = &container.Health{} + c.State.Health.Status = types.Starting +} + +func TestNoneHealthcheck(t *testing.T) { + c := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, + }, + }, + State: &container.State{}, + } + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + daemon := &Daemon{ + containersReplica: store, + } + + daemon.initHealthMonitor(c) + if c.State.Health != nil { + t.Error("Expecting Health to be nil, but was not") + } +} + +// FIXME(vdemeester) This takes around 3s… This is *way* too long +func TestHealthStates(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + expect := func(expected string) { + select { + case event := <-l: + ev := event.(eventtypes.Message) + if ev.Status != expected { + t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) + } + case <-time.After(1 * time.Second): + t.Errorf("Expecting event %#v, but got nothing\n", expected) + } + } + + c := &container.Container{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + }, + } + + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + + daemon := &Daemon{ + EventsService: e, + containersReplica: store, + } + + c.Config.Healthcheck = &containertypes.HealthConfig{ + Retries: 1, + } + + reset(c) + + handleResult := func(startTime time.Time, exitCode int) { + handleProbeResult(daemon, c, &types.HealthcheckResult{ + Start: startTime, + End: startTime, + ExitCode: exitCode, + }, nil) + } + + // starting -> failed -> success -> failed + + handleResult(c.State.StartedAt.Add(1*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(2*time.Second), 0) + expect("health_status: healthy") + + handleResult(c.State.StartedAt.Add(3*time.Second), 1) + expect("health_status: unhealthy") + + // Test retries + + reset(c) + c.Config.Healthcheck.Retries = 3 + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + handleResult(c.State.StartedAt.Add(40*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 2 { + t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(60*time.Second), 1) + expect("health_status: 
unhealthy") + + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + + // Test start period + + reset(c) + c.Config.Healthcheck.Retries = 2 + c.Config.Healthcheck.StartPeriod = 30 * time.Second + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(50*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 1 { + t.Errorf("Expecting FailingStreak=1, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } +} diff --git a/daemon/image.go b/daemon/image.go new file mode 100644 index 0000000..a51049d --- /dev/null +++ b/daemon/image.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + ref reference.Reference +} + +func (e ErrImageDoesNotExist) Error() string { + ref := e.ref + if named, ok := ref.(reference.Named); ok { + ref = reference.TagNameOnly(named) + } + return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) +} + +// GetImageIDAndPlatform returns an image ID and platform corresponding to the image referred to by +// refOrID. 
+func (daemon *Daemon) GetImageIDAndPlatform(refOrID string) (image.ID, string, error) {
+	ref, err := reference.ParseAnyReference(refOrID)
+	if err != nil {
+		return "", "", err
+	}
+	namedRef, ok := ref.(reference.Named)
+	if !ok {
+		digested, ok := ref.(reference.Digested)
+		if !ok {
+			return "", "", ErrImageDoesNotExist{ref}
+		}
+		id := image.IDFromDigest(digested.Digest())
+		for platform := range daemon.stores {
+			if _, err = daemon.stores[platform].imageStore.Get(id); err == nil {
+				return id, platform, nil
+			}
+		}
+		return "", "", ErrImageDoesNotExist{ref}
+	}
+
+	for platform := range daemon.stores {
+		if id, err := daemon.stores[platform].referenceStore.Get(namedRef); err == nil {
+			return image.IDFromDigest(id), platform, nil
+		}
+	}
+
+	// deprecated: repo:shortid https://github.com/docker/docker/pull/799
+	if tagged, ok := namedRef.(reference.Tagged); ok {
+		if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) {
+			for platform := range daemon.stores {
+				if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil {
+					for _, storeRef := range daemon.stores[platform].referenceStore.References(id.Digest()) {
+						if storeRef.Name() == namedRef.Name() {
+							return id, platform, nil
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Search based on ID
+	for platform := range daemon.stores {
+		if id, err := daemon.stores[platform].imageStore.Search(refOrID); err == nil {
+			return id, platform, nil
+		}
+	}
+
+	return "", "", ErrImageDoesNotExist{ref}
+}
+
+// GetImage returns an image corresponding to the image referred to by refOrID.
+func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
+	imgID, platform, err := daemon.GetImageIDAndPlatform(refOrID)
+	if err != nil {
+		return nil, err
+	}
+	return daemon.stores[platform].imageStore.Get(imgID)
+}
diff --git a/daemon/image_delete.go b/daemon/image_delete.go
new file mode 100644
index 0000000..4e22859
--- /dev/null
+++ b/daemon/image_delete.go
@@ -0,0 +1,413 @@
+package daemon
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+type conflictType int
+
+const (
+	conflictDependentChild conflictType = (1 << iota)
+	conflictRunningContainer
+	conflictActiveReference
+	conflictStoppedContainer
+	conflictHard = conflictDependentChild | conflictRunningContainer
+	conflictSoft = conflictActiveReference | conflictStoppedContainer
+)
+
+// ImageDelete deletes the image referenced by the given imageRef from this
+// daemon. The given imageRef can be an image ID, ID prefix, or a repository
+// reference (with an optional tag or digest, defaulting to the tag name
+// "latest"). There is differing behavior depending on whether the given
+// imageRef is a repository reference or not.
+//
+// If the given imageRef is a repository reference then that repository
+// reference will be removed. However, if any containers were created using
+// the same image reference, the repository reference cannot be removed
+// unless either there are other repository references to the same image or
+// force is true. Following removal of the repository reference, an attempt
+// is made to delete the referenced image itself as described below, but
+// quietly, meaning any image delete conflicts will cause the image to not
+// be deleted and the conflict will not be reported.
+//
+// There may be conflicts preventing deletion of an image and these conflicts
+// are divided into two categories grouped by their severity:
+//
+// Hard Conflict:
+// - a pull or build using the image.
+// - any descendant image.
+// - any running container using the image.
+//
+// Soft Conflict:
+// - any stopped container using the image.
+// - any repository tag or digest references to the image.
+//
+// The image cannot be removed if there are any hard conflicts; it can be
+// removed despite soft conflicts only if force is true.
+//
+// If prune is true, an attempt is also made to delete each ancestor image
+// quietly, meaning any delete conflicts will cause the image to not be
+// deleted and the conflict will not be reported.
+//
+// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph
+// package. This would require that we no longer need the daemon to determine
+// whether images are being used by a stopped or running container.
+func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
+	start := time.Now()
+	records := []types.ImageDeleteResponseItem{}
+
+	imgID, platform, err := daemon.GetImageIDAndPlatform(imageRef)
+	if err != nil {
+		return nil, daemon.imageNotExistToErrcode(err)
+	}
+
+	repoRefs := daemon.stores[platform].referenceStore.References(imgID.Digest())
+
+	var removedRepositoryRef bool
+	if !isImageIDPrefix(imgID.String(), imageRef) {
+		// A repository reference was given and should be removed
+		// first. We can only remove this reference if either force is
+		// true, there are multiple repository references to this
+		// image, or there are no containers using the given reference.
+		if !force && isSingleReference(repoRefs) {
+			if container := daemon.getContainerUsingImage(imgID); container != nil {
+				// If we removed the repository reference then
+				// this image would remain "dangling", and since
+				// we really want to avoid that, the client must
+				// explicitly force its removal.
+				err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String()))
+				return nil, errors.NewRequestConflictError(err)
+			}
+		}
+
+		parsedRef, err := reference.ParseNormalizedNamed(imageRef)
+		if err != nil {
+			return nil, err
+		}
+
+		parsedRef, err = daemon.removeImageRef(platform, parsedRef)
+		if err != nil {
+			return nil, err
+		}
+
+		untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
+
+		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+		records = append(records, untaggedRecord)
+
+		repoRefs = daemon.stores[platform].referenceStore.References(imgID.Digest())
+
+		// If a tag reference was removed and the only remaining
+		// references to the same repository are digest references,
+		// then clean up those digest references.
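+		// For example, untagging "busybox:latest" when the only other
+		// references left are "busybox@sha256:..." digest references also
+		// removes those digest references (the repository name is
+		// illustrative only).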
+		if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical {
+			foundRepoTagRef := false
+			for _, repoRef := range repoRefs {
+				if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
+					foundRepoTagRef = true
+					break
+				}
+			}
+			if !foundRepoTagRef {
+				// Remove canonical references from the same repository
+				remainingRefs := []reference.Named{}
+				for _, repoRef := range repoRefs {
+					if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
+						if _, err := daemon.removeImageRef(platform, repoRef); err != nil {
+							return records, err
+						}
+
+						untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)}
+						records = append(records, untaggedRecord)
+					} else {
+						remainingRefs = append(remainingRefs, repoRef)
+					}
+				}
+				repoRefs = remainingRefs
+			}
+		}
+
+		// If references to the image remain, the untag completed the removal
+		if len(repoRefs) > 0 {
+			return records, nil
+		}
+
+		removedRepositoryRef = true
+	} else {
+		// If an ID reference was given AND there is at most one tag
+		// reference to the image AND all references are within one
+		// repository, then remove all references.
+		if isSingleReference(repoRefs) {
+			c := conflictHard
+			if !force {
+				c |= conflictSoft &^ conflictActiveReference
+			}
+			if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
+				return nil, conflict
+			}
+
+			for _, repoRef := range repoRefs {
+				parsedRef, err := daemon.removeImageRef(platform, repoRef)
+				if err != nil {
+					return nil, err
+				}
+
+				untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
+
+				daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+				records = append(records, untaggedRecord)
+			}
+		}
+	}
+
+	if err := daemon.imageDeleteHelper(imgID, platform, &records, force, prune, removedRepositoryRef); err != nil {
+		return nil, err
+	}
+
+	imageActions.WithValues("delete").UpdateSince(start)
+
+	return records, nil
+}
+
+// isSingleReference returns true when all references are from one repository
+// and there is at most one tag. Returns false for empty input.
+func isSingleReference(repoRefs []reference.Named) bool {
+	if len(repoRefs) <= 1 {
+		return len(repoRefs) == 1
+	}
+	var singleRef reference.Named
+	canonicalRefs := map[string]struct{}{}
+	for _, repoRef := range repoRefs {
+		if _, isCanonical := repoRef.(reference.Canonical); isCanonical {
+			canonicalRefs[repoRef.Name()] = struct{}{}
+		} else if singleRef == nil {
+			singleRef = repoRef
+		} else {
+			return false
+		}
+	}
+	if singleRef == nil {
+		// Just use the first canonical ref
+		singleRef = repoRefs[0]
+	}
+	_, ok := canonicalRefs[singleRef.Name()]
+	return len(canonicalRefs) == 1 && ok
+}
+
+// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the
+// given imageID.
+func isImageIDPrefix(imageID, possiblePrefix string) bool {
+	if strings.HasPrefix(imageID, possiblePrefix) {
+		return true
+	}
+
+	if i := strings.IndexRune(imageID, ':'); i >= 0 {
+		return strings.HasPrefix(imageID[i+1:], possiblePrefix)
+	}
+
+	return false
+}
+
+// getContainerUsingImage returns a container that was created using the given
+// imageID. Returns nil if there is no such container.
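+//
+// A typical use is the "must force" check in ImageDelete above:
+//
+//	if c := daemon.getContainerUsingImage(imgID); c != nil {
+//		// a container still uses imgID, so require an explicit force
+//	}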
+func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
+	return daemon.containers.First(func(c *container.Container) bool {
+		return c.ImageID == imageID
+	})
+}
+
+// removeImageRef attempts to parse and remove the given image reference from
+// this daemon's store of repository tag/digest references. The given
+// repositoryRef must not be an image ID but a repository name followed by an
+// optional tag or digest reference. If tag or digest is omitted, the default
+// tag is used. Returns the resolved image reference and an error.
+func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (reference.Named, error) {
+	ref = reference.TagNameOnly(ref)
+
+	// Ignore the boolean value returned; as far as we're concerned, this
+	// is an idempotent operation and it's okay if the reference didn't
+	// exist in the first place.
+	_, err := daemon.stores[platform].referenceStore.Delete(ref)
+
+	return ref, err
+}
+
+// removeAllReferencesToImageID attempts to remove every reference to the given
+// imgID from this daemon's store of repository tag/digest references. Returns
+// on the first encountered error. Removed references are logged to this
+// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the
+// given list of records.
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem) error {
+	imageRefs := daemon.stores[platform].referenceStore.References(imgID.Digest())
+
+	for _, imageRef := range imageRefs {
+		parsedRef, err := daemon.removeImageRef(platform, imageRef)
+		if err != nil {
+			return err
+		}
+
+		untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)}
+
+		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
+		*records = append(*records, untaggedRecord)
+	}
+
+	return nil
+}
+
+// imageDeleteConflict holds a soft or hard conflict and an associated error.
+// It implements the error interface.
+type imageDeleteConflict struct {
+	hard    bool
+	used    bool
+	imgID   image.ID
+	message string
+}
+
+func (idc *imageDeleteConflict) Error() string {
+	var forceMsg string
+	if idc.hard {
+		forceMsg = "cannot be forced"
+	} else {
+		forceMsg = "must be forced"
+	}
+
+	return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message)
+}
+
+// imageDeleteHelper attempts to delete the given image from this daemon. If
+// the image has any hard delete conflicts (child images or running containers
+// using the image) then it cannot be deleted. If the image has any soft delete
+// conflicts (any tags/digests referencing the image or any stopped container
+// using the image) then it can only be deleted if force is true. If the delete
+// succeeds and prune is true, the parent images are also deleted if they do
+// not have any soft or hard delete conflicts themselves. Any deleted images
+// and untagged references are appended to the given records. If any error or
+// conflict is encountered, it will be returned immediately without deleting
+// the image. If quiet is true, any encountered conflicts will be ignored and
+// the function will return nil immediately without deleting the image.
+func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
+	// First, determine if this image has any conflicts. Ignore soft conflicts
+	// if force is true.
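+	// The conflict check is driven by a bitmask of conflictType values:
+	// conflictHard covers dependent child images and running containers,
+	// conflictSoft adds repository references and stopped containers, so
+	// force=true skips exactly the soft (forceable) categories.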
+	c := conflictHard
+	if !force {
+		c |= conflictSoft
+	}
+	if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
+		if quiet && (!daemon.imageIsDangling(imgID, platform) || conflict.used) {
+			// Ignore conflicts UNLESS the image is "dangling" and not being
+			// used, in which case we want the user to know.
+			return nil
+		}
+
+		// There was a conflict and it's either a hard conflict OR we are not
+		// forcing deletion on soft conflicts.
+		return conflict
+	}
+
+	parent, err := daemon.stores[platform].imageStore.GetParent(imgID)
+	if err != nil {
+		// There may be no parent
+		parent = ""
+	}
+
+	// Delete all repository tag/digest references to this image.
+	if err := daemon.removeAllReferencesToImageID(imgID, platform, records); err != nil {
+		return err
+	}
+
+	removedLayers, err := daemon.stores[platform].imageStore.Delete(imgID)
+	if err != nil {
+		return err
+	}
+
+	daemon.LogImageEvent(imgID.String(), imgID.String(), "delete")
+	*records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()})
+	for _, removedLayer := range removedLayers {
+		*records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()})
+	}
+
+	if !prune || parent == "" {
+		return nil
+	}
+
+	// We need to prune the parent image. This means delete it if there are
+	// no tags/digests referencing it and there are no containers using it
+	// (either running or stopped).
+	// Do not force the prune, but do it quietly (stopping on any encountered
+	// conflicts).
+	return daemon.imageDeleteHelper(parent, platform, records, false, true, true)
+}
+
+// checkImageDeleteConflict determines whether there are any conflicts
+// preventing deletion of the given image from this daemon. A hard conflict is
+// any image which has the given image as a parent or any running container
+// using the image. A soft conflict is any tags/digest referencing the given
+// image or any stopped container using the image. The mask parameter selects
+// which conflict categories are checked; categories not present in the mask
+// are ignored.
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string, mask conflictType) *imageDeleteConflict {
+	// Check if the image has any descendant images.
+	if mask&conflictDependentChild != 0 && len(daemon.stores[platform].imageStore.Children(imgID)) > 0 {
+		return &imageDeleteConflict{
+			hard:    true,
+			imgID:   imgID,
+			message: "image has dependent child images",
+		}
+	}
+
+	if mask&conflictRunningContainer != 0 {
+		// Check if any running container is using the image.
+		running := func(c *container.Container) bool {
+			return c.IsRunning() && c.ImageID == imgID
+		}
+		if container := daemon.containers.First(running); container != nil {
+			return &imageDeleteConflict{
+				imgID:   imgID,
+				hard:    true,
+				used:    true,
+				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
+			}
+		}
+	}
+
+	// Check if any repository tags/digest reference this image.
+	if mask&conflictActiveReference != 0 && len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 {
+		return &imageDeleteConflict{
+			imgID:   imgID,
+			message: "image is referenced in multiple repositories",
+		}
+	}
+
+	if mask&conflictStoppedContainer != 0 {
+		// Check if any stopped containers reference this image.
+		stopped := func(c *container.Container) bool {
+			return !c.IsRunning() && c.ImageID == imgID
+		}
+		if container := daemon.containers.First(stopped); container != nil {
+			return &imageDeleteConflict{
+				imgID:   imgID,
+				used:    true,
+				message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
+			}
+		}
+	}
+
+	return nil
+}
+
+// imageIsDangling returns whether the given image is "dangling" which means
+// that there are no repository references to the given image and it has no
+// child images.
+func (daemon *Daemon) imageIsDangling(imgID image.ID, platform string) bool {
+	return !(len(daemon.stores[platform].referenceStore.References(imgID.Digest())) > 0 || len(daemon.stores[platform].imageStore.Children(imgID)) > 0)
+}
diff --git a/daemon/image_exporter.go b/daemon/image_exporter.go
new file mode 100644
index 0000000..a7b0be6
--- /dev/null
+++ b/daemon/image_exporter.go
@@ -0,0 +1,37 @@
+package daemon
+
+import (
+	"io"
+	"runtime"
+
+	"github.com/docker/docker/image/tarexport"
+	"github.com/docker/docker/pkg/system"
+)
+
+// ExportImage exports a list of images to the given output stream. The
+// exported images are archived into a tar when written to the output
+// stream. All images with the given tag and all versions containing
+// the same tag are exported. names is the set of tags to export, and
+// outStream is the writer to which the images are written.
+func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
+	// TODO @jhowardmsft LCOW. This will need revisiting later.
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+	imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.stores[platform].referenceStore, daemon)
+	return imageExporter.Save(names, outStream)
+}
+
+// LoadImage uploads a set of images into the repository. This is the
+// complement of ExportImage. The input stream is an uncompressed tarball
+// containing images and metadata.
+func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
+	// TODO @jhowardmsft LCOW. This will need revisiting later.
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+	imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.stores[platform].referenceStore, daemon)
+	return imageExporter.Load(inTar, outStream, quiet)
+}
diff --git a/daemon/image_history.go b/daemon/image_history.go
new file mode 100644
index 0000000..c9e8155
--- /dev/null
+++ b/daemon/image_history.go
@@ -0,0 +1,91 @@
+package daemon
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types/image"
+	"github.com/docker/docker/layer"
+)
+
+// ImageHistory returns a slice of HistoryResponseItem structures for the
+// specified image name by walking the image lineage.
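+//
+// The returned slice is ordered newest entry first. A sketch of a call (the
+// image name is illustrative only):
+//
+//	items, err := daemon.ImageHistory("busybox:latest")
+//	// items[0] describes the top-most layer; the base layer comes last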
+func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { + start := time.Now() + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + // If the image OS isn't set, assume it's the host OS + platform := img.OS + if platform == "" { + platform = runtime.GOOS + } + + history := []*image.HistoryResponseItem{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.stores[platform].layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*image.HistoryResponseItem{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) + } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.stores[platform].referenceStore.References(id.Digest()) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, reference.FamiliarString(r)) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + imageActions.WithValues("history").UpdateSince(start) + return history, nil +} diff --git a/daemon/image_inspect.go b/daemon/image_inspect.go new file mode 100644 index 0000000..3baf265 --- /dev/null +++ b/daemon/image_inspect.go @@ -0,0 +1,96 @@ +package daemon + +import ( + "runtime" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. 
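+//
+// A sketch of a call (the image name is illustrative only):
+//
+//	inspect, err := daemon.LookupImage("busybox:latest")
+//	// inspect.RepoTags, inspect.RepoDigests, inspect.Size, etc. are populated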
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, errors.Wrapf(err, "no such image: %s", name) + } + + // If the image OS isn't set, assume it's the host OS + platform := img.OS + if platform == "" { + platform = runtime.GOOS + } + + refs := daemon.stores[platform].referenceStore.References(img.ID().Digest()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, reference.FamiliarString(ref)) + case reference.Canonical: + repoDigests = append(repoDigests, reference.FamiliarString(ref)) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.stores[platform].layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + lastUpdated, err := daemon.stores[platform].imageStore.GetLastUpdated(img.ID()) + if err != nil { + return nil, err + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: platform, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + Metadata: types.ImageMetadata{ + LastTagTime: lastUpdated, + }, + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName(platform) + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/daemon/image_pull.go b/daemon/image_pull.go new file mode 100644 index 0000000..307d945 --- /dev/null +++ b/daemon/image_pull.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "io" + "runtime" + "strings" + + dist "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + + if tag != "" { + // The "tag" could actually be a digest. 
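+		// e.g. a pull of "busybox@sha256:<digest>" arrives here with tag set
+		// to "sha256:<digest>", while "busybox:1.27" carries the plain tag
+		// "1.27" (names illustrative only).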
+ var dgst digest.Digest + dgst, err = digest.Parse(tag) + if err == nil { + ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + if err != nil { + return err + } + } + + return daemon.pullImageWithReference(ctx, ref, platform, metaHeaders, authConfig, outStream) +} + +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + // Default to the host OS platform in case it hasn't been populated with an explicit value. + if platform == "" { + platform = runtime.GOOS + } + + var deltaImageStore image.Store + if daemon.deltaStore != nil { + deltaImageStore = daemon.deltaStore.imageStore + } + + imagePullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.stores[platform].distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore, deltaImageStore), + ReferenceStore: daemon.stores[platform].referenceStore, + }, + DownloadManager: daemon.downloadManager, + Schema2Types: distribution.ImageTypes, + Platform: platform, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// GetRepository returns a repository from the registry. 
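+// It resolves the reference to repository info, validates the repository
+// name, and then tries each configured pull endpoint, skipping v1 endpoints,
+// until a v2 repository is confirmed; the returned bool reports whether the
+// endpoint was confirmed as v2.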
+func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { + // get repository info + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, false, err + } + // makes sure name is not empty or `scratch` + if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { + return nil, false, err + } + + // get endpoints + endpoints, err := daemon.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return nil, false, err + } + + // retrieve repository + var ( + confirmedV2 bool + repository dist.Repository + lastError error + ) + + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + continue + } + + repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") + if lastError == nil && confirmedV2 { + break + } + } + return repository, confirmedV2, lastError +} diff --git a/daemon/image_push.go b/daemon/image_push.go new file mode 100644 index 0000000..6c1f409 --- /dev/null +++ b/daemon/image_push.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "io" + "runtime" + + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" + "golang.org/x/net/context" +) + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + // TODO @jhowardmsft LCOW Support. This will require revisiting. For now, hard-code. 
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.stores[platform].distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore, nil), + ReferenceStore: daemon.stores[platform].referenceStore, + }, + ConfigMediaType: schema2.MediaTypeImageConfig, + LayerStore: distribution.NewLayerProviderFromStore(daemon.stores[platform].layerStore), + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err = distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} diff --git a/daemon/image_tag.go b/daemon/image_tag.go new file mode 100644 index 0000000..5f28dae --- /dev/null +++ b/daemon/image_tag.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(imageName, repository, tag string) error { + imageID, platform, err := daemon.GetImageIDAndPlatform(imageName) + if err != nil { + return err + } + + newTag, err := reference.ParseNormalizedNamed(repository) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { + return err + } + } + + return daemon.TagImageWithReference(imageID, platform, newTag) +} + +// TagImageWithReference adds the given reference to the image ID provided. +func (daemon *Daemon) TagImageWithReference(imageID image.ID, platform string, newTag reference.Named) error { + if err := daemon.stores[platform].referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + if err := daemon.stores[platform].imageStore.SetLastUpdated(imageID); err != nil { + return err + } + daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") + return nil +} diff --git a/daemon/images.go b/daemon/images.go new file mode 100644 index 0000000..4baf703 --- /dev/null +++ b/daemon/images.go @@ -0,0 +1,359 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "runtime" + "sort" + "time" + + "github.com/pkg/errors" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, + "before": true, + "since": true, + "reference": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.ImageSummary + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + // TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms. 
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+	return daemon.stores[platform].imageStore.Map()
+}
+
+// Images returns a filtered list of images. imageFilters is a set of filter
+// arguments which will be interpreted by api/types/filters. all controls
+// whether all images in the graph are returned, or just the heads. If
+// withExtraAttrs is true, per-image container counts and shared-size
+// information are also computed.
+func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
+
+	// TODO @jhowardmsft LCOW. This will need work to enumerate the stores for all platforms.
+	platform := runtime.GOOS
+	if system.LCOWSupported() {
+		platform = "linux"
+	}
+
+	var (
+		allImages    map[image.ID]*image.Image
+		err          error
+		danglingOnly = false
+	)
+
+	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
+		return nil, err
+	}
+
+	if imageFilters.Include("dangling") {
+		if imageFilters.ExactMatch("dangling", "true") {
+			danglingOnly = true
+		} else if !imageFilters.ExactMatch("dangling", "false") {
+			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
+		}
+	}
+	if danglingOnly {
+		allImages = daemon.stores[platform].imageStore.Heads()
+	} else {
+		allImages = daemon.stores[platform].imageStore.Map()
+	}
+
+	var beforeFilter, sinceFilter *image.Image
+	err = imageFilters.WalkValues("before", func(value string) error {
+		beforeFilter, err = daemon.GetImage(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = imageFilters.WalkValues("since", func(value string) error {
+		sinceFilter, err = daemon.GetImage(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	images := []*types.ImageSummary{}
+	var imagesMap map[*image.Image]*types.ImageSummary
+	var layerRefs map[layer.ChainID]int
+	var allLayers map[layer.ChainID]layer.Layer
+	var allContainers []*container.Container
+
+	for id, img := range allImages {
+		if beforeFilter != nil {
+			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
+				continue
+			}
+		}
+
+		if sinceFilter != nil {
+			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
+				continue
+			}
+		}
+
+		if imageFilters.Include("label") {
+			// Very old images do not have image.Config (or even labels)
+			if img.Config == nil {
+				continue
+			}
+			// We are now sure image.Config is not nil
+			if !imageFilters.MatchKVList("label", img.Config.Labels) {
+				continue
+			}
+		}
+
+		layerID := img.RootFS.ChainID()
+		var size int64
+		if layerID != "" {
+			l, err := daemon.stores[platform].layerStore.Get(layerID)
+			if err != nil {
+				// The layer may have been deleted between the call to `Map()` or
+				// `Heads()` and the call to `Get()`, so we just ignore this error
+				if err == layer.ErrLayerDoesNotExist {
+					continue
+				}
+				return nil, err
+			}
+
+			size, err = l.Size()
+			layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		newImage := newImage(img, size)
+
+		for _, ref := range daemon.stores[platform].referenceStore.References(id.Digest()) {
+			if imageFilters.Include("reference") {
+				var found bool
+				var matchErr error
+				for _, pattern := range imageFilters.Get("reference") {
+					found, matchErr = reference.FamiliarMatch(pattern, ref)
+					if matchErr != nil {
+						return nil, matchErr
+					}
+				}
+				if !found {
+					continue
+				}
+			}
+			if _, ok := ref.(reference.Canonical); ok {
+				newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref))
+			}
+			if _, ok := ref.(reference.NamedTagged); ok {
+				newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref))
+			}
+		}
+		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
+			if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 {
+
+				if imageFilters.Include("dangling") && !danglingOnly {
+					// dangling=false case, so dangling images are not wanted
+					continue
+				}
+				if imageFilters.Include("reference") { // skip images with no references if filtering by reference
+					continue
+				}
+				newImage.RepoDigests = []string{"<none>@<none>"}
+				newImage.RepoTags = []string{"<none>:<none>"}
+			} else {
+				continue
+			}
+		} else if danglingOnly && len(newImage.RepoTags) > 0 {
+			continue
+		}
+
+		if withExtraAttrs {
+			// lazily init variables
+			if imagesMap == nil {
+				allContainers = daemon.List()
+				allLayers = daemon.stores[platform].layerStore.Map()
+				imagesMap = make(map[*image.Image]*types.ImageSummary)
+				layerRefs = make(map[layer.ChainID]int)
+			}
+
+			// Get container count
+			newImage.Containers = 0
+			for _, c := range allContainers {
+				if c.ImageID == id {
+					newImage.Containers++
+				}
+			}
+
+			// count layer references
+			rootFS := *img.RootFS
+			rootFS.DiffIDs = nil
+			for _, id := range img.RootFS.DiffIDs {
+				rootFS.Append(id)
+				chid := rootFS.ChainID()
+				layerRefs[chid]++
+				if _, ok := allLayers[chid]; !ok {
+					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
+				}
+			}
+			imagesMap[img] = newImage
+		}
+
+		images = append(images, newImage)
+	}
+
+	if withExtraAttrs {
+		// Get Shared sizes
+		for img, newImage := range imagesMap {
+			rootFS := *img.RootFS
+			rootFS.DiffIDs = nil
+
+			newImage.SharedSize = 0
+			for _, id := range img.RootFS.DiffIDs {
+				rootFS.Append(id)
+				chid := rootFS.ChainID()
+
+				diffSize, err := allLayers[chid].DiffSize()
+				if err != nil {
+					return nil, err
+				}
+
+				if layerRefs[chid] > 1 {
+					newImage.SharedSize += diffSize
+				}
+			}
+		}
+	}
+
+	sort.Sort(sort.Reverse(byCreated(images)))
+
+	return images, nil
+}
+
+// SquashImage creates a new image with the diff of the specified image and the specified parent.
+// This new image contains only the layers from its parent, plus one extra layer which contains the diff of all the layers in between.
+// The existing image(s) are not destroyed.
+// If no parent is specified, a new image is created from the diff of all the specified image's layers merged into a single layer with no parent.
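+//
+// A sketch of a call (the IDs are illustrative only):
+//
+//	newID, err := daemon.SquashImage(imgID, parentID)
+//	// newID names a new image whose top layer is the combined diff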
+func (daemon *Daemon) SquashImage(id, parent string) (string, error) { + + var ( + img *image.Image + err error + ) + for _, ds := range daemon.stores { + if img, err = ds.imageStore.Get(image.ID(id)); err == nil { + break + } + } + if err != nil { + return "", err + } + + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = daemon.stores[img.Platform()].imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + + l, err := daemon.stores[img.Platform()].layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return "", errors.Wrap(err, "error getting image layer") + } + defer daemon.stores[img.Platform()].layerStore.Release(l) + + ts, err := l.TarStreamFrom(parentChainID) + if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() + + newL, err := daemon.stores[img.Platform()].layerStore.Register(ts, parentChainID, layer.Platform(img.Platform())) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer daemon.stores[img.Platform()].layerStore.Release(newL) + + var newImage image.Image + newImage = *img + newImage.RootFS = nil + + var rootFS image.RootFS + rootFS = *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS + + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi + } + + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } + + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now + + b, err := json.Marshal(&newImage) + if err != nil { + return "", errors.Wrap(err, "error marshalling image config") + } + + newImgID, err := daemon.stores[img.Platform()].imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") + } + return string(newImgID), nil +} + +func newImage(image *image.Image, size int64) *types.ImageSummary { + newImage := new(types.ImageSummary) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + newImage.SharedSize = -1 + newImage.Containers = -1 + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/daemon/import.go b/daemon/import.go new file mode 100644 index 0000000..0409cd6 --- /dev/null +++ b/daemon/import.go @@ -0,0 +1,137 @@ +package daemon + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/pkg/errors" +) + +// ImportImage imports an image, getting the archived layer data either 
from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + var ( + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) + + // Default the platform if not supplied. + if platform == "" { + platform = runtime.GOOS + } + + if repository != "" { + var err error + newRef, err = reference.ParseNormalizedNamed(repository) + if err != nil { + return err + } + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + if len(strings.Split(src, "://")) == 1 { + src = "http://" + src + } + u, err := url.Parse(src) + if err != nil { + return err + } + + resp, err = remotecontext.GetWithStatusError(u.String()) + if err != nil { + return err + } + outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + l, err := daemon.stores[platform].layerStore.Register(inflatedLayerData, "", layer.Platform(platform)) + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: platform, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.stores[platform].imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImageWithReference(id, platform, newRef); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(streamformatter.FormatStatus("", id.String())) + return nil +} diff --git a/daemon/info.go b/daemon/info.go new file mode 100644 index 0000000..a4579b1 --- /dev/null +++ b/daemon/info.go @@ -0,0 +1,190 @@ +package daemon + +import ( + "fmt" + "os" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/debug" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + 
"github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/volume/drivers" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + meminfo = &system.MemInfo{} + } + + sysInfo := sysinfo.New(true) + cRunning, cPaused, cStopped := stateCtr.get() + + securityOptions := []string{} + if sysInfo.AppArmor { + securityOptions = append(securityOptions, "name=apparmor") + } + if sysInfo.Seccomp && supportsSeccomp { + profile := daemon.seccompProfilePath + if profile == "" { + profile = "default" + } + securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) + } + if selinuxEnabled() { + securityOptions = append(securityOptions, "name=selinux") + } + rootIDs := daemon.idMappings.RootPair() + if rootIDs.UID != 0 || rootIDs.GID != 0 { + securityOptions = append(securityOptions, "name=userns") + } + + imageCount := 0 + drivers := "" + for p, ds := range daemon.stores { + imageCount += len(ds.imageStore.Map()) + drivers += daemon.GraphDriverName(p) + if len(daemon.stores) > 1 { + drivers += fmt.Sprintf(" (%s) ", p) + } + } + + // TODO @jhowardmsft LCOW support. 
For now, hard-code the platform shown for the driver status + p := runtime.GOOS + if system.LCOWSupported() { + p = "linux" + } + + drivers = strings.TrimSpace(drivers) + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: imageCount, + Driver: drivers, + DriverStatus: daemon.stores[p].layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: debug.IsEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: sysinfo.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: daemon.configStore.Experimental, + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, + SecurityOptions: securityOptions, + Isolation: daemon.defaultIsolation, + } + + // Retrieve platform specific info + daemon.FillPlatformInfo(v, sysInfo) + + hostname := "" + if hn, err := os.Hostname(); err != nil { + logrus.Warnf("Could not get hostname: %v", err) + } else { + hostname = hn + } + v.Name = hostname + + return v, nil +} + +// SystemVersion returns version information about the daemon. +func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Engine: "docker", + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + MinAPIVersion: api.MinVersion, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: daemon.configStore.Experimental, + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Network = daemon.GetNetworkDriverList() + // The authorization plugins are returned in the order they are + // used as they constitute a request/response modification chain. 
+ pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + pluginsInfo.Log = logger.ListDrivers() + + return pluginsInfo +} diff --git a/daemon/info_unix.go b/daemon/info_unix.go new file mode 100644 index 0000000..c1f6a4f --- /dev/null +++ b/daemon/info_unix.go @@ -0,0 +1,93 @@ +// +build !windows + +package daemon + +import ( + "context" + "os/exec" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/sysinfo" + "github.com/pkg/errors" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + v.Runtimes = daemon.configStore.GetAllRuntimes() + v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() + v.InitBinary = daemon.configStore.GetInitPath() + + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { + v.ContainerdCommit.ID = sv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + v.RuncCommit.Expected = dockerversion.RuncCommitID + defaultRuntimeBinary := daemon.configStore.GetRuntime(daemon.configStore.GetDefaultRuntimeName()).Path + if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), "\n") + if len(parts) == 3 { + parts = strings.Split(parts[1], ": ") + if len(parts) == 2 { + v.RuncCommit.ID = strings.TrimSpace(parts[1]) + } + } + + if v.RuncCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", defaultRuntimeBinary, string(rv)) + v.RuncCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) + v.RuncCommit.ID = "N/A" + } + + defaultInitBinary := daemon.configStore.GetInitPath() + if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { + ver, err := parseInitVersion(string(rv)) + + if err != nil { + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) + } + v.InitCommit = ver + } else { + //logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) + v.InitCommit.ID = "N/A" + } +} + +// parseInitVersion parses a Tini version string, and extracts the version. 
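+//
+// Examples of accepted inputs (taken from the tests in info_unix_test.go
+// below):
+//
+//	"tini version 0.13.0 - git.949e6fa"  -> Commit ID "949e6fa"
+//	"tini version 0.13.2"                -> Commit ID "v0.13.2"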
+func parseInitVersion(v string) (types.Commit, error) { + version := types.Commit{ID: "", Expected: dockerversion.InitCommitID} + parts := strings.Split(strings.TrimSpace(v), " - ") + + if len(parts) >= 2 { + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + version.ID = gitParts[1] + version.Expected = dockerversion.InitCommitID[0:len(version.ID)] + } + } + if version.ID == "" && strings.HasPrefix(parts[0], "tini version ") { + version.ID = "v" + strings.TrimPrefix(parts[0], "tini version ") + } + if version.ID == "" { + version.ID = "N/A" + return version, errors.Errorf("unknown output format: %s", v) + } + return version, nil +} diff --git a/daemon/info_unix_test.go b/daemon/info_unix_test.go new file mode 100644 index 0000000..ef36c40 --- /dev/null +++ b/daemon/info_unix_test.go @@ -0,0 +1,52 @@ +// +build !windows + +package daemon + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" +) + +func TestParseInitVersion(t *testing.T) { + tests := []struct { + version string + result types.Commit + invalid bool + }{ + { + version: "tini version 0.13.0 - git.949e6fa", + result: types.Commit{ID: "949e6fa", Expected: dockerversion.InitCommitID[0:7]}, + }, { + version: "tini version 0.13.0\n", + result: types.Commit{ID: "v0.13.0", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version 0.13.2", + result: types.Commit{ID: "v0.13.2", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version0.13.2", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "hello world", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, + } + + for _, test := range tests { + ver, err := parseInitVersion(string(test.version)) + if test.invalid { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.result, ver) + } +} diff --git a/daemon/info_windows.go b/daemon/info_windows.go new file mode 100644 index 0000000..c700911 --- /dev/null +++ b/daemon/info_windows.go @@ -0,0 +1,10 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { +} diff --git a/daemon/initlayer/setup_solaris.go b/daemon/initlayer/setup_solaris.go new file mode 100644 index 0000000..66d53f0 --- /dev/null +++ b/daemon/initlayer/setup_solaris.go @@ -0,0 +1,13 @@ +// +build solaris,cgo + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
+func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/daemon/initlayer/setup_unix.go b/daemon/initlayer/setup_unix.go new file mode 100644 index 0000000..cdd8973 --- /dev/null +++ b/daemon/initlayer/setup_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package initlayer + +import ( + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootIDs idtools.IDPair) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootIDs); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, pth), 0755, rootIDs); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootIDs.UID, rootIDs.GID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} diff --git a/daemon/initlayer/setup_windows.go b/daemon/initlayer/setup_windows.go new file mode 100644 index 0000000..2b22f58 --- /dev/null +++ b/daemon/initlayer/setup_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package initlayer + +import ( + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootIDs idtools.IDPair) error { + return nil +} diff --git a/daemon/inspect.go b/daemon/inspect.go new file mode 100644 index 0000000..47c1ba4 --- /dev/null +++ b/daemon/inspect.go @@ -0,0 +1,270 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/go-connections/nat" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. 
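+//
+// The shape of the result depends on the negotiated API version (see the
+// version switch below):
+//
+//	version < "1.20"  -> containerInspectPre120 (v1p19.ContainerJSON)
+//	version == "1.20" -> containerInspect120 (v1p20.ContainerJSON)
+//	newer             -> ContainerInspectCurrent (types.ContainerJSON)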
+func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) {
+	switch {
+	case versions.LessThan(version, "1.20"):
+		return daemon.containerInspectPre120(name)
+	case versions.Equal(version, "1.20"):
+		return daemon.containerInspect120(name)
+	}
+	return daemon.ContainerInspectCurrent(name, size)
+}
+
+// ContainerInspectCurrent returns low-level information about a
+// container in the most recent API version.
+func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	container.Lock()
+
+	base, err := daemon.getInspectData(container)
+	if err != nil {
+		container.Unlock()
+		return nil, err
+	}
+
+	apiNetworks := make(map[string]*networktypes.EndpointSettings)
+	for name, epConf := range container.NetworkSettings.Networks {
+		if epConf.EndpointSettings != nil {
+			// We must make a copy of this pointer object otherwise it can race with other operations
+			apiNetworks[name] = epConf.EndpointSettings.Copy()
+		}
+	}
+
+	mountPoints := container.GetMountPoints()
+	networkSettings := &types.NetworkSettings{
+		NetworkSettingsBase: types.NetworkSettingsBase{
+			Bridge:                 container.NetworkSettings.Bridge,
+			SandboxID:              container.NetworkSettings.SandboxID,
+			HairpinMode:            container.NetworkSettings.HairpinMode,
+			LinkLocalIPv6Address:   container.NetworkSettings.LinkLocalIPv6Address,
+			LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen,
+			SandboxKey:             container.NetworkSettings.SandboxKey,
+			SecondaryIPAddresses:   container.NetworkSettings.SecondaryIPAddresses,
+			SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses,
+		},
+		DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks),
+		Networks:               apiNetworks,
+	}
+
+	ports := make(nat.PortMap, len(container.NetworkSettings.Ports))
+	for k, pm := range container.NetworkSettings.Ports {
+		ports[k] = pm
+	}
+	networkSettings.NetworkSettingsBase.Ports = ports
+
+	container.Unlock()
+
+	if size {
+		sizeRw, sizeRootFs := daemon.getSize(base.ID)
+		base.SizeRw = &sizeRw
+		base.SizeRootFs = &sizeRootFs
+	}
+
+	return &types.ContainerJSON{
+		ContainerJSONBase: base,
+		Mounts:            mountPoints,
+		Config:            container.Config,
+		NetworkSettings:   networkSettings,
+	}, nil
+}
+
+// containerInspect120 serializes the master version of a container into a json type.
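+// The v1p20 view keeps the full Config and additionally surfaces the
+// then-top-level MacAddress, NetworkDisabled, ExposedPorts and VolumeDriver
+// fields, plus backwards-compatible network settings.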
+func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container) + if err != nil { + return nil, err + } + + mountPoints := container.GetMountPoints() + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // We merge the Ulimits from hostConfig with daemon default + daemon.mergeUlimits(&hostConfig) + + var containerHealth *types.Health + if container.State.Health != nil { + containerHealth = &types.Health{ + Status: container.State.Health.Status, + FailingStreak: container.State.Health.FailingStreak, + Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), + } + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode(), + Error: container.State.ErrorMsg, + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + Health: containerHealth, + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + Platform: container.Platform, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil && !container.Dead { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. 
+func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) {
+	e, err := daemon.getExecConfig(id)
+	if err != nil {
+		return nil, err
+	}
+
+	pc := inspectExecProcessConfig(e)
+
+	return &backend.ExecInspect{
+		ID:            e.ID,
+		Running:       e.Running,
+		ExitCode:      e.ExitCode,
+		ProcessConfig: pc,
+		OpenStdin:     e.OpenStdin,
+		OpenStdout:    e.OpenStdout,
+		OpenStderr:    e.OpenStderr,
+		CanRemove:     e.CanRemove,
+		ContainerID:   e.ContainerID,
+		DetachKeys:    e.DetachKeys,
+		Pid:           e.Pid,
+	}, nil
+}
+
+// VolumeInspect looks up a volume by name. An error is returned if
+// the volume cannot be found.
+func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
+	v, err := daemon.volumes.Get(name)
+	if err != nil {
+		return nil, err
+	}
+	apiV := volumeToAPIType(v)
+	apiV.Mountpoint = v.Path()
+	apiV.Status = v.Status()
+	return apiV, nil
+}
+
+func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings {
+	result := &v1p20.NetworkSettings{
+		NetworkSettingsBase: types.NetworkSettingsBase{
+			Bridge:                 settings.Bridge,
+			SandboxID:              settings.SandboxID,
+			HairpinMode:            settings.HairpinMode,
+			LinkLocalIPv6Address:   settings.LinkLocalIPv6Address,
+			LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen,
+			Ports:                  settings.Ports,
+			SandboxKey:             settings.SandboxKey,
+			SecondaryIPAddresses:   settings.SecondaryIPAddresses,
+			SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses,
+		},
+		DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks),
+	}
+
+	return result
+}
+
+// getDefaultNetworkSettings creates the deprecated structure that holds the information
+// about the bridge network for a container.
+func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings {
+	var settings types.DefaultNetworkSettings
+
+	if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil {
+		settings.EndpointID = defaultNetwork.EndpointID
+		settings.Gateway = defaultNetwork.Gateway
+		settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address
+		settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen
+		settings.IPAddress = defaultNetwork.IPAddress
+		settings.IPPrefixLen = defaultNetwork.IPPrefixLen
+		settings.IPv6Gateway = defaultNetwork.IPv6Gateway
+		settings.MacAddress = defaultNetwork.MacAddress
+	}
+	return settings
+}
diff --git a/daemon/inspect_solaris.go b/daemon/inspect_solaris.go
new file mode 100644
index 0000000..0b275c1
--- /dev/null
+++ b/daemon/inspect_solaris.go
@@ -0,0 +1,27 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/versions/v1p19"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/exec"
+)
+
+// This sets platform-specific fields
+func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
+	return contJSONBase
+}
+
+// containerInspectPre120 gets containers for pre 1.20 APIs.
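+// On Solaris this is effectively unimplemented: it returns an empty
+// v1p19.ContainerJSON.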
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
+	return &v1p19.ContainerJSON{}, nil
+}
+
+func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
+	return &backend.ExecProcessConfig{
+		Tty:        e.Tty,
+		Entrypoint: e.Entrypoint,
+		Arguments:  e.Args,
+	}
+}
diff --git a/daemon/inspect_unix.go b/daemon/inspect_unix.go
new file mode 100644
index 0000000..bd28481
--- /dev/null
+++ b/daemon/inspect_unix.go
@@ -0,0 +1,75 @@
+// +build !windows,!solaris
+
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/versions/v1p19"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/exec"
+)
+
+// This sets platform-specific fields
+func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
+	contJSONBase.AppArmorProfile = container.AppArmorProfile
+	contJSONBase.ResolvConfPath = container.ResolvConfPath
+	contJSONBase.HostnamePath = container.HostnamePath
+	contJSONBase.HostsPath = container.HostsPath
+
+	return contJSONBase
+}
+
+// containerInspectPre120 gets containers for pre 1.20 APIs.
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	container.Lock()
+	defer container.Unlock()
+
+	base, err := daemon.getInspectData(container)
+	if err != nil {
+		return nil, err
+	}
+
+	volumes := make(map[string]string)
+	volumesRW := make(map[string]bool)
+	for _, m := range container.MountPoints {
+		volumes[m.Destination] = m.Path()
+		volumesRW[m.Destination] = m.RW
+	}
+
+	config := &v1p19.ContainerConfig{
+		Config:          container.Config,
+		MacAddress:      container.Config.MacAddress,
+		NetworkDisabled: container.Config.NetworkDisabled,
+		ExposedPorts:    container.Config.ExposedPorts,
+		VolumeDriver:    container.HostConfig.VolumeDriver,
+		Memory:          container.HostConfig.Memory,
+		MemorySwap:      container.HostConfig.MemorySwap,
+		CPUShares:       container.HostConfig.CPUShares,
+		CPUSet:          container.HostConfig.CpusetCpus,
+	}
+	networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings)
+
+	return &v1p19.ContainerJSON{
+		ContainerJSONBase: base,
+		Volumes:           volumes,
+		VolumesRW:         volumesRW,
+		Config:            config,
+		NetworkSettings:   networkSettings,
+	}, nil
+}
+
+func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
+	return &backend.ExecProcessConfig{
+		Tty:        e.Tty,
+		Entrypoint: e.Entrypoint,
+		Arguments:  e.Args,
+		Privileged: &e.Privileged,
+		User:       e.User,
+	}
+}
diff --git a/daemon/inspect_windows.go b/daemon/inspect_windows.go
new file mode 100644
index 0000000..5b12902
--- /dev/null
+++ b/daemon/inspect_windows.go
@@ -0,0 +1,26 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/exec"
+)
+
+// This sets platform-specific fields
+func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
+	return contJSONBase
+}
+
+// containerInspectPre120 gets containers for pre 1.20 APIs.
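+// Windows has no legacy inspect format, so this simply returns the current
+// inspect output (without size information).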
+func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) {
+	return daemon.ContainerInspectCurrent(name, false)
+}
+
+func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
+	return &backend.ExecProcessConfig{
+		Tty:        e.Tty,
+		Entrypoint: e.Entrypoint,
+		Arguments:  e.Args,
+	}
+}
diff --git a/daemon/keys.go b/daemon/keys.go
new file mode 100644
index 0000000..055d488
--- /dev/null
+++ b/daemon/keys.go
@@ -0,0 +1,59 @@
+// +build linux
+
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	rootKeyFile   = "/proc/sys/kernel/keys/root_maxkeys"
+	rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes"
+	rootKeyLimit  = 1000000
+	// it is standard configuration to allocate 25 bytes per key
+	rootKeyByteMultiplier = 25
+)
+
+// ModifyRootKeyLimit checks whether the root key limit is set to at least
+// 1000000 and, if not, raises it to that limit, along with the maxbytes
+// allocated to the keys at a 25-to-1 multiplier.
+func ModifyRootKeyLimit() error {
+	value, err := readRootKeyLimit(rootKeyFile)
+	if err != nil {
+		return err
+	}
+	if value < rootKeyLimit {
+		return setRootKeyLimit(rootKeyLimit)
+	}
+	return nil
+}
+
+func setRootKeyLimit(limit int) error {
+	keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer keys.Close()
+	if _, err := fmt.Fprintf(keys, "%d", limit); err != nil {
+		return err
+	}
+	bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer bytes.Close()
+	_, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier)
+	return err
+}
+
+func readRootKeyLimit(path string) (int, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return -1, err
+	}
+	return strconv.Atoi(strings.Trim(string(data), "\n"))
+}
diff --git a/daemon/keys_unsupported.go b/daemon/keys_unsupported.go
new file mode 100644
index 0000000..e49baf9
--- /dev/null
+++ b/daemon/keys_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux
+
+package daemon
+
+// ModifyRootKeyLimit is a noop on unsupported platforms.
+func ModifyRootKeyLimit() error {
+	return nil
+}
diff --git a/daemon/kill.go b/daemon/kill.go
new file mode 100644
index 0000000..a222071
--- /dev/null
+++ b/daemon/kill.go
@@ -0,0 +1,171 @@
+package daemon
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	containerpkg "github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/signal"
+)
+
+type errNoSuchProcess struct {
+	pid    int
+	signal int
+}
+
+func (e errNoSuchProcess) Error() string {
+	return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal)
+}
+
+// isErrNoSuchProcess returns true if the error
+// is an instance of errNoSuchProcess.
+func isErrNoSuchProcess(err error) bool {
+	_, ok := err.(errNoSuchProcess)
+	return ok
+}
+
+// ContainerKill sends a signal to the container.
+// If no signal is given (sig 0), then Kill with SIGKILL and wait
+// for the container to exit.
+// If a signal is given, then just send it to the container and return.
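+//
+// For example (illustrative): a plain `docker kill <name>` resolves to
+// SIGKILL and takes the Kill path below (SIGKILL + wait), while
+// `docker kill -s HUP <name>` resolves to SIGHUP and is simply forwarded
+// through killWithSignal.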
+func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int) error { + logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping or killing", container.ID) + } + + if !container.Running { + return errNotRunning{container.ID} + } + + if container.Config.StopSignal != "" && syscall.Signal(sig) != syscall.SIGKILL { + containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) + if err != nil { + return err + } + if containerStopSignal == syscall.Signal(sig) { + container.ExitOnNext() + } + } else { + container.ExitOnNext() + } + + if !daemon.IsShuttingDown() { + container.HasBeenManuallyStopped = true + } + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on its next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) + // if container or process not exists, ignore the error + if strings.Contains(err.Error(), "container not found") || + strings.Contains(err.Error(), "no such process") { + logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) + } else { + return err + } + } + + attributes := map[string]string{ + "signal": fmt.Sprintf("%d", sig), + } + daemon.LogContainerEventWithAttributes(container, "kill", attributes) + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *containerpkg.Container) error { + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + // 1. Send SIGKILL + if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // it's probably because it's already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we return it to the caller. 
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+		defer cancel()
+
+		if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
+			return err
+		}
+	}
+
+	// 2. Wait for the process to die; as a last resort, try to kill the process directly
+	if err := killProcessDirectly(container); err != nil {
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+		return err
+	}
+
+	// Wait for exit with no timeout.
+	// Ignore returned status.
+	<-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+
+	return nil
+}
+
+// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" errors.
+func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error {
+	err := daemon.killWithSignal(container, sig)
+	if err == syscall.ESRCH {
+		e := errNoSuchProcess{container.GetPID(), sig}
+		logrus.Debug(e)
+		return e
+	}
+	return err
+}
+
+func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error {
+	return daemon.containerd.Signal(c.ID, sig)
+}
diff --git a/daemon/links.go b/daemon/links.go
new file mode 100644
index 0000000..7f691d4
--- /dev/null
+++ b/daemon/links.go
@@ -0,0 +1,87 @@
+package daemon
+
+import (
+	"sync"
+
+	"github.com/docker/docker/container"
+)
+
+// linkIndex stores link relationships between containers, including their specified alias
+// The alias is the name the parent uses to reference the child
+type linkIndex struct {
+	// idx maps a parent->alias->child relationship
+	idx map[*container.Container]map[string]*container.Container
+	// childIdx maps child->parent->aliases
+	childIdx map[*container.Container]map[*container.Container]map[string]struct{}
+	mu       sync.Mutex
+}
+
+func newLinkIndex() *linkIndex {
+	return &linkIndex{
+		idx:      make(map[*container.Container]map[string]*container.Container),
+		childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
+	}
+}
+
+// link adds indexes for the passed in parent/child/alias relationships
+func (l *linkIndex) link(parent, child *container.Container, alias string) {
+	l.mu.Lock()
+
+	if l.idx[parent] == nil {
+		l.idx[parent] = make(map[string]*container.Container)
+	}
+	l.idx[parent][alias] = child
+	if l.childIdx[child] == nil {
+		l.childIdx[child] = make(map[*container.Container]map[string]struct{})
+	}
+	if l.childIdx[child][parent] == nil {
+		l.childIdx[child][parent] = make(map[string]struct{})
+	}
+	l.childIdx[child][parent][alias] = struct{}{}
+
+	l.mu.Unlock()
+}
+
+// unlink removes the requested alias for the given parent/child
+func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
+	l.mu.Lock()
+	delete(l.idx[parent], alias)
+	delete(l.childIdx[child], parent)
+	l.mu.Unlock()
+}
+
+// children maps all the aliases-> children for the passed in parent
+// aliases here are the aliases the parent uses to refer to the child
+func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+	children := l.idx[parent]
+	l.mu.Unlock()
+	return children
+}
+
+// parents maps all the aliases->parent for the passed in child
+// aliases here are the aliases the parents use to refer to the child
+func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+
+	parents := make(map[string]*container.Container)
+	for parent, aliases := range l.childIdx[child] {
+		for alias := range aliases {
+			parents[alias] = parent
+		}
+	}
+
+	l.mu.Unlock()
+	return parents
+}
+
+// delete deletes all link relationships referencing this container
+func (l *linkIndex) delete(container *container.Container) {
+	l.mu.Lock()
+	for _, child := range l.idx[container] {
+		delete(l.childIdx[child], container)
+	}
+	delete(l.idx, container)
+	delete(l.childIdx, container)
+	l.mu.Unlock()
+}
diff --git a/daemon/links/links.go b/daemon/links/links.go
new file mode 100644
index 0000000..af15de0
--- /dev/null
+++ b/daemon/links/links.go
@@ -0,0 +1,141 @@
+package links
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// Link struct holds information about parent/child linked containers
+type Link struct {
+	// Parent container IP address
+	ParentIP string
+	// Child container IP address
+	ChildIP string
+	// Link name
+	Name string
+	// Child environment variables
+	ChildEnvironment []string
+	// Child exposed ports
+	Ports []nat.Port
+}
+
+// NewLink initializes a new Link struct with the provided options.
+func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link {
+	var (
+		i     int
+		ports = make([]nat.Port, len(exposedPorts))
+	)
+
+	for p := range exposedPorts {
+		ports[i] = p
+		i++
+	}
+
+	return &Link{
+		Name:             name,
+		ChildIP:          childIP,
+		ParentIP:         parentIP,
+		ChildEnvironment: env,
+		Ports:            ports,
+	}
+}
+
+// ToEnv creates a slice of strings containing child container information in
+// the form of environment variables which will be later exported on container
+// startup.
+func (l *Link) ToEnv() []string {
+	env := []string{}
+
+	_, n := path.Split(l.Name)
+	alias := strings.Replace(strings.ToUpper(n), "-", "_", -1)
+
+	if p := l.getDefaultPort(); p != nil {
+		env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port()))
+	}
+
+	// Sort the ports so that we can group contiguous ports together
+	nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
+		// Sort in ascending order; if two ports have the same number, tcp takes priority
+		return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp")
+	})
+
+	for i := 0; i < len(l.Ports); {
+		p := l.Ports[i]
+		j := nextContiguous(l.Ports, p.Int(), i)
+		if j > i+1 {
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port()))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto()))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port()))
+
+			q := l.Ports[j]
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port()))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port()))
+
+			i = j + 1
+			continue
+		}
+		i++
+	}
+	for _, p := range l.Ports {
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port()))
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP))
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port()))
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(),
strings.ToUpper(p.Proto()), p.Proto()))
+	}
+
+	// Load the linked container's name into the environment
+	env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name))
+
+	if l.ChildEnvironment != nil {
+		for _, v := range l.ChildEnvironment {
+			parts := strings.SplitN(v, "=", 2)
+			if len(parts) < 2 {
+				continue
+			}
+			// Ignore a few variables that are added during docker build (and not really relevant to linked containers)
+			if parts[0] == "HOME" || parts[0] == "PATH" {
+				continue
+			}
+			env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1]))
+		}
+	}
+	return env
+}
+
+func nextContiguous(ports []nat.Port, value int, index int) int {
+	if index+1 == len(ports) {
+		return index
+	}
+	for i := index + 1; i < len(ports); i++ {
+		if ports[i].Int() > value+1 {
+			return i - 1
+		}
+
+		value++
+	}
+	return len(ports) - 1
+}
+
+// getDefaultPort picks the port used for the top-level <ALIAS>_PORT variable:
+// the lowest exposed port, with tcp taking priority on ties.
+func (l *Link) getDefaultPort() *nat.Port {
+	var p nat.Port
+	i := len(l.Ports)
+
+	if i == 0 {
+		return nil
+	} else if i > 1 {
+		nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
+			// Sort in ascending order; if two ports have the same number, tcp takes priority
+			return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp")
+		})
+	}
+	p = l.Ports[0]
+	return &p
+}
diff --git a/daemon/links/links_test.go b/daemon/links/links_test.go
new file mode 100644
index 0000000..b852c44
--- /dev/null
+++ b/daemon/links/links_test.go
@@ -0,0 +1,213 @@
+package links
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// Just to make life easier
+func newPortNoError(proto, port string) nat.Port {
+	p, _ := nat.NewPort(proto, port)
+	return p
+}
+
+func TestLinkNaming(t *testing.T) {
+	ports := make(nat.PortSet)
+	ports[newPortNoError("tcp", "6379")] = struct{}{}
+
+	link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports)
+
+	rawEnv := link.ToEnv()
+	env := make(map[string]string, len(rawEnv))
+	for _, e := range rawEnv {
+		parts := strings.Split(e, "=")
+		if len(parts) != 2 {
+			t.FailNow()
+		}
+		env[parts[0]] = parts[1]
+	}
+
+	value, ok := env["DOCKER_1_PORT"]
+
+	if !ok {
+		t.Fatal("DOCKER_1_PORT not found in env")
+	}
+
+	if value != "tcp://172.0.17.2:6379" {
+		t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"])
+	}
+}
+
+func TestLinkNew(t *testing.T) {
+	ports := make(nat.PortSet)
+	ports[newPortNoError("tcp", "6379")] = struct{}{}
+
+	link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports)
+
+	if link.Name != "/db/docker" {
+		t.Fail()
+	}
+	if link.ParentIP != "172.0.17.3" {
+		t.Fail()
+	}
+	if link.ChildIP != "172.0.17.2" {
+		t.Fail()
+	}
+	for _, p := range link.Ports {
+		if p != newPortNoError("tcp", "6379") {
+			t.Fail()
+		}
+	}
+}
+
+func TestLinkEnv(t *testing.T) {
+	ports := make(nat.PortSet)
+	ports[newPortNoError("tcp", "6379")] = struct{}{}
+
+	link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
+
+	rawEnv := link.ToEnv()
+	env := make(map[string]string, len(rawEnv))
+	for _, e := range rawEnv {
+		parts := strings.Split(e, "=")
+		if len(parts) != 2 {
+			t.FailNow()
+		}
+		env[parts[0]] = parts[1]
+	}
+	if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" {
+		t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"])
+	}
+	if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" {
+		t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"])
+	}
+	if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" {
+		t.Fatalf("Expected tcp, got %s",
env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + 
t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for _, i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP_PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP_PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] != "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if env[tcpport] != fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] != "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] != fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/daemon/list.go b/daemon/list.go new file mode 100644 index 0000000..870a7bd --- /dev/null +++ b/daemon/list.go @@ -0,0 +1,668 @@ +package daemon + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "health": true, + "since": true, + "volume": true, + "network": true, + "is-task": true, + "publish": true, + "expose": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. 
+type listContext struct {
+	// idx is the container iteration index for this context
+	idx int
+	// ancestorFilter tells whether it should check ancestors or not
+	ancestorFilter bool
+	// names is a list of container names to filter with
+	names map[string][]string
+	// images is a list of images to filter with
+	images map[image.ID]bool
+	// filters is a collection of arguments to filter with, specified by the user
+	filters filters.Args
+	// exitAllowed is a list of exit codes allowed to filter with
+	exitAllowed []int
+
+	// beforeFilter is a filter to ignore containers that appear before the one given
+	beforeFilter *container.Snapshot
+	// sinceFilter is a filter to stop the filtering when the iterator arrives at the given container
+	sinceFilter *container.Snapshot
+
+	// taskFilter tells if we should filter based on whether a container is part of a task
+	taskFilter bool
+	// isTask tells us if we should filter containers that are a task (true) or not (false)
+	isTask bool
+
+	// publish is a list of published ports to filter with
+	publish map[nat.Port]bool
+	// expose is a list of exposed ports to filter with
+	expose map[nat.Port]bool
+
+	// ContainerListOptions is the filters set by the user
+	*types.ContainerListOptions
+}
+
+// byCreatedDescending is a temporary type used to sort a list of containers by creation time.
+type byCreatedDescending []container.Snapshot
+
+func (r byCreatedDescending) Len() int      { return len(r) }
+func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r byCreatedDescending) Less(i, j int) bool {
+	return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano()
+}
+
+// Containers returns the list of containers to show given the user's filtering.
+func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
+	return daemon.reduceContainers(config, daemon.refreshImage)
+}
+
+func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
+	idSearch := false
+	names := ctx.filters.Get("name")
+	ids := ctx.filters.Get("id")
+	if len(names)+len(ids) == 0 {
+		// if name or ID filters are not in use, return to
+		// standard behavior of walking the entire container
+		// list from the daemon's in-memory store
+		all, err := view.All()
+		sort.Sort(byCreatedDescending(all))
+		return all, err
+	}
+
+	// idSearch will determine if we limit name matching to the IDs
+	// matched from any IDs which were specified as filters
+	if len(ids) > 0 {
+		idSearch = true
+	}
+
+	matches := make(map[string]bool)
+	// find ID matches; errors represent "not found" and can be ignored
+	for _, id := range ids {
+		if fullID, err := daemon.idIndex.Get(id); err == nil {
+			matches[fullID] = true
+		}
+	}
+
+	// look for name matches; if ID filtering was used, then limit the
+	// search space to the matches map only; errors represent "not found"
+	// and can be ignored
+	if len(names) > 0 {
+		for id, idNames := range ctx.names {
+			// if ID filters were used and no matches on that ID were
+			// found, continue to next ID in the list
+			if idSearch && !matches[id] {
+				continue
+			}
+			for _, eachName := range idNames {
+				if ctx.filters.Match("name", eachName) {
+					matches[id] = true
+				}
+			}
+		}
+	}
+
+	cntrs := make([]container.Snapshot, 0, len(matches))
+	for id := range matches {
+		c, err := view.Get(id)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			cntrs = append(cntrs, *c)
+		}
+	}
+
+	// Restore sort-order after filtering
+	// Created gives us nanosec resolution for
sorting + sort.Sort(byCreatedDescending(cntrs)) + + return cntrs, nil +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + var ( + view = daemon.containersReplica.Snapshot(daemon.nameIndex) + containers = []*types.Container{} + ) + + ctx, err := daemon.foldFilter(view, config) + if err != nil { + return nil, err + } + + // fastpath to only look at a subset of containers if specific name + // or ID matches were provided by the user--otherwise we potentially + // end up querying many more containers than intended + containerList, err := daemon.filterByNameIDMatches(view, ctx) + if err != nil { + return nil, err + } + + for i := range containerList { + t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) { + // filter containers to return + switch includeContainerInList(container, ctx) { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + newC, err := reducer(container, ctx) + if err != nil { + return nil, err + } + + // release lock because size calculation is slow + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(newC.ID) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + return newC, nil +} + +// foldFilter generates the container filter based on the user's filtering options. 
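+//
+// For example (illustrative), `docker ps -f status=exited -f before=web`
+// reaches this function as the filter set {"status": ["exited"], "before": ["web"]};
+// validating the status filter forces config.All on, and beforeFilter is
+// resolved to the snapshot of the "web" container.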
+func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) {
+	psFilters := config.Filters
+
+	if err := psFilters.Validate(acceptedPsFilterTags); err != nil {
+		return nil, err
+	}
+
+	var filtExited []int
+
+	err := psFilters.WalkValues("exited", func(value string) error {
+		code, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		filtExited = append(filtExited, code)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = psFilters.WalkValues("status", func(value string) error {
+		if !container.IsValidStateString(value) {
+			return fmt.Errorf("Unrecognised filter value for status: %s", value)
+		}
+
+		config.All = true
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var taskFilter, isTask bool
+	if psFilters.Include("is-task") {
+		if psFilters.ExactMatch("is-task", "true") {
+			taskFilter = true
+			isTask = true
+		} else if psFilters.ExactMatch("is-task", "false") {
+			taskFilter = true
+			isTask = false
+		} else {
+			return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task"))
+		}
+	}
+
+	err = psFilters.WalkValues("health", func(value string) error {
+		if !container.IsValidHealthString(value) {
+			return fmt.Errorf("Unrecognised filter value for health: %s", value)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var beforeContFilter, sinceContFilter *container.Snapshot
+
+	err = psFilters.WalkValues("before", func(value string) error {
+		beforeContFilter, err = view.Get(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = psFilters.WalkValues("since", func(value string) error {
+		sinceContFilter, err = view.Get(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	imagesFilter := map[image.ID]bool{}
+	var ancestorFilter bool
+	if psFilters.Include("ancestor") {
+		ancestorFilter = true
+		psFilters.WalkValues("ancestor", func(ancestor string) error {
+			id, platform, err := daemon.GetImageIDAndPlatform(ancestor)
+			if err != nil {
+				logrus.Warnf("Error while looking up for image %v", ancestor)
+				return nil
+			}
+			if imagesFilter[id] {
+				// Already seen this ancestor, skip it
+				return nil
+			}
+			// Then walk down the graph and put the imageIds in imagesFilter
+			populateImageFilterByParents(imagesFilter, id, daemon.stores[platform].imageStore.Children)
+			return nil
+		})
+	}
+
+	publishFilter := map[nat.Port]bool{}
+	err = psFilters.WalkValues("publish", portOp("publish", publishFilter))
+	if err != nil {
+		return nil, err
+	}
+
+	exposeFilter := map[nat.Port]bool{}
+	err = psFilters.WalkValues("expose", portOp("expose", exposeFilter))
+	if err != nil {
+		return nil, err
+	}
+
+	return &listContext{
+		filters:              psFilters,
+		ancestorFilter:       ancestorFilter,
+		images:               imagesFilter,
+		exitAllowed:          filtExited,
+		beforeFilter:         beforeContFilter,
+		sinceFilter:          sinceContFilter,
+		taskFilter:           taskFilter,
+		isTask:               isTask,
+		publish:              publishFilter,
+		expose:               exposeFilter,
+		ContainerListOptions: config,
+		names:                daemon.nameIndex.GetAll(),
+	}, nil
+}
+func portOp(key string, filter map[nat.Port]bool) func(value string) error {
+	return func(value string) error {
+		if strings.Contains(value, ":") {
+			return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value)
+		}
+		// support two formats: <portnum>/[<proto>] or <startport-endport>/[<proto>]
+		proto, port := nat.SplitProtoPort(value)
+		start, end, err := nat.ParsePortRange(port)
+		if err != nil {
+			return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+		}
+		for i := start; i <= end; i++ {
+			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
+			if err != nil {
+				return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+			}
+			filter[p] = true
+		}
+		return nil
+	}
+}
+
+// includeContainerInList decides whether a container should be included in the output or not based on the filter.
+// It also decides if the iteration should be stopped or not.
+func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
+	// Do not include container if it's in the list before the filter container.
+	// Set the filter container to nil to include the rest of containers after this one.
+	if ctx.beforeFilter != nil {
+		if container.ID == ctx.beforeFilter.ID {
+			ctx.beforeFilter = nil
+		}
+		return excludeContainer
+	}
+
+	// Stop iteration when the container arrives at the filter container
+	if ctx.sinceFilter != nil {
+		if container.ID == ctx.sinceFilter.ID {
+			return stopIteration
+		}
+	}
+
+	// Do not include container if it's stopped, unless all containers were requested or a limit is set
+	if !container.Running && !ctx.All && ctx.Limit <= 0 {
+		return excludeContainer
+	}
+
+	// Do not include container if the name doesn't match
+	if !ctx.filters.Match("name", container.Name) {
+		return excludeContainer
+	}
+
+	// Do not include container if the id doesn't match
+	if !ctx.filters.Match("id", container.ID) {
+		return excludeContainer
+	}
+
+	if ctx.taskFilter {
+		if ctx.isTask != container.Managed {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if any of the labels don't match
+	if !ctx.filters.MatchKVList("label", container.Labels) {
+		return excludeContainer
+	}
+
+	// Do not include container if isolation doesn't match
+	if excludeContainer == excludeByIsolation(container, ctx) {
+		return excludeContainer
+	}
+
+	// Stop iteration when the index is over the limit
+	if ctx.Limit > 0 && ctx.idx == ctx.Limit {
+		return stopIteration
+	}
+
+	// Do not include container if its exit code is not in the filter
+	if len(ctx.exitAllowed) > 0 {
+		shouldSkip := true
+		for _, code := range ctx.exitAllowed {
+			if code == container.ExitCode && !container.Running && !container.StartedAt.IsZero() {
+				shouldSkip = false
+				break
+			}
+		}
+		if shouldSkip {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if its status doesn't match the filter
+	if !ctx.filters.Match("status", container.State) {
+		return excludeContainer
+	}
+
+	// Do not include container if its health doesn't match the filter
+	if !ctx.filters.ExactMatch("health", container.Health) {
+		return excludeContainer
+	}
+
+	if ctx.filters.Include("volume") {
+		volumesByName := make(map[string]types.MountPoint)
+		for _, m := range container.Mounts {
+			if m.Name != "" {
+				volumesByName[m.Name] = m
+			} else {
+				volumesByName[m.Source] = m
+			}
+		}
+		volumesByDestination := make(map[string]types.MountPoint)
+		for _, m := range container.Mounts {
+			if m.Destination != "" {
+				volumesByDestination[m.Destination] = m
+			}
+		}
+
+		volumeExist := fmt.Errorf("volume mounted in container")
+		err := ctx.filters.WalkValues("volume", func(value string) error {
+			if _, exist := volumesByDestination[value]; exist {
+				return volumeExist
+			}
+			if _, exist := volumesByName[value]; exist {
+				return volumeExist
+			}
+			return nil
+		})
+		if err != volumeExist {
+			return excludeContainer
+		}
+	}
+
+	if ctx.ancestorFilter {
+		if len(ctx.images) == 0 {
+			return excludeContainer
+		}
+		if !ctx.images[image.ID(container.ImageID)] {
+			return excludeContainer
+		}
+	}
+
+	var (
+		networkExist = errors.New("container part of
network") + noNetworks = errors.New("container is not part of any networks") + ) + if ctx.filters.Include("network") { + err := ctx.filters.WalkValues("network", func(value string) error { + if container.NetworkSettings == nil { + return noNetworks + } + if _, ok := container.NetworkSettings.Networks[value]; ok { + return networkExist + } + for _, nw := range container.NetworkSettings.Networks { + if nw == nil { + continue + } + if strings.HasPrefix(nw.NetworkID, value) { + return networkExist + } + } + return nil + }) + if err != networkExist { + return excludeContainer + } + } + + if len(ctx.publish) > 0 { + shouldSkip := true + for port := range ctx.publish { + if _, ok := container.PortBindings[port]; ok { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + if len(ctx.expose) > 0 { + shouldSkip := true + for port := range ctx.expose { + if _, ok := container.ExposedPorts[port]; ok { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + return includeContainer +} + +// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't +func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { + c := s.Container + image := s.Image // keep the original ref if still valid (hasn't changed) + if image != s.ImageID { + id, _, err := daemon.GetImageIDAndPlatform(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id.String() != s.ImageID { + // ref changed, we need to use original ID + image = s.ImageID + } + } + c.Image = image + return &c, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. 
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) {
+	var (
+		volumesOut []*types.Volume
+	)
+	volFilters, err := filters.FromParam(filter)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil {
+		return nil, nil, err
+	}
+
+	volumes, warnings, err := daemon.volumes.List()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	filterVolumes, err := daemon.filterVolumes(volumes, volFilters)
+	if err != nil {
+		return nil, nil, err
+	}
+	for _, v := range filterVolumes {
+		apiV := volumeToAPIType(v)
+		if vv, ok := v.(interface {
+			CachedPath() string
+		}); ok {
+			apiV.Mountpoint = vv.CachedPath()
+		} else {
+			apiV.Mountpoint = v.Path()
+		}
+		volumesOut = append(volumesOut, apiV)
+	}
+	return volumesOut, warnings, nil
+}
+
+// filterVolumes filters the volume list according to the user-specified filter
+// and returns the matching volumes
+func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) {
+	// if filter is empty, return original volume list
+	if filter.Len() == 0 {
+		return vols, nil
+	}
+
+	var retVols []volume.Volume
+	for _, vol := range vols {
+		if filter.Include("name") {
+			if !filter.Match("name", vol.Name()) {
+				continue
+			}
+		}
+		if filter.Include("driver") {
+			if !filter.ExactMatch("driver", vol.DriverName()) {
+				continue
+			}
+		}
+		if filter.Include("label") {
+			v, ok := vol.(volume.DetailedVolume)
+			if !ok {
+				continue
+			}
+			if !filter.MatchKVList("label", v.Labels()) {
+				continue
+			}
+		}
+		retVols = append(retVols, vol)
+	}
+	danglingOnly := false
+	if filter.Include("dangling") {
+		if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") {
+			danglingOnly = true
+		} else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") {
+			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling"))
+		}
+		retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly)
+	}
+	return retVols, nil
+}
+
+func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) {
+	if !ancestorMap[imageID] {
+		for _, id := range getChildren(imageID) {
+			populateImageFilterByParents(ancestorMap, id, getChildren)
+		}
+		ancestorMap[imageID] = true
+	}
+}
diff --git a/daemon/list_unix.go b/daemon/list_unix.go
new file mode 100644
index 0000000..ebaae45
--- /dev/null
+++ b/daemon/list_unix.go
@@ -0,0 +1,11 @@
+// +build linux freebsd solaris
+
+package daemon
+
+import "github.com/docker/docker/container"
+
+// excludeByIsolation is a platform specific helper function to support PS
+// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix.
+func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction {
+	return includeContainer
+}
diff --git a/daemon/list_windows.go b/daemon/list_windows.go
new file mode 100644
index 0000000..ab563c5
--- /dev/null
+++ b/daemon/list_windows.go
@@ -0,0 +1,20 @@
+package daemon
+
+import (
+	"strings"
+
+	"github.com/docker/docker/container"
+)
+
+// excludeByIsolation is a platform specific helper function to support PS
+// filtering by Isolation, which is a Windows-only concept.
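+// For example (illustrative), `docker ps -f isolation=hyperv` keeps only
+// containers whose HostConfig.Isolation normalizes to "hyperv"; an empty
+// value is treated as "default".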
+func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.HostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go new file mode 100644 index 0000000..d2d2bdf --- /dev/null +++ b/daemon/logdrivers_linux.go @@ -0,0 +1,8 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/journald" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" +) diff --git a/daemon/logdrivers_windows.go b/daemon/logdrivers_windows.go new file mode 100644 index 0000000..f3002b9 --- /dev/null +++ b/daemon/logdrivers_windows.go @@ -0,0 +1,13 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/etwlogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/daemon/logger/adapter.go b/daemon/logger/adapter.go new file mode 100644 index 0000000..a187b30 --- /dev/null +++ b/daemon/logger/adapter.go @@ -0,0 +1,137 @@ +package logger + +import ( + "io" + "os" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/pkg/plugingetter" + "github.com/pkg/errors" +) + +// pluginAdapter takes a plugin and implements the Logger interface for logger +// instances +type pluginAdapter struct { + driverName string + id string + plugin logPlugin + basePath string + fifoPath string + capabilities Capability + logInfo Info + + // synchronize access to the log stream and shared buffer + mu sync.Mutex + enc logdriver.LogEntryEncoder + stream io.WriteCloser + // buf is shared for each `Log()` call to reduce allocations. 
+ // buf must be protected by mutex + buf logdriver.LogEntry +} + +func (a *pluginAdapter) Log(msg *Message) error { + a.mu.Lock() + + a.buf.Line = msg.Line + a.buf.TimeNano = msg.Timestamp.UnixNano() + a.buf.Partial = msg.Partial + a.buf.Source = msg.Source + + err := a.enc.Encode(&a.buf) + a.buf.Reset() + + a.mu.Unlock() + + PutMessage(msg) + return err +} + +func (a *pluginAdapter) Name() string { + return a.driverName +} + +func (a *pluginAdapter) Close() error { + a.mu.Lock() + defer a.mu.Unlock() + + if err := a.plugin.StopLogging(strings.TrimPrefix(a.fifoPath, a.basePath)); err != nil { + return err + } + + if err := a.stream.Close(); err != nil { + logrus.WithError(err).Error("error closing plugin fifo") + } + if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).Error("error cleaning up plugin fifo") + } + + // may be nil, especially for unit tests + if pluginGetter != nil { + pluginGetter.Get(a.Name(), extName, plugingetter.Release) + } + return nil +} + +type pluginAdapterWithRead struct { + *pluginAdapter +} + +func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { + watcher := NewLogWatcher() + + go func() { + defer close(watcher.Msg) + stream, err := a.plugin.ReadLogs(a.logInfo, config) + if err != nil { + watcher.Err <- errors.Wrap(err, "error getting log reader") + return + } + defer stream.Close() + + dec := logdriver.NewLogEntryDecoder(stream) + for { + select { + case <-watcher.WatchClose(): + return + default: + } + + var buf logdriver.LogEntry + if err := dec.Decode(&buf); err != nil { + if err == io.EOF { + return + } + select { + case watcher.Err <- errors.Wrap(err, "error decoding log message"): + case <-watcher.WatchClose(): + } + return + } + + msg := &Message{ + Timestamp: time.Unix(0, buf.TimeNano), + Line: buf.Line, + Source: buf.Source, + } + + // plugin should handle this, but check just in case + if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { + continue + } + + select { + case watcher.Msg <- msg: + case <-watcher.WatchClose(): + // make sure the message we consumed is sent + watcher.Msg <- msg + return + } + } + }() + + return watcher +} diff --git a/daemon/logger/adapter_test.go b/daemon/logger/adapter_test.go new file mode 100644 index 0000000..b8c069f --- /dev/null +++ b/daemon/logger/adapter_test.go @@ -0,0 +1,180 @@ +package logger + +import ( + "encoding/binary" + "io" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + protoio "github.com/gogo/protobuf/io" + "github.com/stretchr/testify/assert" +) + +// mockLoggingPlugin implements the loggingPlugin interface for testing purposes +// it only supports a single log stream +type mockLoggingPlugin struct { + inStream io.ReadCloser + f *os.File + closed chan struct{} + t *testing.T +} + +func (l *mockLoggingPlugin) StartLogging(file string, info Info) error { + go func() { + io.Copy(l.f, l.inStream) + close(l.closed) + }() + return nil +} + +func (l *mockLoggingPlugin) StopLogging(file string) error { + l.inStream.Close() + l.f.Close() + os.Remove(l.f.Name()) + return nil +} + +func (l *mockLoggingPlugin) Capabilities() (cap Capability, err error) { + return Capability{ReadLogs: true}, nil +} + +func (l *mockLoggingPlugin) ReadLogs(info Info, config ReadConfig) (io.ReadCloser, error) { + r, w := io.Pipe() + f, err := os.Open(l.f.Name()) + if err != nil { + return nil, err + } + go func() { + defer f.Close() + dec := protoio.NewUint32DelimitedReader(f, binary.BigEndian, 1e6) + enc 
:= logdriver.NewLogEntryEncoder(w)
+
+		for {
+			select {
+			case <-l.closed:
+				w.Close()
+				return
+			default:
+			}
+
+			var msg logdriver.LogEntry
+			if err := dec.ReadMsg(&msg); err != nil {
+				if err == io.EOF {
+					if !config.Follow {
+						w.Close()
+						return
+					}
+					dec = protoio.NewUint32DelimitedReader(f, binary.BigEndian, 1e6)
+					continue
+				}
+
+				// t.Fatal must not be called from a goroutine other than the
+				// test goroutine; report the error and stop copying instead.
+				l.t.Error(err)
+				w.CloseWithError(err)
+				return
+			}
+
+			if err := enc.Encode(&msg); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return r, nil
+}
+
+func newMockPluginAdapter(t *testing.T) Logger {
+	r, w := io.Pipe()
+	f, err := ioutil.TempFile("", "mock-plugin-adapter")
+	assert.NoError(t, err)
+
+	enc := logdriver.NewLogEntryEncoder(w)
+	a := &pluginAdapterWithRead{
+		&pluginAdapter{
+			plugin: &mockLoggingPlugin{
+				inStream: r,
+				f:        f,
+				closed:   make(chan struct{}),
+				t:        t,
+			},
+			stream: w,
+			enc:    enc,
+		},
+	}
+	a.plugin.StartLogging("", Info{})
+	return a
+}
+
+func TestAdapterReadLogs(t *testing.T) {
+	l := newMockPluginAdapter(t)
+
+	testMsg := []Message{
+		{Line: []byte("Are you the keymaker?"), Timestamp: time.Now()},
+		{Line: []byte("Follow the white rabbit"), Timestamp: time.Now()},
+	}
+	for _, msg := range testMsg {
+		m := msg.copy()
+		assert.NoError(t, l.Log(m))
+	}
+
+	lr, ok := l.(LogReader)
+	assert.True(t, ok, "expected logger to implement LogReader")
+
+	lw := lr.ReadLogs(ReadConfig{})
+
+	for _, x := range testMsg {
+		select {
+		case msg := <-lw.Msg:
+			testMessageEqual(t, &x, msg)
+		case <-time.After(10 * time.Second):
+			t.Fatal("timeout reading logs")
+		}
+	}
+
+	select {
+	case _, ok := <-lw.Msg:
+		assert.False(t, ok, "expected message channel to be closed")
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for message channel to close")
+	}
+	lw.Close()
+
+	lw = lr.ReadLogs(ReadConfig{Follow: true})
+	for _, x := range testMsg {
+		select {
+		case msg := <-lw.Msg:
+			testMessageEqual(t, &x, msg)
+		case <-time.After(10 * time.Second):
+			t.Fatal("timeout reading logs")
+		}
+	}
+
+	x := Message{Line: []byte("To infinity and beyond!"), Timestamp: time.Now()}
+	assert.NoError(t, l.Log(x.copy()))
+
+	select {
+	case msg, ok := <-lw.Msg:
+		assert.True(t, ok, "message channel unexpectedly closed")
+		testMessageEqual(t, &x, msg)
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout reading logs")
+	}
+
+	l.Close()
+	select {
+	case msg, ok := <-lw.Msg:
+		assert.False(t, ok, "expected message channel to be closed")
+		assert.Nil(t, msg)
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for logger to close")
+	}
+}
+
+func testMessageEqual(t *testing.T, a, b *Message) {
+	assert.Equal(t, a.Line, b.Line)
+	assert.Equal(t, a.Timestamp.UnixNano(), b.Timestamp.UnixNano())
+	assert.Equal(t, a.Source, b.Source)
+}
diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go
new file mode 100644
index 0000000..d44f09d
--- /dev/null
+++ b/daemon/logger/awslogs/cloudwatchlogs.go
@@ -0,0 +1,598 @@
+// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs
+package awslogs
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/templates"
+	"github.com/pkg/errors"
+)
+
+const (
+	name                  = "awslogs"
+	regionKey             = "awslogs-region"
+	regionEnvKey          = "AWS_REGION"
+	logGroupKey           = "awslogs-group"
+	logStreamKey          = "awslogs-stream"
+	logCreateGroupKey     = "awslogs-create-group"
+	tagKey                = "tag"
+	datetimeFormatKey     = "awslogs-datetime-format"
+	multilinePatternKey   = "awslogs-multiline-pattern"
+	batchPublishFrequency = 5 * time.Second
+
+	// See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
+	perEventBytes          = 26
+	maximumBytesPerPut     = 1048576
+	maximumLogEventsPerPut = 10000
+
+	// See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
+	maximumBytesPerEvent = 262144 - perEventBytes
+
+	resourceAlreadyExistsCode = "ResourceAlreadyExistsException"
+	dataAlreadyAcceptedCode   = "DataAlreadyAcceptedException"
+	invalidSequenceTokenCode  = "InvalidSequenceTokenException"
+	resourceNotFoundCode      = "ResourceNotFoundException"
+
+	userAgentHeader = "User-Agent"
+)
+
+type logStream struct {
+	logStreamName    string
+	logGroupName     string
+	logCreateGroup   bool
+	multilinePattern *regexp.Regexp
+	client           api
+	messages         chan *logger.Message
+	lock             sync.RWMutex
+	closed           bool
+	sequenceToken    *string
+}
+
+type api interface {
+	CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error)
+	CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error)
+	PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error)
+}
+
+type regionFinder interface {
+	Region() (string, error)
+}
+
+type wrappedEvent struct {
+	inputLogEvent *cloudwatchlogs.InputLogEvent
+	insertOrder   int
+}
+type byTimestamp []wrappedEvent
+
+// init registers the awslogs driver
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// New creates an awslogs logger using the configuration passed in on the
+// context. Supported context configuration variables are awslogs-region,
+// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern
+// and awslogs-datetime-format. When available, configuration is
+// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID,
+// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and
+// the EC2 Instance Metadata Service.
+func New(info logger.Info) (logger.Logger, error) {
+	logGroupName := info.Config[logGroupKey]
+	logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}")
+	if err != nil {
+		return nil, err
+	}
+	logCreateGroup := false
+	if info.Config[logCreateGroupKey] != "" {
+		logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if info.Config[logStreamKey] != "" {
+		logStreamName = info.Config[logStreamKey]
+	}
+
+	multilinePattern, err := parseMultilineOptions(info)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := newAWSLogsClient(info)
+	if err != nil {
+		return nil, err
+	}
+	containerStream := &logStream{
+		logStreamName:    logStreamName,
+		logGroupName:     logGroupName,
+		logCreateGroup:   logCreateGroup,
+		multilinePattern: multilinePattern,
+		client:           client,
+		messages:         make(chan *logger.Message, 4096),
+	}
+	err = containerStream.create()
+	if err != nil {
+		return nil, err
+	}
+	go containerStream.collectBatch()
+
+	return containerStream, nil
+}
+
+// parseMultilineOptions parses the awslogs-multiline-pattern and
+// awslogs-datetime-format options. If awslogs-datetime-format is present, the
+// strftime format is converted to a regular expression and takes precedence;
+// otherwise, if awslogs-multiline-pattern is present, it is compiled as-is.
+func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) {
+	dateTimeFormat := info.Config[datetimeFormatKey]
+	multilinePatternValue := info.Config[multilinePatternKey]
+	// strftime input is parsed into a regular expression
+	if dateTimeFormat != "" {
+		// %. matches each strftime format sequence and ReplaceAllStringFunc
+		// looks up each format sequence in the conversion table strftimeToRegex
+		// to replace with a defined regular expression
+		r := regexp.MustCompile("%.")
+		multilinePatternValue = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string {
+			return strftimeToRegex[s]
+		})
+	}
+	if multilinePatternValue != "" {
+		multilinePattern, err := regexp.Compile(multilinePatternValue)
+		if err != nil {
+			return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternValue)
+		}
+		return multilinePattern, nil
+	}
+	return nil, nil
+}
+
+// strftimeToRegex maps strftime format strings to regular expressions
+var strftimeToRegex = map[string]string{
+	/*weekdayShort          */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`,
+	/*weekdayFull           */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`,
+	/*weekdayZeroIndex      */ `%w`: `[0-6]`,
+	/*dayZeroPadded         */ `%d`: `(?:0[1-9]|[12][0-9]|3[01])`,
+	/*monthShort            */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`,
+	/*monthFull             */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`,
+	/*monthZeroPadded       */ `%m`: `(?:0[1-9]|1[0-2])`,
+	/*yearCentury           */ `%Y`: `\d{4}`,
+	/*yearZeroPadded        */ `%y`: `\d{2}`,
+	/*hour24ZeroPadded      */ `%H`: `(?:[01][0-9]|2[0-3])`,
+	/*hour12ZeroPadded      */ `%I`: `(?:0[0-9]|1[0-2])`,
+	/*AM or PM              */ `%p`: `[AP]M`,
+	/*minuteZeroPadded      */ `%M`: `[0-5][0-9]`,
+	/*secondZeroPadded      */ `%S`: `[0-5][0-9]`,
+	/*microsecondZeroPadded */ `%f`: `\d{6}`,
+	/*utcOffset             */ `%z`: `[+-]\d{4}`,
+	/*tzName                */ `%Z`: `[A-Z]{1,4}T`,
+	/*dayOfYearZeroPadded   */ `%j`: `(?:0[0-9][1-9]|[12][0-9][0-9]|3[0-5][0-9]|36[0-6])`,
+	/*milliseconds          */ `%L`: `\.\d{3}`,
+}
+
+func parseLogGroup(info logger.Info, groupTemplate string) (string, error) {
+	tmpl, err := templates.NewParse("log-group", groupTemplate)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	if err := tmpl.Execute(buf, &info); err
!= nil { + return "", err + } + + return buf.String(), nil +} + +// newRegionFinder is a variable such that the implementation +// can be swapped out for unit tests. +var newRegionFinder = func() regionFinder { + return ec2metadata.New(session.New()) +} + +// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. +// Customizations to the default client from the SDK include a Docker-specific +// User-Agent string and automatic region detection using the EC2 Instance +// Metadata Service when region is otherwise unspecified. +func newAWSLogsClient(info logger.Info) (api, error) { + var region *string + if os.Getenv(regionEnvKey) != "" { + region = aws.String(os.Getenv(regionEnvKey)) + } + if info.Config[regionKey] != "" { + region = aws.String(info.Config[regionKey]) + } + if region == nil || *region == "" { + logrus.Info("Trying to get region from EC2 Metadata") + ec2MetadataClient := newRegionFinder() + r, err := ec2MetadataClient.Region() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + }).Error("Could not get region from EC2 metadata, environment, or log option") + return nil, errors.New("Cannot determine region for awslogs driver") + } + region = &r + } + logrus.WithFields(logrus.Fields{ + "region": *region, + }).Debug("Created awslogs client") + + client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + + client.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "DockerUserAgentHandler", + Fn: func(r *request.Request) { + currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) + r.HTTPRequest.Header.Set(userAgentHeader, + fmt.Sprintf("Docker %s (%s) %s", + dockerversion.Version, runtime.GOOS, currentAgent)) + }, + }) + return client, nil +} + +// Name returns the name of the awslogs logging driver +func (l *logStream) Name() string { + return name +} + +// Log submits messages for logging by an instance of the awslogs logging driver +func (l *logStream) Log(msg *logger.Message) error { + l.lock.RLock() + defer l.lock.RUnlock() + if !l.closed { + l.messages <- msg + } + return nil +} + +// Close closes the instance of the awslogs logging driver +func (l *logStream) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if !l.closed { + close(l.messages) + } + l.closed = true + return nil +} + +// create creates log group and log stream for the instance of the awslogs logging driver +func (l *logStream) create() error { + if err := l.createLogStream(); err != nil { + if l.logCreateGroup { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode { + if err := l.createLogGroup(); err != nil { + return err + } + return l.createLogStream() + } + } + return err + } + + return nil +} + +// createLogGroup creates a log group for the instance of the awslogs logging driver +func (l *logStream) createLogGroup() error { + if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(l.logGroupName), + }); err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logCreateGroup": l.logCreateGroup, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log group already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log group") + } + return err + } + return nil +} + +// createLogStream creates a log stream for 
the instance of the awslogs logging driver
+func (l *logStream) createLogStream() error {
+	input := &cloudwatchlogs.CreateLogStreamInput{
+		LogGroupName:  aws.String(l.logGroupName),
+		LogStreamName: aws.String(l.logStreamName),
+	}
+
+	_, err := l.client.CreateLogStream(input)
+
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			fields := logrus.Fields{
+				"errorCode":     awsErr.Code(),
+				"message":       awsErr.Message(),
+				"origError":     awsErr.OrigErr(),
+				"logGroupName":  l.logGroupName,
+				"logStreamName": l.logStreamName,
+			}
+			if awsErr.Code() == resourceAlreadyExistsCode {
+				// Allow creation to succeed
+				logrus.WithFields(fields).Info("Log stream already exists")
+				return nil
+			}
+			logrus.WithFields(fields).Error("Failed to create log stream")
+		}
+	}
+	return err
+}
+
+// newTicker is used for time-based batching. newTicker is a variable such
+// that the implementation can be swapped out for unit tests.
+var newTicker = func(freq time.Duration) *time.Ticker {
+	return time.NewTicker(freq)
+}
+
+// collectBatch executes as a goroutine to perform batching of log events for
+// submission to the log stream. If the awslogs-multiline-pattern or
+// awslogs-datetime-format options have been configured, multiline processing
+// is enabled, where log messages are stored in an event buffer until a multiline
+// pattern match is found, at which point the messages in the event buffer are
+// pushed to CloudWatch Logs as a single log event. Multiline messages are
+// processed according to the maximumBytesPerEvent constraint, and the
+// implementation only allows for messages to be buffered for a maximum of
+// 2*batchPublishFrequency. When events are ready to be processed for
+// submission to CloudWatch Logs, the processEvent method is called. If a
+// multiline pattern is not configured, log events are submitted to the
+// processEvent method immediately.
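A minimal sketch (editorial aside, not part of the patch) of the event-buffer flush condition described above; bufferShouldFlush is a hypothetical helper, with both arguments in milliseconds to match the arithmetic in collectBatch below:

    // bufferShouldFlush is hypothetical; collectBatch inlines this check.
    func bufferShouldFlush(nowMs, bufferTsMs int64) bool {
        age := nowMs - bufferTsMs
        expired := age > int64(batchPublishFrequency/time.Millisecond) // older than the publish frequency
        negative := age < 0                                            // clock skew; flush defensively
        return expired || negative
    }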
+func (l *logStream) collectBatch() { + timer := newTicker(batchPublishFrequency) + var events []wrappedEvent + var eventBuffer []byte + var eventBufferTimestamp int64 + for { + select { + case t := <-timer.C: + // If event buffer is older than batch publish frequency flush the event buffer + if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { + eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp + eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond) + eventBufferNegative := eventBufferAge < 0 + if eventBufferExpired || eventBufferNegative { + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + } + } + l.publishBatch(events) + events = events[:0] + case msg, more := <-l.messages: + if !more { + // Flush event buffer + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + l.publishBatch(events) + return + } + if eventBufferTimestamp == 0 { + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + } + unprocessedLine := msg.Line + if l.multilinePattern != nil { + if l.multilinePattern.Match(unprocessedLine) { + // This is a new log event so flush the current eventBuffer to events + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + eventBuffer = eventBuffer[:0] + } + // If we will exceed max bytes per event flush the current event buffer before appending + if len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent { + events = l.processEvent(events, eventBuffer, eventBufferTimestamp) + eventBuffer = eventBuffer[:0] + } + // Append new line + processedLine := append(unprocessedLine, "\n"...) + eventBuffer = append(eventBuffer, processedLine...) + logger.PutMessage(msg) + } else { + events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond)) + logger.PutMessage(msg) + } + } + } +} + +// processEvent processes log events that are ready for submission to CloudWatch +// logs. Batching is performed on time- and size-bases. Time-based batching +// occurs at a 5 second interval (defined in the batchPublishFrequency const). +// Size-based batching is performed on the maximum number of events per batch +// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a +// batch (defined in maximumBytesPerPut). Log messages are split by the maximum +// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event +// byte overhead (defined in perEventBytes) which is accounted for in split- and +// batch-calculations. +func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent { + bytes := 0 + for len(unprocessedLine) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(unprocessedLine) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := unprocessedLine[:lineBytes] + unprocessedLine = unprocessedLine[lineBytes:] + if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { + // Publish an existing batch if it's already over the maximum number of events or if adding this + // event would push it over the maximum number of total bytes. 
+ l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(timestamp), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + return events +} + +// publishBatch calls PutLogEvents for a given set of InputLogEvents, +// accounting for sequencing requirements (each request must reference the +// sequence token returned by the previous request). +func (l *logStream) publishBatch(events []wrappedEvent) { + if len(events) == 0 { + return + } + + // events in a batch must be sorted by timestamp + // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + sort.Sort(byTimestamp(events)) + cwEvents := unwrapEvents(events) + + nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dataAlreadyAcceptedCode { + // already submitted, just grab the correct sequence token + parts := strings.Split(awsErr.Message(), " ") + nextSequenceToken = &parts[len(parts)-1] + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Info("Data already accepted, ignoring error") + err = nil + } else if awsErr.Code() == invalidSequenceTokenCode { + // sequence code is bad, grab the correct one and retry + parts := strings.Split(awsErr.Message(), " ") + token := parts[len(parts)-1] + nextSequenceToken, err = l.putLogEvents(cwEvents, &token) + } + } + } + if err != nil { + logrus.Error(err) + } else { + l.sequenceToken = nextSequenceToken + } +} + +// putLogEvents wraps the PutLogEvents API +func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { + input := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: sequenceToken, + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + resp, err := l.client.PutLogEvents(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Error("Failed to put log events") + } + return nil, err + } + return resp.NextSequenceToken, nil +} + +// ValidateLogOpt looks for awslogs-specific log options awslogs-region, +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, +// awslogs-multiline-pattern +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case logGroupKey: + case logStreamKey: + case logCreateGroupKey: + case regionKey: + case tagKey: + case datetimeFormatKey: + case multilinePatternKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) + } + } + if cfg[logGroupKey] == "" { + return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) + } + if cfg[logCreateGroupKey] != "" { + if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { + return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) + } + } + _, datetimeFormatKeyExists := cfg[datetimeFormatKey] + _, multilinePatternKeyExists := cfg[multilinePatternKey] + if datetimeFormatKeyExists && multilinePatternKeyExists { + 
return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) + } + return nil +} + +// Len returns the length of a byTimestamp slice. Len is required by the +// sort.Interface interface. +func (slice byTimestamp) Len() int { + return len(slice) +} + +// Less compares two values in a byTimestamp slice by Timestamp. Less is +// required by the sort.Interface interface. +func (slice byTimestamp) Less(i, j int) bool { + iTimestamp, jTimestamp := int64(0), int64(0) + if slice != nil && slice[i].inputLogEvent.Timestamp != nil { + iTimestamp = *slice[i].inputLogEvent.Timestamp + } + if slice != nil && slice[j].inputLogEvent.Timestamp != nil { + jTimestamp = *slice[j].inputLogEvent.Timestamp + } + if iTimestamp == jTimestamp { + return slice[i].insertOrder < slice[j].insertOrder + } + return iTimestamp < jTimestamp +} + +// Swap swaps two values in a byTimestamp slice with each other. Swap is +// required by the sort.Interface interface. +func (slice byTimestamp) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { + cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) + for i, input := range events { + cwEvents[i] = input.inputLogEvent + } + return cwEvents +} diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go new file mode 100644 index 0000000..e3862ff --- /dev/null +++ b/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -0,0 +1,1053 @@ +package awslogs + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" + "github.com/stretchr/testify/assert" +) + +const ( + groupName = "groupName" + streamName = "streamName" + sequenceToken = "sequenceToken" + nextSequenceToken = "nextSequenceToken" + logline = "this is a log line\r" + multilineLogline = "2017-01-01 01:01:44 This is a multiline log entry\r" +) + +// Generates i multi-line events each with j lines +func (l *logStream) logGenerator(lineCount int, multilineCount int) { + for i := 0; i < multilineCount; i++ { + l.Log(&logger.Message{ + Line: []byte(multilineLogline), + Timestamp: time.Time{}, + }) + for j := 0; j < lineCount; j++ { + l.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + } + } +} + +func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + regionKey: "us-east-1", + }, + } + + client, err := newAWSLogsClient(info) + if err != nil { + t.Fatal(err) + } + realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) + if !ok { + t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") + } + buildHandlerList := realClient.Handlers.Build + request := &request.Request{ + HTTPRequest: &http.Request{ + Header: http.Header{}, + }, + } + buildHandlerList.Run(request) + expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", + dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) + userAgent := request.HTTPRequest.Header.Get("User-Agent") + if userAgent != expectedUserAgentString { + t.Errorf("Wrong User-Agent 
string, expected \"%s\" but was \"%s\"", + expectedUserAgentString, userAgent) + } +} + +func TestNewAWSLogsClientRegionDetect(t *testing.T) { + info := logger.Info{ + Config: map[string]string{}, + } + + mockMetadata := newMockMetadataClient() + newRegionFinder = func() regionFinder { + return mockMetadata + } + mockMetadata.regionResult <- ®ionResult{ + successResult: "us-east-1", + } + + _, err := newAWSLogsClient(info) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateLogGroupSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + logCreateGroup: true, + } + mockClient.createLogGroupResult <- &createLogGroupResult{} + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: errors.New("Error!"), + } + + err := stream.create() + + if err == nil { + t.Fatal("Expected non-nil err") + } +} + +func TestCreateAlreadyExists(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), + } + + err := stream.create() + + if err != nil { + t.Fatal("Expected nil err") + } +} + +func TestPublishBatchSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, 
*stream.sequenceToken) + } + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: errors.New("Error!"), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != sequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) + } +} + +func TestPublishBatchInvalidSeqSuccess(t *testing.T) { + mockClient := newMockClientBuffered(2) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != "token" { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + 
t.Error("Expected event to equal input") + } +} + +func TestPublishBatchAlreadyAccepted(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != "token" { + t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestCollectBatchSimple(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchTicker(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline + " 1"), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(logline + " 2"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + + // Verify first batch + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) 
!= 2 {
+		t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents))
+	}
+	if *argument.LogEvents[0].Message != logline+" 1" {
+		t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message)
+	}
+	if *argument.LogEvents[1].Message != logline+" 2" {
+		t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[1].Message)
+	}
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline + " 3"),
+		Timestamp: time.Time{},
+	})
+
+	ticks <- time.Time{}
+	argument = <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != 1 {
+		t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents))
+	}
+	if *argument.LogEvents[0].Message != logline+" 3" {
+		t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message)
+	}
+
+	stream.Close()
+}
+
+func TestCollectBatchMultilinePattern(t *testing.T) {
+	mockClient := newMockClient()
+	multilinePattern := regexp.MustCompile("xxxx")
+	stream := &logStream{
+		client:           mockClient,
+		logGroupName:     groupName,
+		logStreamName:    streamName,
+		multilinePattern: multilinePattern,
+		sequenceToken:    aws.String(sequenceToken),
+		messages:         make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+	stream.Log(&logger.Message{
+		Line:      []byte("xxxx " + logline),
+		Timestamp: time.Now(),
+	})
+
+	ticks <- time.Now()
+
+	// Verify single multiline event
+	argument := <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+	assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message")
+
+	stream.Close()
+
+	// Verify single event
+	argument = <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+	assert.Equal(t, "xxxx "+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message")
+}
+
+func BenchmarkCollectBatch(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		mockClient := newMockClient()
+		stream := &logStream{
+			client:        mockClient,
+			logGroupName:  groupName,
+			logStreamName: streamName,
+			sequenceToken: aws.String(sequenceToken),
+			messages:      make(chan *logger.Message),
+		}
+		mockClient.putLogEventsResult <- &putLogEventsResult{
+			successResult: &cloudwatchlogs.PutLogEventsOutput{
+				NextSequenceToken: aws.String(nextSequenceToken),
+			},
+		}
+		ticks := make(chan time.Time)
+		newTicker = func(_ time.Duration) *time.Ticker {
+			return &time.Ticker{
+				C: ticks,
+			}
+		}
+
+		go stream.collectBatch()
+		stream.logGenerator(10, 100)
+		ticks <- time.Time{}
+		stream.Close()
+	}
+}
+
+func BenchmarkCollectBatchMultilinePattern(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		mockClient := newMockClient()
+		multilinePattern := regexp.MustCompile(`\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[12][0-9]|3[01]) (?:[01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]`)
+		stream := &logStream{
+			client:           mockClient,
+			logGroupName:     groupName,
+			logStreamName:    streamName,
+			multilinePattern: multilinePattern,
+			sequenceToken:    aws.String(sequenceToken),
+			messages:         make(chan *logger.Message),
+		}
+		mockClient.putLogEventsResult <- &putLogEventsResult{
+			successResult: &cloudwatchlogs.PutLogEventsOutput{
+				NextSequenceToken: aws.String(nextSequenceToken),
+			},
+		}
+		ticks := make(chan time.Time)
+		newTicker = func(_ time.Duration) *time.Ticker {
+			return &time.Ticker{
+				C: ticks,
+			}
+		}
+		go stream.collectBatch()
+		stream.logGenerator(10, 100)
+		ticks <- time.Time{}
+		stream.Close()
+	}
+}
+
+func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) {
+	mockClient := newMockClient()
+	multilinePattern := regexp.MustCompile("xxxx")
+	stream := &logStream{
+		client:           mockClient,
+		logGroupName:     groupName,
+		logStreamName:    streamName,
+		multilinePattern: multilinePattern,
+		sequenceToken:    aws.String(sequenceToken),
+		messages:         make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+
+	// Log an event 1 second later
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now().Add(time.Second),
+	})
+
+	// Fire the ticker just after batchPublishFrequency has elapsed
+	ticks <- time.Now().Add(batchPublishFrequency + time.Second)
+
+	// Verify single multiline event is flushed after maximum event buffer age (batchPublishFrequency)
+	argument := <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+	assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message")
+
+	stream.Close()
+}
+
+func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) {
+	mockClient := newMockClient()
+	multilinePattern := regexp.MustCompile("xxxx")
+	stream := &logStream{
+		client:           mockClient,
+		logGroupName:     groupName,
+		logStreamName:    streamName,
+		multilinePattern: multilinePattern,
+		sequenceToken:    aws.String(sequenceToken),
+		messages:         make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now(),
+	})
+
+	// Log an event 1 second later
+	stream.Log(&logger.Message{
+		Line:      []byte(logline),
+		Timestamp: time.Now().Add(time.Second),
+	})
+
+	// Fire ticker in past to simulate negative event buffer age
+	ticks <- time.Now().Add(-time.Second)
+
+	// Verify single multiline event is flushed with a negative event buffer age
+	argument := <-mockClient.putLogEventsArgument
+	assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput")
+	assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event")
+
assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + + stream.Close() +} + +func TestCollectBatchClose(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchLineSplit(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != longline { + t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != "B" { + t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) + } +} + +func TestCollectBatchMaxEvents(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + line := "A" + for i := 0; i <= maximumLogEventsPerPut; i++ { + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: time.Time{}, + }) + } + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 
maximumLogEventsPerPut {
+		t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents))
+	}
+
+	argument = <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != 1 {
+		t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents))
+	}
+}
+
+func TestCollectBatchMaxTotalBytes(t *testing.T) {
+	mockClient := newMockClientBuffered(1)
+	stream := &logStream{
+		client:        mockClient,
+		logGroupName:  groupName,
+		logStreamName: streamName,
+		sequenceToken: aws.String(sequenceToken),
+		messages:      make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	var ticks = make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	longline := strings.Repeat("A", maximumBytesPerPut)
+	stream.Log(&logger.Message{
+		Line:      []byte(longline + "B"),
+		Timestamp: time.Time{},
+	})
+
+	// no ticks
+	stream.Close()
+
+	argument := <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	bytes := 0
+	for _, event := range argument.LogEvents {
+		bytes += len(*event.Message)
+	}
+	if bytes > maximumBytesPerPut {
+		t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes)
+	}
+
+	argument = <-mockClient.putLogEventsArgument
+	if len(argument.LogEvents) != 1 {
+		t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents))
+	}
+	message := *argument.LogEvents[0].Message
+	if message[len(message)-1:] != "B" {
+		t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
+	}
+}
+
+func TestCollectBatchWithDuplicateTimestamps(t *testing.T) {
+	mockClient := newMockClient()
+	stream := &logStream{
+		client:        mockClient,
+		logGroupName:  groupName,
+		logStreamName: streamName,
+		sequenceToken: aws.String(sequenceToken),
+		messages:      make(chan *logger.Message),
+	}
+	mockClient.putLogEventsResult <- &putLogEventsResult{
+		successResult: &cloudwatchlogs.PutLogEventsOutput{
+			NextSequenceToken: aws.String(nextSequenceToken),
+		},
+	}
+	ticks := make(chan time.Time)
+	newTicker = func(_ time.Duration) *time.Ticker {
+		return &time.Ticker{
+			C: ticks,
+		}
+	}
+
+	go stream.collectBatch()
+
+	times := maximumLogEventsPerPut
+	expectedEvents := []*cloudwatchlogs.InputLogEvent{}
+	timestamp := time.Now()
+	for i := 0; i < times; i++ {
+		line := fmt.Sprintf("%d", i)
+		if i%2 == 0 {
+			// time.Time.Add returns a new value, so the result must be
+			// assigned back for the timestamp to actually advance.
+			timestamp = timestamp.Add(1 * time.Nanosecond)
+		}
+		stream.Log(&logger.Message{
+			Line:      []byte(line),
+			Timestamp: timestamp,
+		})
+		expectedEvents = append(expectedEvents, &cloudwatchlogs.InputLogEvent{
+			Message:   aws.String(line),
+			Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)),
+		})
+	}
+
+	ticks <- time.Time{}
+	stream.Close()
+
+	argument := <-mockClient.putLogEventsArgument
+	if argument == nil {
+		t.Fatal("Expected non-nil PutLogEventsInput")
+	}
+	if len(argument.LogEvents) != times {
+		t.Errorf("Expected LogEvents to contain %d elements, but contains %d", times, len(argument.LogEvents))
+	}
+	for i := 0; i < times; i++ {
+		if !reflect.DeepEqual(*argument.LogEvents[i], *expectedEvents[i]) {
+			t.Errorf("Expected event to be %v but was %v", *expectedEvents[i], *argument.LogEvents[i])
+		}
+	}
+}
+
+func
TestParseLogOptionsMultilinePattern(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + multilinePatternKey: "^xxxx", + }, + } + + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") +} + +func TestParseLogOptionsDatetimeFormat(t *testing.T) { + datetimeFormatTests := []struct { + format string + match string + }{ + {"%d/%m/%y %a %H:%M:%S%L %Z", "31/12/10 Mon 08:42:44.345 NZDT"}, + {"%Y-%m-%d %A %I:%M:%S.%f%p%z", "2007-12-04 Monday 08:42:44.123456AM+1200"}, + {"%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b", "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"}, + {"%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B", "January|February|March|April|May|June|July|August|September|October|November|December"}, + {"%A|%A|%A|%A|%A|%A|%A", "Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday"}, + {"%a|%a|%a|%a|%a|%a|%a", "Mon|Tue|Wed|Thu|Fri|Sat|Sun"}, + {"Day of the week: %w, Day of the year: %j", "Day of the week: 4, Day of the year: 091"}, + } + for _, dt := range datetimeFormatTests { + t.Run(dt.match, func(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + datetimeFormatKey: dt.format, + }, + } + multilinePattern, err := parseMultilineOptions(info) + assert.Nil(t, err, "Received unexpected error") + assert.True(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") + }) + } +} + +func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) { + cfg := map[string]string{ + multilinePatternKey: "^xxxx", + datetimeFormatKey: "%Y-%m-%d", + logGroupKey: groupName, + } + conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time" + + err := ValidateLogOpt(cfg) + assert.NotNil(t, err, "Expected an error") + assert.Equal(t, err.Error(), conflictingLogOptionsError, "Received invalid error") +} + +func TestCreateTagSuccess(t *testing.T) { + mockClient := newMockClient() + info := logger.Info{ + ContainerName: "/test-container", + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, + } + logStreamName, e := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if e != nil { + t.Errorf("Error generating tag: %q", e) + } + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: logStreamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + + if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" { + t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") + } +} + +func BenchmarkUnwrapEvents(b *testing.B) { + events := make([]wrappedEvent, maximumLogEventsPerPut) + for i := 0; i < maximumLogEventsPerPut; i++ { + mes := strings.Repeat("0", maximumBytesPerEvent) + events[i].inputLogEvent = &cloudwatchlogs.InputLogEvent{ + Message: &mes, + } + } + + as := assert.New(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := unwrapEvents(events) + as.Len(res, maximumLogEventsPerPut) + } +} diff --git a/daemon/logger/awslogs/cwlogsiface_mock_test.go b/daemon/logger/awslogs/cwlogsiface_mock_test.go new file mode 100644 index 0000000..82bb34b --- /dev/null +++ 
b/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -0,0 +1,92 @@ +package awslogs + +import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + +type mockcwlogsclient struct { + createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput + createLogGroupResult chan *createLogGroupResult + createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput + createLogStreamResult chan *createLogStreamResult + putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput + putLogEventsResult chan *putLogEventsResult +} + +type createLogGroupResult struct { + successResult *cloudwatchlogs.CreateLogGroupOutput + errorResult error +} + +type createLogStreamResult struct { + successResult *cloudwatchlogs.CreateLogStreamOutput + errorResult error +} + +type putLogEventsResult struct { + successResult *cloudwatchlogs.PutLogEventsOutput + errorResult error +} + +func newMockClient() *mockcwlogsclient { + return &mockcwlogsclient{ + createLogGroupArgument: make(chan *cloudwatchlogs.CreateLogGroupInput, 1), + createLogGroupResult: make(chan *createLogGroupResult, 1), + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), + createLogStreamResult: make(chan *createLogStreamResult, 1), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), + putLogEventsResult: make(chan *putLogEventsResult, 1), + } +} + +func newMockClientBuffered(buflen int) *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), + createLogStreamResult: make(chan *createLogStreamResult, buflen), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), + putLogEventsResult: make(chan *putLogEventsResult, buflen), + } +} + +func (m *mockcwlogsclient) CreateLogGroup(input *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) { + m.createLogGroupArgument <- input + output := <-m.createLogGroupResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + m.createLogStreamArgument <- input + output := <-m.createLogStreamResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) + copy(events, input.LogEvents) + m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: input.SequenceToken, + LogGroupName: input.LogGroupName, + LogStreamName: input.LogStreamName, + } + output := <-m.putLogEventsResult + return output.successResult, output.errorResult +} + +type mockmetadataclient struct { + regionResult chan *regionResult +} + +type regionResult struct { + successResult string + errorResult error +} + +func newMockMetadataClient() *mockmetadataclient { + return &mockmetadataclient{ + regionResult: make(chan *regionResult, 1), + } +} + +func (m *mockmetadataclient) Region() (string, error) { + output := <-m.regionResult + return output.successResult, output.errorResult +} diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go new file mode 100644 index 0000000..65d8fb1 --- /dev/null +++ b/daemon/logger/copier.go @@ -0,0 +1,135 @@ +package logger + +import ( + "bytes" + "io" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + bufSize = 16 * 1024 + readSize = 2 * 1024 +) + +// 
Copier can copy logs from specified sources to Logger and attach Timestamp. +// Writes are concurrent, so you need to implement some synchronization in your logger. +type Copier struct { + // srcs is a map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts copying logs +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + buf := make([]byte, bufSize) + n := 0 + eof := false + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') { + select { + case <-c.closed: + return + default: + msg := NewMessage() + msg.Source = name + msg.Timestamp = time.Now().UTC() + msg.Line = append(msg.Line, buf[p:p+q]...) + + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. + if eof || (p == 0 && n == len(buf)) { + if p < n { + msg := NewMessage() + msg.Source = name + msg.Timestamp = time.Now().UTC() + msg.Line = append(msg.Line, buf[p:n]...) + msg.Partial = true + + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read.
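+ // Only the unterminated tail of the last line survives this compaction, which is how a fixed + // 16KB buffer streams output of any size: a line that fills the whole buffer without a newline + // has already been flushed above as a Partial message.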
+ if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go new file mode 100644 index 0000000..4210022 --- /dev/null +++ b/daemon/logger/copier_test.go @@ -0,0 +1,296 @@ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder + mu sync.Mutex + delay time.Duration +} + +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + // Test remaining lines without line-endings + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stdoutLine, stdoutTrailingLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stderrLine, stderrTrailingLine) + } + } + } +} + +// TestCopierLongLines tests long lines without line breaks +func TestCopierLongLines(t *testing.T) { + // Long lines (should be split at "bufSize") + const bufSize = 16 * 1024 + stdoutLongLine := strings.Repeat("a", bufSize) + stderrLongLine := strings.Repeat("b", bufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := 
stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + } + } +} + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("failed to exit in time after the copier is closed") + case <-wait: + } +} + +type BenchmarkLoggerDummy struct { +} + +func (l *BenchmarkLoggerDummy) Log(m *Message) error { PutMessage(m); return nil } + +func (l *BenchmarkLoggerDummy) Close() error { return nil } + +func (l *BenchmarkLoggerDummy) Name() string { return "dummy" } + +func BenchmarkCopier64(b *testing.B) { + benchmarkCopier(b, 1<<6) +} +func BenchmarkCopier128(b *testing.B) { + benchmarkCopier(b, 1<<7) +} +func BenchmarkCopier256(b *testing.B) { + benchmarkCopier(b, 1<<8) +} +func BenchmarkCopier512(b *testing.B) { + benchmarkCopier(b, 1<<9) +} +func BenchmarkCopier1K(b *testing.B) { + benchmarkCopier(b, 1<<10) +} +func BenchmarkCopier2K(b *testing.B) { + benchmarkCopier(b, 1<<11) +} +func BenchmarkCopier4K(b *testing.B) { + benchmarkCopier(b, 1<<12) +} +func BenchmarkCopier8K(b *testing.B) { + benchmarkCopier(b, 1<<13) +} +func BenchmarkCopier16K(b *testing.B) { + benchmarkCopier(b, 1<<14) +} +func BenchmarkCopier32K(b *testing.B) { + benchmarkCopier(b, 1<<15) +} +func BenchmarkCopier64K(b *testing.B) { + benchmarkCopier(b, 1<<16) +} +func BenchmarkCopier128K(b *testing.B) { + benchmarkCopier(b, 1<<17) +} +func BenchmarkCopier256K(b *testing.B) { + benchmarkCopier(b, 1<<18) +} + +func piped(b *testing.B, iterations int, delay time.Duration, buf []byte) io.Reader { + r, w, err := os.Pipe() + if err != nil { + b.Fatal(err) + return nil + } + go func() { + for i := 0; i < iterations; i++ { + time.Sleep(delay) + if n, err := w.Write(buf); err != nil || n != len(buf) { + if err != nil { + b.Fatal(err) + } + b.Fatal(fmt.Errorf("short write")) 
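+ // Note: b.Fatal is documented to be valid only from the goroutine running the benchmark; + // called from this writer goroutine it still marks the benchmark as failed, but it only + // exits the goroutine itself rather than stopping the benchmark function.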
+ } + } + w.Close() + }() + return r +} + +func benchmarkCopier(b *testing.B, length int) { + b.StopTimer() + buf := []byte{'A'} + for len(buf) < length { + buf = append(buf, buf...) + } + buf = append(buf[:length-1], []byte{'\n'}...) + b.StartTimer() + for i := 0; i < b.N; i++ { + c := NewCopier( + map[string]io.Reader{ + "buffer": piped(b, 10, time.Nanosecond, buf), + }, + &BenchmarkLoggerDummy{}) + c.Run() + c.Wait() + c.Close() + } +} diff --git a/daemon/logger/etwlogs/etwlogs_windows.go b/daemon/logger/etwlogs/etwlogs_windows.go new file mode 100644 index 0000000..cc44d51 --- /dev/null +++ b/daemon/logger/etwlogs/etwlogs_windows.go @@ -0,0 +1,169 @@ +// Package etwlogs provides a log driver for forwarding container logs +// as ETW events (ETW stands for Event Tracing for Windows). +// A client can then create an ETW listener to listen for events that are sent +// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". +// Here is an example of how to do this using the logman utility: +// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl +// 2. Run container(s) and generate log messages +// 3. logman stop -ets DockerContainerLogs +// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl +// +// Each container log message generates an ETW event that also contains: +// the container name and ID, the timestamp, and the stream type. +package etwlogs + +import ( + "errors" + "fmt" + "sync" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "golang.org/x/sys/windows" +) + +type etwLogs struct { + containerName string + imageName string + containerID string + imageID string +} + +const ( + name = "etwlogs" + win32CallSuccess = 0 +) + +var ( + modAdvapi32 = windows.NewLazySystemDLL("Advapi32.dll") + procEventRegister = modAdvapi32.NewProc("EventRegister") + procEventWriteString = modAdvapi32.NewProc("EventWriteString") + procEventUnregister = modAdvapi32.NewProc("EventUnregister") +) +var providerHandle syscall.Handle +var refCount int +var mu sync.Mutex + +func init() { + providerHandle = syscall.InvalidHandle + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } +} + +// New creates a new etwLogs logger for the given container and registers the ETW provider. +func New(info logger.Info) (logger.Logger, error) { + if err := registerETWProvider(); err != nil { + return nil, err + } + logrus.Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID) + + return &etwLogs{ + containerName: info.Name(), + imageName: info.ContainerImageName, + containerID: info.ContainerID, + imageID: info.ContainerImageID, + }, nil +} + +// Log logs the message to the ETW stream. +func (etwLogger *etwLogs) Log(msg *logger.Message) error { + if providerHandle == syscall.InvalidHandle { + // This should never be hit; if it is, it indicates a programming error. + errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + m := createLogMessage(etwLogger, msg) + logger.PutMessage(msg) + return callEventWriteString(m) +} + +// Close closes the logger by unregistering the ETW provider.
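+// Close is reference-counted: registerETWProvider and unregisterETWProvider (below) track how many +// loggers share the provider, and the handle is only released when the last one closes.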
+func (etwLogger *etwLogs) Close() error { + unregisterETWProvider() + return nil +} + +func (etwLogger *etwLogs) Name() string { + return name +} + +func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { + return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", + etwLogger.containerName, + etwLogger.imageName, + etwLogger.containerID, + etwLogger.imageID, + msg.Source, + msg.Line) +} + +func registerETWProvider() error { + mu.Lock() + defer mu.Unlock() + if refCount == 0 { + var err error + if err = callEventRegister(); err != nil { + return err + } + } + + refCount++ + return nil +} + +func unregisterETWProvider() { + mu.Lock() + defer mu.Unlock() + if refCount == 1 { + if callEventUnregister() { + refCount-- + providerHandle = syscall.InvalidHandle + } + // Not returning an error if EventUnregister fails, because etwLogs will continue to work + } else { + refCount-- + } +} + +func callEventRegister() error { + // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} + guid := syscall.GUID{ + Data1: 0xa3693192, + Data2: 0x9ed6, + Data3: 0x46d2, + Data4: [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + } + + ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventWriteString(message string) error { + utf16message, err := syscall.UTF16FromString(message) + + if err != nil { + return err + } + + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(&utf16message[0]))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventUnregister() bool { + ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) + return ret == win32CallSuccess +} diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go new file mode 100644 index 0000000..3200159 --- /dev/null +++ b/daemon/logger/factory.go @@ -0,0 +1,162 @@ +package logger + +import ( + "fmt" + "sort" + "sync" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/plugingetter" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Info) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. 
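+// The factory strips the options it handles itself (mode, max-buffer-size) before invoking a +// driver's validator, so a validator only ever sees driver-specific keys; see ValidateLogOpts below.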
+type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + if !ok { + if pluginGetter != nil { // this can be nil when the init functions are running + if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { + return true + } + } + } + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if ok { + return c, nil + } + + c, err := getPlugin(name, plugingetter.Acquire) + return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +var builtInLogOpts = map[string]bool{ + "mode": true, + "max-buffer-size": true, +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. 
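Before the ValidateLogOpts implementation below, a minimal sketch of how a driver plugs into this registry at init time, mirroring the pattern the built-in drivers in this patch follow. The driver name and types here are invented purely for illustration:

```go
package noopdriver

import "github.com/docker/docker/daemon/logger"

// noopLogger is a hypothetical driver that simply discards every message.
type noopLogger struct{}

func (noopLogger) Log(msg *logger.Message) error { logger.PutMessage(msg); return nil }
func (noopLogger) Name() string                  { return "noop-example" }
func (noopLogger) Close() error                  { return nil }

func init() {
	// Each driver registers a Creator once; registering the same name twice is an error.
	if err := logger.RegisterLogDriver("noop-example", func(info logger.Info) (logger.Logger, error) {
		return noopLogger{}, nil
	}); err != nil {
		panic(err)
	}
	// Optionally, a validator for the driver-specific option keys.
	if err := logger.RegisterLogOptValidator("noop-example", func(cfg map[string]string) error {
		return nil // a real driver would whitelist its keys here
	}); err != nil {
		panic(err)
	}
}
```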
+func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + switch containertypes.LogMode(cfg["mode"]) { + case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset: + default: + return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"]) + } + + if s, ok := cfg["max-buffer-size"]; ok { + if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock { + return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock) + } + if _, err := units.RAMInBytes(s); err != nil { + return errors.Wrap(err, "error parsing option max-buffer-size") + } + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + filteredOpts := make(map[string]string, len(builtInLogOpts)) + for k, v := range cfg { + if !builtInLogOpts[k] { + filteredOpts[k] = v + } + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(filteredOpts) + } + return nil +} diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go new file mode 100644 index 0000000..23465b3 --- /dev/null +++ b/daemon/logger/fluentd/fluentd.go @@ -0,0 +1,250 @@ +// Package fluentd provides the log driver for forwarding server logs +// to fluentd endpoints. +package fluentd + +import ( + "fmt" + "math" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-units" + "github.com/fluent/fluent-logger-golang/fluent" + "github.com/pkg/errors" +) + +type fluentd struct { + tag string + containerID string + containerName string + writer *fluent.Fluent + extra map[string]string +} + +type location struct { + protocol string + host string + port int + path string +} + +const ( + name = "fluentd" + + defaultProtocol = "tcp" + defaultHost = "127.0.0.1" + defaultPort = 24224 + defaultBufferLimit = 1024 * 1024 + + // logger tries to reconnect 2**32 - 1 times + // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] + defaultRetryWait = 1000 + defaultMaxRetries = math.MaxInt32 + + addressKey = "fluentd-address" + bufferLimitKey = "fluentd-buffer-limit" + retryWaitKey = "fluentd-retry-wait" + maxRetriesKey = "fluentd-max-retries" + asyncConnectKey = "fluentd-async-connect" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a fluentd logger using the configuration passed in on +// the context. The supported context configuration variable is +// fluentd-address. 
+func New(info logger.Info) (logger.Logger, error) { + loc, err := parseAddress(info.Config[addressKey]) + if err != nil { + return nil, err + } + + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + bufferLimit := defaultBufferLimit + if info.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(info.Config[bufferLimitKey]) + if err != nil { + return nil, err + } + bufferLimit = int(bl64) + } + + retryWait := defaultRetryWait + if info.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(info.Config[retryWaitKey]) + if err != nil { + return nil, err + } + retryWait = int(rwd.Seconds() * 1000) + } + + maxRetries := defaultMaxRetries + if info.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(info.Config[maxRetriesKey], 10, strconv.IntSize) + if err != nil { + return nil, err + } + maxRetries = int(mr64) + } + + asyncConnect := false + if info.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(info.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + fluentConfig := fluent.Config{ + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + } + + logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). + Debug("logging driver fluentd configured") + + log, err := fluent.New(fluentConfig) + if err != nil { + return nil, err + } + return &fluentd{ + tag: tag, + containerID: info.ContainerID, + containerName: info.ContainerName, + writer: log, + extra: extra, + }, nil +} + +func (f *fluentd) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + + ts := msg.Timestamp + logger.PutMessage(msg) + // fluent-logger-golang buffers logs from failures and disconnections, + // and these are transferred again automatically. + return f.writer.PostWithTime(f.tag, ts, data) +} + +func (f *fluentd) Close() error { + return f.writer.Close() +} + +func (f *fluentd) Name() string { + return name +} + +// ValidateLogOpt looks for fluentd specific log option fluentd-address. 
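parseAddress, defined after the validator below, leans on a stdlib idiom that is easy to miss: net.SplitHostPort rejects a bare host, and the error text is inspected so the default port can be substituted. A self-contained sketch of that idiom (the helper name is invented):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPortWithDefault mirrors the fallback in parseAddress: when the
// address carries no port, net.SplitHostPort fails with "missing port in
// address" and the given default is used instead.
func splitHostPortWithDefault(addr string, defPort int) (string, int, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		if !strings.Contains(err.Error(), "missing port in address") {
			return "", 0, err
		}
		// On error SplitHostPort returns empty strings, so the original
		// address is the host.
		return addr, defPort, nil
	}
	n, err := strconv.Atoi(port)
	if err != nil {
		return "", 0, err
	}
	return host, n, nil
}

func main() {
	fmt.Println(splitHostPortWithDefault("myhost", 24224))       // myhost 24224 <nil>
	fmt.Println(splitHostPortWithDefault("myhost:24225", 24224)) // myhost 24225 <nil>
}
```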
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "tag": + case addressKey: + case bufferLimitKey: + case retryWaitKey: + case maxRetriesKey: + case asyncConnectKey: + // Accepted + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + + _, err := parseAddress(cfg["fluentd-address"]) + return err +} + +func parseAddress(address string) (*location, error) { + if address == "" { + return &location{ + protocol: defaultProtocol, + host: defaultHost, + port: defaultPort, + path: "", + }, nil + } + + protocol := defaultProtocol + givenAddress := address + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // unix and unixgram socket + if url.Scheme == "unix" || url.Scheme == "unixgram" { + return &location{ + protocol: url.Scheme, + host: "", + port: 0, + path: url.Path, + }, nil + } + // tcp|udp + protocol = url.Scheme + address = url.Host + } + + host, port, err := net.SplitHostPort(address) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // SplitHostPort returns empty strings on error, so the bare address is the host. + return &location{ + protocol: protocol, + host: address, + port: defaultPort, + path: "", + }, nil + } + + portnum, err := strconv.Atoi(port) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: portnum, + path: "", + }, nil +} diff --git a/daemon/logger/gcplogs/gcplogging.go b/daemon/logger/gcplogs/gcplogging.go new file mode 100644 index 0000000..3a48067 --- /dev/null +++ b/daemon/logger/gcplogs/gcplogging.go @@ -0,0 +1,244 @@ +package gcplogs + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/docker/docker/daemon/logger" + + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/logging" + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +const ( + name = "gcplogs" + + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logEnvRegexKey = "env-regex" + logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" +) + +var ( + // The number of logs the gcplogs driver has dropped.
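+ // droppedLogs is only touched through sync/atomic (see the OnError callback in New), + // since the logging client may invoke OnError from its own goroutines.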
+ droppedLogs uint64 + + onGCE bool + + // instance metadata populated from the metadata server if available + projectID string + zone string + instanceName string + instanceID string +) + +func init() { + + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + + if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { + logrus.Fatal(err) + } +} + +type gcplogs struct { + logger *logging.Logger + instance *instanceInfo + container *containerInfo +} + +type dockerLogEntry struct { + Instance *instanceInfo `json:"instance,omitempty"` + Container *containerInfo `json:"container,omitempty"` + Data string `json:"data,omitempty"` +} + +type instanceInfo struct { + Zone string `json:"zone,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type containerInfo struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + ImageName string `json:"imageName,omitempty"` + ImageID string `json:"imageId,omitempty"` + Created time.Time `json:"created,omitempty"` + Command string `json:"command,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +var initGCPOnce sync.Once + +func initGCP() { + initGCPOnce.Do(func() { + onGCE = metadata.OnGCE() + if onGCE { + // These will fail on instances if the metadata service is + // down or the client is compiled with an API version that + // has been removed. Since these are not vital, let's ignore + // them and make their fields in the dockerLogEntry ,omitempty + projectID, _ = metadata.ProjectID() + zone, _ = metadata.Zone() + instanceName, _ = metadata.InstanceName() + instanceID, _ = metadata.InstanceID() + } + }) +} + +// New creates a new logger that logs to Google Cloud Logging using the application +// default credentials. +// +// See https://developers.google.com/identity/protocols/application-default-credentials +func New(info logger.Info) (logger.Logger, error) { + initGCP() + + var project string + if projectID != "" { + project = projectID + } + if projectID, found := info.Config[projectOptKey]; found { + project = projectID + } + if project == "" { + return nil, fmt.Errorf("No project was specified and couldn't read project from the metadata server. Please specify a project") + } + + // Issue #29344: gcplogs segfaults (static binary) + // If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. + // However, in static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed + // in a short term. 
(golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) + // So we forcibly set HOME so as to avoid the call to os/user.Current() + if err := ensureHomeIfIAmStatic(); err != nil { + return nil, err + } + + c, err := logging.NewClient(context.Background(), project) + if err != nil { + return nil, err + } + var instanceResource *instanceInfo + if onGCE { + instanceResource = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" { + instanceResource = &instanceInfo{ + Zone: info.Config[logZoneKey], + Name: info.Config[logNameKey], + ID: info.Config[logIDKey], + } + } + + options := []logging.LoggerOption{} + if instanceResource != nil { + vmMrpb := logging.CommonResource( + &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "instance_id": instanceResource.ID, + "zone": instanceResource.Zone, + }, + }, + ) + options = []logging.LoggerOption{vmMrpb} + } + lg := c.Logger("gcplogs-docker-driver", options...) + + if err := c.Ping(context.Background()); err != nil { + return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) + } + + extraAttributes, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + + l := &gcplogs{ + logger: lg, + container: &containerInfo{ + Name: info.ContainerName, + ID: info.ContainerID, + ImageName: info.ContainerImageName, + ImageID: info.ContainerImageID, + Created: info.ContainerCreated, + Metadata: extraAttributes, + }, + } + + if info.Config[logCmdKey] == "true" { + l.container.Command = info.Command() + } + + if instanceResource != nil { + l.instance = instanceResource + } + + // The logger "overflows" at a rate of 10,000 logs per second and this + // overflow func is called. We want to surface the error to the user + // without overly spamming /var/log/docker.log so we log the first time + // we overflow and every 1000th time after. + c.OnError = func(err error) { + if err == logging.ErrOverflow { + if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { + logrus.Errorf("gcplogs driver has dropped %v logs", i) + } + } else { + logrus.Error(err) + } + } + + return l, nil +} + +// ValidateLogOpts validates the opts passed to the gcplogs driver. Only the option keys +// handled below (the gcp-* keys plus labels, env, and env-regex) are accepted.
+func ValidateLogOpts(cfg map[string]string) error { + for k := range cfg { + switch k { + case projectOptKey, logLabelsKey, logEnvKey, logEnvRegexKey, logCmdKey, logZoneKey, logNameKey, logIDKey: + default: + return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) + } + } + return nil +} + +func (l *gcplogs) Log(m *logger.Message) error { + data := string(m.Line) + ts := m.Timestamp + logger.PutMessage(m) + + l.logger.Log(logging.Entry{ + Timestamp: ts, + Payload: &dockerLogEntry{ + Instance: l.instance, + Container: l.container, + Data: data, + }, + }) + return nil +} + +func (l *gcplogs) Close() error { + l.logger.Flush() + return nil +} + +func (l *gcplogs) Name() string { + return name +} diff --git a/daemon/logger/gcplogs/gcplogging_linux.go b/daemon/logger/gcplogs/gcplogging_linux.go new file mode 100644 index 0000000..9af8b3c --- /dev/null +++ b/daemon/logger/gcplogs/gcplogging_linux.go @@ -0,0 +1,31 @@ +// +build linux + +package gcplogs + +import ( + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" +) + +// ensureHomeIfIAmStatic ensures $HOME is set if dockerversion.IAmStatic is "true". +// See issue #29344: gcplogs segfaults (static binary) +// If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. +// However, in static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed +// in a short term. (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +// So we forcibly set HOME so as to avoid the call to os/user.Current() +func ensureHomeIfIAmStatic() error { + // Note: dockerversion.IAmStatic and homedir.GetStatic() are only available on linux. + // So we need to use them in this gcplogging_linux.go rather than in gcplogging.go + if dockerversion.IAmStatic == "true" && os.Getenv("HOME") == "" { + home, err := homedir.GetStatic() + if err != nil { + return err + } + logrus.Warnf("gcplogs requires HOME to be set for static daemon binary. Forcibly setting HOME to %s.", home) + os.Setenv("HOME", home) + } + return nil +} diff --git a/daemon/logger/gcplogs/gcplogging_others.go b/daemon/logger/gcplogs/gcplogging_others.go new file mode 100644 index 0000000..45e3b8d --- /dev/null +++ b/daemon/logger/gcplogs/gcplogging_others.go @@ -0,0 +1,7 @@ +// +build !linux + +package gcplogs + +func ensureHomeIfIAmStatic() error { + return nil +} diff --git a/daemon/logger/gelf/gelf.go b/daemon/logger/gelf/gelf.go new file mode 100644 index 0000000..4b0130d --- /dev/null +++ b/daemon/logger/gelf/gelf.go @@ -0,0 +1,209 @@ +// +build linux + +// Package gelf provides the log driver for forwarding server logs to +// endpoints that support the Graylog Extended Log Format. +package gelf + +import ( + "compress/flate" + "encoding/json" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type gelfLogger struct { + writer *gelf.Writer + info logger.Info + hostname string + rawExtra json.RawMessage +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a gelf logger using the configuration passed in on the +// context.
The supported context configuration variable is gelf-address. +func New(info logger.Info) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(info.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := info.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := map[string]interface{}{ + "_container_id": info.ContainerID, + "_container_name": info.Name(), + "_image_id": info.ContainerImageID, + "_image_name": info.ContainerImageName, + "_command": info.Command(), + "_tag": tag, + "_created": info.ContainerCreated, + } + + extraAttrs, err := info.ExtraAttributes(func(key string) string { + if key[0] == '_' { + return key + } + return "_" + key + }) + + if err != nil { + return nil, err + } + + for k, v := range extraAttrs { + extra[k] = v + } + + rawExtra, err := json.Marshal(extra) + if err != nil { + return nil, err + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := info.Config["gelf-compression-type"]; ok { + switch v { + case "gzip": + gelfWriter.CompressionType = gelf.CompressGzip + case "zlib": + gelfWriter.CompressionType = gelf.CompressZlib + case "none": + gelfWriter.CompressionType = gelf.CompressNone + default: + return nil, fmt.Errorf("gelf: invalid compression type %q", v) + } + } + + if v, ok := info.Config["gelf-compression-level"]; ok { + val, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) + } + gelfWriter.CompressionLevel = val + } + + return &gelfLogger{ + writer: gelfWriter, + info: info, + hostname: hostname, + rawExtra: rawExtra, + }, nil +} + +func (s *gelfLogger) Log(msg *logger.Message) error { + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.hostname, + Short: string(msg.Line), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + RawExtra: s.rawExtra, + } + logger.PutMessage(msg) + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *gelfLogger) Close() error { + return s.writer.Close() +} + +func (s *gelfLogger) Name() string { + return name +} + +// ValidateLogOpt looks for gelf specific log option gelf-address. 
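For reference, the bounds that the validator below enforces for gelf-compression-level come straight from compress/flate; a standalone sketch of the same range check (the function name is invented):

```go
package main

import (
	"compress/flate"
	"fmt"
	"strconv"
)

// validCompressionLevel applies the same check as ValidateLogOpt:
// the level must lie in [flate.DefaultCompression, flate.BestCompression].
func validCompressionLevel(val string) bool {
	i, err := strconv.Atoi(val)
	return err == nil && i >= flate.DefaultCompression && i <= flate.BestCompression
}

func main() {
	fmt.Println(validCompressionLevel("-1")) // true: flate.DefaultCompression
	fmt.Println(validCompressionLevel("9"))  // true: flate.BestCompression
	fmt.Println(validCompressionLevel("10")) // false: out of range
}
```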
+func ValidateLogOpt(cfg map[string]string) error { + for key, val := range cfg { + switch key { + case "gelf-address": + case "tag": + case "labels": + case "env": + case "env-regex": + case "gelf-compression-level": + i, err := strconv.Atoi(val) + if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + case "gelf-compression-type": + switch val { + case "gzip", "zlib", "none": + default: + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + default: + return fmt.Errorf("unknown log opt %q for gelf log driver", key) + } + } + + _, err := parseAddress(cfg["gelf-address"]) + return err +} + +func parseAddress(address string) (string, error) { + if address == "" { + return "", nil + } + if !urlutil.IsTransportURL(address) { + return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", err + } + + // we support only udp + if url.Scheme != "udp" { + return "", fmt.Errorf("gelf: endpoint needs to be UDP") + } + + // get host and port + if _, _, err = net.SplitHostPort(url.Host); err != nil { + return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + } + + return url.Host, nil +} diff --git a/daemon/logger/gelf/gelf_unsupported.go b/daemon/logger/gelf/gelf_unsupported.go new file mode 100644 index 0000000..266f73b --- /dev/null +++ b/daemon/logger/gelf/gelf_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package gelf diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go new file mode 100644 index 0000000..86d7378 --- /dev/null +++ b/daemon/logger/journald/journald.go @@ -0,0 +1,126 @@ +// +build linux + +// Package journald provides the log driver for forwarding server logs +// to endpoints that receive the systemd format. +package journald + +import ( + "fmt" + "sync" + "unicode" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" +) + +const name = "journald" + +type journald struct { + mu sync.Mutex + vars map[string]string // additional variables and values to send to the journal along with the log message + readers readerList + closed bool +} + +type readerList struct { + readers map[*logger.LogWatcher]*logger.LogWatcher +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// sanitizeKeyMod returns the sanitized string so that it can be used in journald. +// The journal has special requirements for field names: +// Fields must be composed of uppercase letters, numbers, and underscores, but must +// not start with an underscore. +func sanitizeKeyMod(s string) string { + n := "" + for _, v := range s { + if 'a' <= v && v <= 'z' { + v = unicode.ToUpper(v) + } else if ('Z' < v || v < 'A') && ('9' < v || v < '0') { + v = '_' + } + // If (n == "" && v == '_'), then we will skip as this is the beginning with '_' + if !(n == "" && v == '_') { + n += string(v) + } + } + return n +} + +// New creates a journald logger using the configuration passed in on +// the context.
+func New(info logger.Info) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "CONTAINER_ID": info.ContainerID[:12], + "CONTAINER_ID_FULL": info.ContainerID, + "CONTAINER_NAME": info.Name(), + "CONTAINER_TAG": tag, + } + extraAttrs, err := info.ExtraAttributes(sanitizeKeyMod) + if err != nil { + return nil, err + } + for k, v := range extraAttrs { + vars[k] = v + } + return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil +} + +// validateLogOpt is the callback the factory passes the (often empty) configuration map to; +// only the shared labels, env, env-regex, and tag options are accepted. +func validateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "labels": + case "env": + case "env-regex": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for journald log driver", key) + } + } + return nil +} + +func (s *journald) Log(msg *logger.Message) error { + vars := map[string]string{} + for k, v := range s.vars { + vars[k] = v + } + if msg.Partial { + vars["CONTAINER_PARTIAL_MESSAGE"] = "true" + } + + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + + if source == "stderr" { + return journal.Send(line, journal.PriErr, vars) + } + return journal.Send(line, journal.PriInfo, vars) +} + +func (s *journald) Name() string { + return name +} diff --git a/daemon/logger/journald/journald_test.go b/daemon/logger/journald/journald_test.go new file mode 100644 index 0000000..224423f --- /dev/null +++ b/daemon/logger/journald/journald_test.go @@ -0,0 +1,23 @@ +// +build linux + +package journald + +import ( + "testing" +) + +func TestSanitizeKeyMod(t *testing.T) { + entries := map[string]string{ + "io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io?.kubernetes.pod.name": "IO__KUBERNETES_POD_NAME", + "?io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "_io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "__io123_kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + } + for k, v := range entries { + if sanitizeKeyMod(k) != v { + t.Fatalf("Failed to sanitize %s, got %s, expected %s", k, sanitizeKeyMod(k), v) + } + } +} diff --git a/daemon/logger/journald/journald_unsupported.go b/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 0000000..d52ca92 --- /dev/null +++ b/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package journald + +type journald struct { +} diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go new file mode 100644 index 0000000..9ecc3b5 --- /dev/null +++ b/daemon/logger/journald/read.go @@ -0,0 +1,425 @@ +// +build linux,cgo,!static_build,journald + +package journald + +// #include <sys/types.h> + // #include <sys/poll.h> + // #include <systemd/sd-journal.h> + // #include <errno.h> + // #include <stdio.h> + // #include <stdlib.h> + // #include <string.h> + // #include <time.h> + // #include <unistd.h> + // +//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial) +//{ +// int rc; +// size_t plength; +// *msg = NULL; +// *length = 0; +// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true"); +// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length); +// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true",
plength) == 0)); +// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); +// if (rc == 0) { +// if (*length > 8) { +// (*msg) += 8; +// *length -= 8; +// } else { +// *msg = NULL; +// *length = 0; +// rc = -ENOENT; +// } +// } +// return rc; +//} +//static int get_priority(sd_journal *j, int *priority) +//{ +// const void *data; +// size_t i, length; +// int rc; +// *priority = -1; +// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); +// if (rc == 0) { +// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { +// *priority = 0; +// for (i = 9; i < length; i++) { +// *priority = *priority * 10 + ((const char *)data)[i] - '0'; +// } +// if (length > 9) { +// rc = 0; +// } +// } +// } +// return rc; +//} +//static int is_attribute_field(const char *msg, size_t length) +//{ +// static const struct known_field { +// const char *name; +// size_t length; +// } fields[] = { +// {"MESSAGE", sizeof("MESSAGE") - 1}, +// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, +// {"PRIORITY", sizeof("PRIORITY") - 1}, +// {"CODE_FILE", sizeof("CODE_FILE") - 1}, +// {"CODE_LINE", sizeof("CODE_LINE") - 1}, +// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, +// {"ERRNO", sizeof("ERRNO") - 1}, +// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, +// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, +// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, +// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, +// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, +// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1}, +// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, +// }; +// unsigned int i; +// void *p; +// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { +// return -1; +// } +// length = ((const char *) p) - msg; +// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { +// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { +// return -1; +// } +// } +// return 0; +//} +//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) +//{ +// int rc; +// *msg = NULL; +// *length = 0; +// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { +// if (is_attribute_field(*msg, *length) == 0) { +// break; +// } +// rc = -ENOENT; +// } +// return rc; +//} +//static int wait_for_data_cancelable(sd_journal *j, int pipefd) +//{ +// struct pollfd fds[2]; +// uint64_t when = 0; +// int timeout, jevents, i; +// struct timespec ts; +// uint64_t now; +// +// memset(&fds, 0, sizeof(fds)); +// fds[0].fd = pipefd; +// fds[0].events = POLLHUP; +// fds[1].fd = sd_journal_get_fd(j); +// if (fds[1].fd < 0) { +// return fds[1].fd; +// } +// +// do { +// jevents = sd_journal_get_events(j); +// if (jevents < 0) { +// return jevents; +// } +// fds[1].events = jevents; +// sd_journal_get_timeout(j, &when); +// if (when == -1) { +// timeout = -1; +// } else { +// clock_gettime(CLOCK_MONOTONIC, &ts); +// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; +// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; +// } +// i = poll(fds, 2, timeout); +// if ((i == -1) && (errno != EINTR)) { +// /* An unexpected error. */ +// return (errno != 0) ? -errno : -EINTR; +// } +// if (fds[0].revents & POLLHUP) { +// /* The close notification pipe was closed. */ +// return 0; +// } +// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { +// /* Data, which we might care about, was appended. 
*/ +// return 1; +// } +// } while ((fds[0].revents & POLLHUP) == 0); +// return 0; +//} +import "C" + +import ( + "fmt" + "strings" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +func (s *journald) Close() error { + s.mu.Lock() + s.closed = true + for reader := range s.readers.readers { + reader.Close() + } + s.mu.Unlock() + return nil +} + +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char { + var msg, data, cursor *C.char + var length C.size_t + var stamp C.uint64_t + var priority, partial C.int + + // Walk the journal from here forward until we run out of new entries. +drain: + for { + // Try not to send a given entry twice. + if oldCursor != nil { + for C.sd_journal_test_cursor(j, oldCursor) > 0 { + if C.sd_journal_next(j) <= 0 { + break drain + } + } + } + // Read and send the logged message, if there is one to read. + i := C.get_message(j, &msg, &length, &partial) + if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { + // Read the entry's timestamp. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } + // Set up the time and text of the entry. + timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) + line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) + if partial == 0 { + line = append(line, "\n"...) + } + // Recover the stream name by mapping + // from the journal priority back to + // the stream that we would have + // assigned that value. + source := "" + if C.get_priority(j, &priority) != 0 { + source = "" + } else if priority == C.int(journal.PriErr) { + source = "stderr" + } else if priority == C.int(journal.PriInfo) { + source = "stdout" + } + // Retrieve the values of any variables we're adding to the journal. + attrs := make(map[string]string) + C.sd_journal_restart_data(j) + for C.get_attribute_field(j, &data, &length) > C.int(0) { + kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) + attrs[kv[0]] = kv[1] + } + if len(attrs) == 0 { + attrs = nil + } + // Send the log message. + logWatcher.Msg <- &logger.Message{ + Line: line, + Source: source, + Timestamp: timestamp.In(time.UTC), + Attrs: attrs, + } + } + // If we're at the end of the journal, we're done (for now). + if C.sd_journal_next(j) <= 0 { + break + } + } + + // free(NULL) is safe + C.free(unsafe.Pointer(oldCursor)) + if C.sd_journal_get_cursor(j, &cursor) != 0 { + // ensure that we won't be freeing an address that's invalid + cursor = nil + } + return cursor +} + +func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { + s.mu.Lock() + s.readers.readers[logWatcher] = logWatcher + if s.closed { + // the journald Logger is closed, presumably because the container has been + // reset. So we shouldn't follow, because we'll never be woken up. But we + // should make one more drainJournal call to be sure we've got all the logs. + // Close pfd[1] so that one drainJournal happens, then cleanup, then return. + C.close(pfd[1]) + } + s.mu.Unlock() + + newCursor := make(chan *C.char) + + go func() { + for { + // Keep copying journal data out until we're notified to stop + // or we hit an error. 
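+ // wait_for_data_cancelable (defined in the C preamble above) returns 1 when new journal + // data arrived, 0 when the close-notification pipe was closed, and a negative errno on + // failure; each case is handled in turn below.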
+ status := C.wait_for_data_cancelable(j, pfd[0]) + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + break + } + + cursor = s.drainJournal(logWatcher, config, j, cursor) + + if status != 1 { + // We were notified to stop + break + } + } + + // Clean up. + C.close(pfd[0]) + s.mu.Lock() + delete(s.readers.readers, logWatcher) + s.mu.Unlock() + close(logWatcher.Msg) + newCursor <- cursor + }() + + // Wait until we're told to stop. + select { + case cursor = <-newCursor: + case <-logWatcher.WatchClose(): + // Notify the other goroutine that its work is done. + C.close(pfd[1]) + cursor = <-newCursor + } + + return cursor +} + +func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + var j *C.sd_journal + var cmatch, cursor *C.char + var stamp C.uint64_t + var sinceUnixMicro uint64 + var pipes [2]C.int + + // Get a handle to the journal. + rc := C.sd_journal_open(&j, C.int(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error opening journal") + close(logWatcher.Msg) + return + } + // If we end up following the log, we can set the journal context + // pointer and the channel pointer to nil so that we won't close them + // here, potentially while the goroutine that uses them is still + // running. Otherwise, close them when we return from this function. + following := false + defer func(pfollowing *bool) { + if !*pfollowing { + close(logWatcher.Msg) + } + C.sd_journal_close(j) + }(&following) + // Remove limits on the size of data items that we'll retrieve. + rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + return + } + // Add a match to have the library do the searching for us. + cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) + defer C.free(unsafe.Pointer(cmatch)) + rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal match") + return + } + // If we have a cutoff time, convert it to Unix time once. + if !config.Since.IsZero() { + nano := config.Since.UnixNano() + sinceUnixMicro = uint64(nano / 1000) + } + if config.Tail > 0 { + lines := config.Tail + // Start at the end of the journal. + if C.sd_journal_seek_tail(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to end of journal") + return + } + if C.sd_journal_previous(j) < 0 { + logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") + return + } + // Walk backward. + for lines > 0 { + // Stop if the entry time is before our cutoff. + // We'll need the entry time if it isn't, so go + // ahead and parse it now. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } else { + // Compare the timestamp on the entry + // to our threshold value. + if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { + break + } + } + lines-- + // If we're at the start of the journal, or + // don't need to back up past any more entries, + // stop. + if lines == 0 || C.sd_journal_previous(j) <= 0 { + break + } + } + } else { + // Start at the beginning of the journal. + if C.sd_journal_seek_head(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start of journal") + return + } + // If we have a cutoff date, fast-forward to it. 
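+ // sd_journal_seek_realtime_usec only positions the read pointer; the sd_journal_next call + // just below is what steps onto the first entry at or after the cutoff.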
+ if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
+ logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
+ return
+ }
+ if C.sd_journal_next(j) < 0 {
+ logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
+ return
+ }
+ }
+ cursor = s.drainJournal(logWatcher, config, j, nil)
+ if config.Follow {
+ // Allocate a descriptor for following the journal, if we'll
+ // need one. Do it here so that we can report if it fails.
+ if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
+ logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
+ } else {
+ // Create a pipe that we can poll at the same time as
+ // the journald descriptor.
+ if C.pipe(&pipes[0]) == C.int(-1) {
+ logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
+ } else {
+ cursor = s.followJournal(logWatcher, config, j, pipes, cursor)
+ // Let followJournal handle freeing the journal context
+ // object and closing the channel.
+ following = true
+ }
+ }
+ }
+
+ C.free(unsafe.Pointer(cursor))
+ return
+}
+
+func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
+ logWatcher := logger.NewLogWatcher()
+ go s.readLogs(logWatcher, config)
+ return logWatcher
+}
diff --git a/daemon/logger/journald/read_native.go b/daemon/logger/journald/read_native.go
new file mode 100644
index 0000000..bba6de5
--- /dev/null
+++ b/daemon/logger/journald/read_native.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build,journald,!journald_compat
+
+package journald
+
+// #cgo pkg-config: libsystemd
+import "C"
diff --git a/daemon/logger/journald/read_native_compat.go b/daemon/logger/journald/read_native_compat.go
new file mode 100644
index 0000000..3f7a43c
--- /dev/null
+++ b/daemon/logger/journald/read_native_compat.go
@@ -0,0 +1,6 @@
+// +build linux,cgo,!static_build,journald,journald_compat
+
+package journald
+
+// #cgo pkg-config: libsystemd-journal
+import "C"
diff --git a/daemon/logger/journald/read_unsupported.go b/daemon/logger/journald/read_unsupported.go
new file mode 100644
index 0000000..b43abdc
--- /dev/null
+++ b/daemon/logger/journald/read_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux !cgo static_build !journald
+
+package journald
+
+func (s *journald) Close() error {
+ return nil
+}
diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go
new file mode 100644
index 0000000..797644e
--- /dev/null
+++ b/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -0,0 +1,159 @@
+// Package jsonfilelog provides the default Logger implementation for
+// Docker logging. This logger logs to files on the host server in the
+// JSON format.
+package jsonfilelog
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/logger"
+ "github.com/docker/docker/daemon/logger/loggerutils"
+ "github.com/docker/docker/pkg/jsonlog"
+ "github.com/docker/go-units"
+)
+
+// Name is the name under which the json-file log driver is registered.
+const Name = "json-file"
+
+// JSONFileLogger is the Logger implementation for default Docker logging.
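+//
+// A minimal usage sketch (illustrative only; the path and option values
+// below are hypothetical):
+//
+//    l, err := New(logger.Info{
+//        ContainerID: "a7317399f3f8...",
+//        LogPath:     "/var/lib/docker/containers/<id>/<id>-json.log",
+//        Config:      map[string]string{"max-size": "10m", "max-file": "3"},
+//    })
+//    if err == nil {
+//        defer l.Close()
+//        l.Log(&logger.Message{Line: []byte("hello"), Source: "stdout"})
+//    }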
+type JSONFileLogger struct {
+ buf *bytes.Buffer
+ writer *loggerutils.RotateFileWriter
+ mu sync.Mutex
+ readers map[*logger.LogWatcher]struct{} // stores the active log followers
+ extra []byte // json-encoded extra attributes
+ closed bool
+}
+
+func init() {
+ if err := logger.RegisterLogDriver(Name, New); err != nil {
+ logrus.Fatal(err)
+ }
+ if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
+ logrus.Fatal(err)
+ }
+}
+
+// New creates a new JSONFileLogger which writes to the filename passed in
+// on the given context.
+func New(info logger.Info) (logger.Logger, error) {
+ var capval int64 = -1
+ if capacity, ok := info.Config["max-size"]; ok {
+ var err error
+ capval, err = units.FromHumanSize(capacity)
+ if err != nil {
+ return nil, err
+ }
+ }
+ var maxFiles = 1
+ if maxFileString, ok := info.Config["max-file"]; ok {
+ var err error
+ maxFiles, err = strconv.Atoi(maxFileString)
+ if err != nil {
+ return nil, err
+ }
+ if maxFiles < 1 {
+ return nil, fmt.Errorf("max-file cannot be less than 1")
+ }
+ }
+
+ writer, err := loggerutils.NewRotateFileWriter(info.LogPath, capval, maxFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ var extra []byte
+ attrs, err := info.ExtraAttributes(nil)
+ if err != nil {
+ return nil, err
+ }
+ if len(attrs) > 0 {
+ var err error
+ extra, err = json.Marshal(attrs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &JSONFileLogger{
+ buf: bytes.NewBuffer(nil),
+ writer: writer,
+ readers: make(map[*logger.LogWatcher]struct{}),
+ extra: extra,
+ }, nil
+}
+
+// Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
+func (l *JSONFileLogger) Log(msg *logger.Message) error {
+ timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp)
+ if err != nil {
+ return err
+ }
+ l.mu.Lock()
+ logline := msg.Line
+ if !msg.Partial {
+ logline = append(msg.Line, '\n')
+ }
+ err = (&jsonlog.JSONLogs{
+ Log: logline,
+ Stream: msg.Source,
+ Created: timestamp,
+ RawAttrs: l.extra,
+ }).MarshalJSONBuf(l.buf)
+ logger.PutMessage(msg)
+ if err != nil {
+ l.mu.Unlock()
+ return err
+ }
+
+ l.buf.WriteByte('\n')
+ _, err = l.writer.Write(l.buf.Bytes())
+ l.buf.Reset()
+ l.mu.Unlock()
+
+ return err
+}
+
+// ValidateLogOpt looks for the json-file-specific log options max-file,
+// max-size, labels, env, and env-regex.
+func ValidateLogOpt(cfg map[string]string) error {
+ for key := range cfg {
+ switch key {
+ case "max-file":
+ case "max-size":
+ case "labels":
+ case "env":
+ case "env-regex":
+ default:
+ return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
+ }
+ }
+ return nil
+}
+
+// LogPath returns the location the given json logger logs to.
+func (l *JSONFileLogger) LogPath() string {
+ return l.writer.LogPath()
+}
+
+// Close closes the underlying file and signals all readers to stop.
+func (l *JSONFileLogger) Close() error {
+ l.mu.Lock()
+ l.closed = true
+ err := l.writer.Close()
+ for r := range l.readers {
+ r.Close()
+ delete(l.readers, r)
+ }
+ l.mu.Unlock()
+ return err
+}
+
+// Name returns the name of this log driver.
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 0000000..d2d36e9 --- /dev/null +++ b/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,249 @@ +package jsonfilelog + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} 
+{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } + if string(penUlt) != expectedPenultimate { + t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) + } + +} + +func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl", "env-regex": "^dc"} + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filename, + Config: config, + ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true", "dc_region=west"}, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var jsonLog jsonlog.JSONLogs + if err := json.Unmarshal(res, &jsonLog); err != nil { + t.Fatal(err) + } + extra := make(map[string]string) + if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { + t.Fatal(err) + } + expected := map[string]string{ + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + "dc_region": "west", + } + if !reflect.DeepEqual(extra, expected) { + t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) + } +} + +func BenchmarkJSONFileLoggerWithReader(b *testing.B) { + b.StopTimer() + b.ResetTimer() + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + dir, err := ioutil.TempDir("", "json-logger-bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + + l, err := New(logger.Info{ + ContainerID: cid, + LogPath: filepath.Join(dir, "container.log"), + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + msg := &logger.Message{Line: []byte("line"), Source: "src1"} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + + 
b.StartTimer() + + go func() { + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + l.Log(msg) + } + } + l.Close() + }() + + lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + } + } +} diff --git a/daemon/logger/jsonfilelog/multireader/multireader.go b/daemon/logger/jsonfilelog/multireader/multireader.go new file mode 100644 index 0000000..06a7307 --- /dev/null +++ b/daemon/logger/jsonfilelog/multireader/multireader.go @@ -0,0 +1,224 @@ +package multireader + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + + var offsetTo int64 + + for _, rdr := range r.readers { + size, err := getReadSeekerSize(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo+size > offset { + return rdr, offset - offsetTo, nil + } + if rdr == r.readers[len(r.readers)-1] { + return rdr, offsetTo + offset, nil + } + offsetTo += size + } + + return nil, 0, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += 
size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + // make sure all readers are at 0 + r.Seek(0, os.SEEK_SET) + } + + bLen := int64(len(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bLen) + if err != nil && err != io.EOF { + return -1, err + } + bLen -= readBytes + + if bLen == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/daemon/logger/jsonfilelog/multireader/multireader_test.go b/daemon/logger/jsonfilelog/multireader/multireader_test.go new file mode 100644 index 0000000..bd59b78 --- /dev/null +++ b/daemon/logger/jsonfilelog/multireader/multireader_test.go @@ -0,0 +1,225 @@ +package multireader + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + // The positions of some readers are not 0 + s1.Seek(0, os.SEEK_SET) + s2.Seek(0, os.SEEK_END) + s3.Seek(0, os.SEEK_SET) + mr = MultiReadSeeker(s1, 
s2, s3) + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} + +func TestMultiReadSeekerCurAfterSet(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + mid := int64(s1.Len() + s2.Len()/2) + + size, err := mr.Seek(mid, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if size != mid { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid) + } + + size, err = mr.Seek(3, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+3 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+3) + } + size, err = mr.Seek(5, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+8 { + t.Fatalf("reader size does not match, got 
%d, expected %d", size, mid+8) + } + + size, err = mr.Seek(10, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+18 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+18) + } +} + +func TestMultiReadSeekerSmallReads(t *testing.T) { + readers := []io.ReadSeeker{} + for i := 0; i < 10; i++ { + integer := make([]byte, 4) + binary.BigEndian.PutUint32(integer, uint32(i)) + readers = append(readers, bytes.NewReader(integer)) + } + + reader := MultiReadSeeker(readers...) + for i := 0; i < 10; i++ { + var integer uint32 + if err := binary.Read(reader, binary.BigEndian, &integer); err != nil { + t.Fatalf("Read from NewMultiReadSeeker failed: %v", err) + } + if uint32(i) != integer { + t.Fatalf("Read wrong value from NewMultiReadSeeker: %d != %d", i, integer) + } + } +} diff --git a/daemon/logger/jsonfilelog/read.go b/daemon/logger/jsonfilelog/read.go new file mode 100644 index 0000000..312aa55 --- /dev/null +++ b/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,324 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/multireader" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: l.Attrs, + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read + // This will block writes!!! + l.mu.Lock() + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + defer f.Close() + + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + l.mu.Unlock() + return + } + defer latestFile.Close() + + if config.Tail != 0 { + tailer := multireader.MultiReadSeeker(append(files, latestFile)...) 
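+ // The rotated files (opened oldest-first above) and the live file are
+ // stitched into one seekable stream so a tail/since request can span
+ // log rotations.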
+ tailFile(tailer, logWatcher, config.Tail, config.Since)
+ }
+
+ // close all the rotated files
+ for _, f := range files {
+ if err := f.(io.Closer).Close(); err != nil {
+ logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
+ }
+ }
+
+ if !config.Follow || l.closed {
+ l.mu.Unlock()
+ return
+ }
+
+ if config.Tail >= 0 {
+ latestFile.Seek(0, os.SEEK_END)
+ }
+
+ notifyRotate := l.writer.NotifyRotate()
+ defer l.writer.NotifyRotateEvict(notifyRotate)
+
+ l.readers[logWatcher] = struct{}{}
+
+ l.mu.Unlock()
+
+ followLogs(latestFile, logWatcher, notifyRotate, config.Since)
+
+ l.mu.Lock()
+ delete(l.readers, logWatcher)
+ l.mu.Unlock()
+}
+
+func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
+ var rdr io.Reader
+ rdr = f
+ if tail > 0 {
+ ls, err := tailfile.TailFile(f, tail)
+ if err != nil {
+ logWatcher.Err <- err
+ return
+ }
+ rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
+ }
+ dec := json.NewDecoder(rdr)
+ l := &jsonlog.JSONLog{}
+ for {
+ msg, err := decodeLogLine(dec, l)
+ if err != nil {
+ if err != io.EOF {
+ logWatcher.Err <- err
+ }
+ return
+ }
+ if !since.IsZero() && msg.Timestamp.Before(since) {
+ continue
+ }
+ select {
+ case <-logWatcher.WatchClose():
+ return
+ case logWatcher.Msg <- msg:
+ }
+ }
+}
+
+func watchFile(name string) (filenotify.FileWatcher, error) {
+ fileWatcher, err := filenotify.New()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := fileWatcher.Add(name); err != nil {
+ logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
+ fileWatcher.Close()
+ fileWatcher = filenotify.NewPollingWatcher()
+
+ if err := fileWatcher.Add(name); err != nil {
+ fileWatcher.Close()
+ logrus.Debugf("error watching log file for modifications: %v", err)
+ return nil, err
+ }
+ }
+ return fileWatcher, nil
+}
+
+func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
+ dec := json.NewDecoder(f)
+ l := &jsonlog.JSONLog{}
+
+ name := f.Name()
+ fileWatcher, err := watchFile(name)
+ if err != nil {
+ logWatcher.Err <- err
+ return
+ }
+ defer func() {
+ f.Close()
+ fileWatcher.Remove(name)
+ fileWatcher.Close()
+ }()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+ select {
+ case <-logWatcher.WatchClose():
+ fileWatcher.Remove(name)
+ cancel()
+ case <-ctx.Done():
+ return
+ }
+ }()
+
+ var retries int
+ handleRotate := func() error {
+ f.Close()
+ fileWatcher.Remove(name)
+
+ // retry when the file doesn't exist
+ for retries := 0; retries <= 5; retries++ {
+ f, err = os.Open(name)
+ if err == nil || !os.IsNotExist(err) {
+ break
+ }
+ }
+ if err != nil {
+ return err
+ }
+ if err := fileWatcher.Add(name); err != nil {
+ return err
+ }
+ dec = json.NewDecoder(f)
+ return nil
+ }
+
+ errRetry := errors.New("retry")
+ errDone := errors.New("done")
+ waitRead := func() error {
+ select {
+ case e := <-fileWatcher.Events():
+ switch e.Op {
+ case fsnotify.Write:
+ dec = json.NewDecoder(f)
+ return nil
+ case fsnotify.Rename, fsnotify.Remove:
+ select {
+ case <-notifyRotate:
+ case <-ctx.Done():
+ return errDone
+ }
+ if err := handleRotate(); err != nil {
+ return err
+ }
+ return nil
+ }
+ return errRetry
+ case err := <-fileWatcher.Errors():
+ logrus.Debugf("logger got error watching file: %v", err)
+ // Something happened, let's try and stay alive and create a new watcher
+ if retries <= 5 {
+ fileWatcher.Close()
+ fileWatcher, err = watchFile(name)
+ if
err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err == io.EOF { + for { + err := waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil + } + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + return nil + } + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { + reader := io.MultiReader(dec.Buffered(), f) + dec = json.NewDecoder(reader) + retries++ + return nil + } + return err + } + + // main loop + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/daemon/logger/logentries/logentries.go b/daemon/logger/logentries/logentries.go new file mode 100644 index 0000000..a353d9d --- /dev/null +++ b/daemon/logger/logentries/logentries.go @@ -0,0 +1,97 @@ +// Package logentries provides the log driver for forwarding server logs +// to logentries endpoints. +package logentries + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/bsphere/le_go" + "github.com/docker/docker/daemon/logger" +) + +type logentries struct { + tag string + containerID string + containerName string + writer *le_go.Logger + extra map[string]string +} + +const ( + name = "logentries" + token = "logentries-token" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a logentries logger using the configuration passed in on +// the context. The supported context configuration variable is +// logentries-token. +func New(info logger.Info) (logger.Logger, error) { + logrus.WithField("container", info.ContainerID). + WithField("token", info.Config[token]). 
+ Debug("logging driver logentries configured") + + log, err := le_go.Connect(info.Config[token]) + if err != nil { + return nil, err + } + return &logentries{ + containerID: info.ContainerID, + containerName: info.ContainerName, + writer: log, + }, nil +} + +func (f *logentries) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + ts := msg.Timestamp + logger.PutMessage(msg) + f.writer.Println(f.tag, ts, data) + return nil +} + +func (f *logentries) Close() error { + return f.writer.Close() +} + +func (f *logentries) Name() string { + return name +} + +// ValidateLogOpt looks for logentries specific log option logentries-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "tag": + case key: + default: + return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) + } + } + + if cfg[token] == "" { + return fmt.Errorf("Missing logentries token") + } + + return nil +} diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go new file mode 100644 index 0000000..daa5403 --- /dev/null +++ b/daemon/logger/logger.go @@ -0,0 +1,136 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. +package logger + +import ( + "errors" + "sync" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/jsonlog" +) + +// ErrReadLogsNotSupported is returned when the logger does not support reading logs. +var ErrReadLogsNotSupported = errors.New("configured logging driver does not support reading") + +const ( + // TimeFormat is the time format used for timestamps sent to log readers. + TimeFormat = jsonlog.RFC3339NanoFixed + logWatcherBufferSize = 4096 +) + +var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }} + +// NewMessage returns a new message from the message sync.Pool +func NewMessage() *Message { + return messagePool.Get().(*Message) +} + +// PutMessage puts the specified message back n the message pool. +// The message fields are reset before putting into the pool. +func PutMessage(msg *Message) { + msg.reset() + messagePool.Put(msg) +} + +// Message is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// +// Message is subtyped from backend.LogMessage because there is a lot of +// internal complexity around the Message type that should not be exposed +// to any package not explicitly importing the logger type. +// +// Any changes made to this struct must also be updated in the `reset` function +type Message backend.LogMessage + +// reset sets the message back to default values +// This is used when putting a message back into the message pool. +// Any changes to the `Message` struct should be reflected here. 
+func (m *Message) reset() {
+ m.Line = m.Line[:0]
+ m.Source = ""
+ m.Attrs = nil
+ m.Partial = false
+
+ m.Err = nil
+}
+
+// AsLogMessage returns a pointer to the message as a pointer to
+// backend.LogMessage, which is an identical type with a different purpose
+func (m *Message) AsLogMessage() *backend.LogMessage {
+ return (*backend.LogMessage)(m)
+}
+
+// LogAttributes is used to hold the extra attributes available in the log message.
+// Primarily used for converting the map type to string and sorting.
+// Imported here so it can be used internally with less refactoring
+type LogAttributes backend.LogAttributes
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+ Log(*Message) error
+ Name() string
+ Close() error
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+ Since time.Time
+ Tail int
+ Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+ // Read logs from underlying logging backend
+ ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+ // For sending log messages to a reader.
+ Msg chan *Message
+ // For sending error messages that occur while reading logs.
+ Err chan error
+ closeOnce sync.Once
+ closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+ return &LogWatcher{
+ Msg: make(chan *Message, logWatcherBufferSize),
+ Err: make(chan error, 1),
+ closeNotifier: make(chan struct{}),
+ }
+}
+
+// Close notifies the underlying log reader to stop.
+func (w *LogWatcher) Close() {
+ // only close if not already closed
+ w.closeOnce.Do(func() {
+ close(w.closeNotifier)
+ })
+}
+
+// WatchClose returns a channel receiver that receives notification
+// when the watcher has been closed. This should only be called from
+// one goroutine.
+func (w *LogWatcher) WatchClose() <-chan struct{} {
+ return w.closeNotifier
+}
+
+// Capability defines the list of capabilities that a driver can implement.
+// These capabilities are not required to implement a logging driver; however,
+// they do determine how a logging driver can be used.
+type Capability struct {
+ // Determines if a log driver can read back logs
+ ReadLogs bool
+}
diff --git a/daemon/logger/logger_test.go b/daemon/logger/logger_test.go
new file mode 100644
index 0000000..4d6e079
--- /dev/null
+++ b/daemon/logger/logger_test.go
@@ -0,0 +1,19 @@
+package logger
+
+func (m *Message) copy() *Message {
+ msg := &Message{
+ Source: m.Source,
+ Partial: m.Partial,
+ Timestamp: m.Timestamp,
+ }
+
+ if m.Attrs != nil {
+ msg.Attrs = make(map[string]string, len(m.Attrs))
+ for k, v := range m.Attrs {
+ msg.Attrs[k] = v
+ }
+ }
+
+ msg.Line = append(make([]byte, 0, len(m.Line)), m.Line...)
+ return msg
+}
diff --git a/daemon/logger/loggerutils/log_tag.go b/daemon/logger/loggerutils/log_tag.go
new file mode 100644
index 0000000..f801047
--- /dev/null
+++ b/daemon/logger/loggerutils/log_tag.go
@@ -0,0 +1,31 @@
+package loggerutils
+
+import (
+ "bytes"
+
+ "github.com/docker/docker/daemon/logger"
+ "github.com/docker/docker/pkg/templates"
+)
+
+// DefaultTemplate defines the default template loggers should use.
+const DefaultTemplate = "{{.ID}}"
+
+// ParseLogTag generates a context-aware tag for consistency across different
+// log drivers based on the context of the running container.
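+//
+// For example (mirroring the tests below), with info.Config["tag"] set to
+// "{{.ImageName}}/{{.Name}}/{{.ID}}", a container named /test-container
+// running image test-image yields:
+//
+//    tag, err := ParseLogTag(info, DefaultTemplate)
+//    // tag == "test-image/test-container/container-ab"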
+func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) {
+ tagTemplate := info.Config["tag"]
+ if tagTemplate == "" {
+ tagTemplate = defaultTemplate
+ }
+
+ tmpl, err := templates.NewParse("log-tag", tagTemplate)
+ if err != nil {
+ return "", err
+ }
+ buf := new(bytes.Buffer)
+ if err := tmpl.Execute(buf, &info); err != nil {
+ return "", err
+ }
+
+ return buf.String(), nil
+}
diff --git a/daemon/logger/loggerutils/log_tag_test.go b/daemon/logger/loggerutils/log_tag_test.go
new file mode 100644
index 0000000..1a6d9f1
--- /dev/null
+++ b/daemon/logger/loggerutils/log_tag_test.go
@@ -0,0 +1,47 @@
+package loggerutils
+
+import (
+ "testing"
+
+ "github.com/docker/docker/daemon/logger"
+)
+
+func TestParseLogTagDefaultTag(t *testing.T) {
+ info := buildContext(map[string]string{})
+ tag, e := ParseLogTag(info, "{{.ID}}")
+ assertTag(t, e, tag, info.ID())
+}
+
+func TestParseLogTag(t *testing.T) {
+ info := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"})
+ tag, e := ParseLogTag(info, "{{.ID}}")
+ assertTag(t, e, tag, "test-image/test-container/container-ab")
+}
+
+func TestParseLogTagEmptyTag(t *testing.T) {
+ info := buildContext(map[string]string{})
+ tag, e := ParseLogTag(info, "{{.DaemonName}}/{{.ID}}")
+ assertTag(t, e, tag, "test-dockerd/container-ab")
+}
+
+// Helpers
+
+func buildContext(cfg map[string]string) logger.Info {
+ return logger.Info{
+ ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890",
+ ContainerName: "/test-container",
+ ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890",
+ ContainerImageName: "test-image",
+ Config: cfg,
+ DaemonName: "test-dockerd",
+ }
+}
+
+func assertTag(t *testing.T, e error, tag string, expected string) {
+ if e != nil {
+ t.Fatalf("Error generating tag: %q", e)
+ }
+ if tag != expected {
+ t.Fatalf("Wrong tag: %q, should be %q", tag, expected)
+ }
+}
diff --git a/daemon/logger/loggerutils/rotatefilewriter.go b/daemon/logger/loggerutils/rotatefilewriter.go
new file mode 100644
index 0000000..ecb461f
--- /dev/null
+++ b/daemon/logger/loggerutils/rotatefilewriter.go
@@ -0,0 +1,141 @@
+package loggerutils
+
+import (
+ "errors"
+ "os"
+ "strconv"
+ "sync"
+
+ "github.com/docker/docker/pkg/pubsub"
+)
+
+// RotateFileWriter writes log messages to a file, rotating it once it reaches
+// a size cap and keeping a bounded number of rotated files.
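+//
+// A brief usage sketch (the path and sizes are illustrative):
+//
+//    w, err := NewRotateFileWriter("/tmp/container.log", 1024, 3)
+//    if err == nil {
+//        defer w.Close()
+//        // rotates to container.log.1, container.log.2 once 1 KiB is reached
+//        w.Write([]byte(`{"log":"hello\n"}` + "\n"))
+//    }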
+type RotateFileWriter struct {
+ f *os.File // store for closing
+ closed bool
+ mu sync.Mutex
+ capacity int64 // maximum size of each file
+ currentSize int64 // current size of the latest file
+ maxFiles int // maximum number of files
+ notifyRotate *pubsub.Publisher
+}
+
+// NewRotateFileWriter creates a new RotateFileWriter.
+func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
+ log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
+ if err != nil {
+ return nil, err
+ }
+
+ size, err := log.Seek(0, os.SEEK_END)
+ if err != nil {
+ return nil, err
+ }
+
+ return &RotateFileWriter{
+ f: log,
+ capacity: capacity,
+ currentSize: size,
+ maxFiles: maxFiles,
+ notifyRotate: pubsub.NewPublisher(0, 1),
+ }, nil
+}
+
+// Write writes a log message to the file, rotating the file first if it has
+// reached its capacity.
+func (w *RotateFileWriter) Write(message []byte) (int, error) {
+ w.mu.Lock()
+ if w.closed {
+ w.mu.Unlock()
+ return -1, errors.New("cannot write because the output file was closed")
+ }
+ if err := w.checkCapacityAndRotate(); err != nil {
+ w.mu.Unlock()
+ return -1, err
+ }
+
+ n, err := w.f.Write(message)
+ if err == nil {
+ w.currentSize += int64(n)
+ }
+ w.mu.Unlock()
+ return n, err
+}
+
+func (w *RotateFileWriter) checkCapacityAndRotate() error {
+ if w.capacity == -1 {
+ return nil
+ }
+
+ if w.currentSize >= w.capacity {
+ name := w.f.Name()
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if err := rotate(name, w.maxFiles); err != nil {
+ return err
+ }
+ file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
+ if err != nil {
+ return err
+ }
+ w.f = file
+ w.currentSize = 0
+ w.notifyRotate.Publish(struct{}{})
+ }
+
+ return nil
+}
+
+func rotate(name string, maxFiles int) error {
+ if maxFiles < 2 {
+ return nil
+ }
+ for i := maxFiles - 1; i > 1; i-- {
+ toPath := name + "." + strconv.Itoa(i)
+ fromPath := name + "." + strconv.Itoa(i-1)
+ if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
+
+// LogPath returns the location the given writer logs to.
+func (w *RotateFileWriter) LogPath() string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.f.Name()
+}
+
+// MaxFiles returns the maximum number of files.
+func (w *RotateFileWriter) MaxFiles() int {
+ return w.maxFiles
+}
+
+// NotifyRotate returns a new subscriber that receives a message on each rotation.
+func (w *RotateFileWriter) NotifyRotate() chan interface{} {
+ return w.notifyRotate.Subscribe()
+}
+
+// NotifyRotateEvict removes the specified subscriber from receiving any more messages.
+func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
+ w.notifyRotate.Evict(sub)
+}
+
+// Close closes the underlying file and signals all readers to stop.
+func (w *RotateFileWriter) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.closed {
+ return nil
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ w.closed = true
+ return nil
+}
diff --git a/daemon/logger/loginfo.go b/daemon/logger/loginfo.go
new file mode 100644
index 0000000..4c930b9
--- /dev/null
+++ b/daemon/logger/loginfo.go
@@ -0,0 +1,129 @@
+package logger
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// Info provides enough information for a logging driver to do its function.
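+//
+// For example, a metadata-capable driver typically resolves the user-selected
+// "labels", "env", and "env-regex" options into key/value pairs with:
+//
+//    attrs, err := info.ExtraAttributes(nil)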
+type Info struct {
+ Config map[string]string
+ ContainerID string
+ ContainerName string
+ ContainerEntrypoint string
+ ContainerArgs []string
+ ContainerImageID string
+ ContainerImageName string
+ ContainerCreated time.Time
+ ContainerEnv []string
+ ContainerLabels map[string]string
+ LogPath string
+ DaemonName string
+}
+
+// ExtraAttributes returns the user-defined extra attributes (labels,
+// environment variables) in key-value format. This can be used by log drivers
+// that support metadata to add more context to a log.
+func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) {
+ extra := make(map[string]string)
+ labels, ok := info.Config["labels"]
+ if ok && len(labels) > 0 {
+ for _, l := range strings.Split(labels, ",") {
+ if v, ok := info.ContainerLabels[l]; ok {
+ if keyMod != nil {
+ l = keyMod(l)
+ }
+ extra[l] = v
+ }
+ }
+ }
+
+ envMapping := make(map[string]string)
+ for _, e := range info.ContainerEnv {
+ if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
+ envMapping[kv[0]] = kv[1]
+ }
+ }
+
+ env, ok := info.Config["env"]
+ if ok && len(env) > 0 {
+ for _, l := range strings.Split(env, ",") {
+ if v, ok := envMapping[l]; ok {
+ if keyMod != nil {
+ l = keyMod(l)
+ }
+ extra[l] = v
+ }
+ }
+ }
+
+ envRegex, ok := info.Config["env-regex"]
+ if ok && len(envRegex) > 0 {
+ re, err := regexp.Compile(envRegex)
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range envMapping {
+ if re.MatchString(k) {
+ if keyMod != nil {
+ k = keyMod(k)
+ }
+ extra[k] = v
+ }
+ }
+ }
+
+ return extra, nil
+}
+
+// Hostname returns the hostname from the underlying OS.
+func (info *Info) Hostname() (string, error) {
+ hostname, err := os.Hostname()
+ if err != nil {
+ return "", fmt.Errorf("logger: cannot resolve hostname: %v", err)
+ }
+ return hostname, nil
+}
+
+// Command returns the command that the container being logged was
+// started with. The Entrypoint is prepended to the container
+// arguments.
+func (info *Info) Command() string {
+ terms := []string{info.ContainerEntrypoint}
+ terms = append(terms, info.ContainerArgs...)
+ command := strings.Join(terms, " ")
+ return command
+}
+
+// ID returns the Container ID shortened to 12 characters.
+func (info *Info) ID() string {
+ return info.ContainerID[:12]
+}
+
+// FullID is an alias of ContainerID.
+func (info *Info) FullID() string {
+ return info.ContainerID
+}
+
+// Name returns the ContainerName without a preceding '/'.
+func (info *Info) Name() string {
+ return strings.TrimPrefix(info.ContainerName, "/")
+}
+
+// ImageID returns the ContainerImageID shortened to 12 characters.
+func (info *Info) ImageID() string {
+ return info.ContainerImageID[:12]
+}
+
+// ImageFullID is an alias of ContainerImageID.
+func (info *Info) ImageFullID() string {
+ return info.ContainerImageID
+}
+
+// ImageName is an alias of ContainerImageName.
+func (info *Info) ImageName() string {
+ return info.ContainerImageName
+}
diff --git a/daemon/logger/plugin.go b/daemon/logger/plugin.go
new file mode 100644
index 0000000..bdccea5
--- /dev/null
+++ b/daemon/logger/plugin.go
@@ -0,0 +1,90 @@
+package logger
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/api/types/plugins/logdriver"
+ getter "github.com/docker/docker/pkg/plugingetter"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/pkg/errors"
+)
+
+var pluginGetter getter.PluginGetter
+
+const extName = "LogDriver"
+
+// logPlugin defines the available functions that logging plugins must implement.
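+//
+// Rough call flow, as wired up in makePluginCreator below: the daemon creates
+// a FIFO under the plugin's base path, calls StartLogging with that path and
+// the container's Info, and then streams encoded log entries into the FIFO;
+// Capabilities().ReadLogs decides whether the plugin is also asked to serve
+// ReadLogs.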
+type logPlugin interface {
+ StartLogging(streamPath string, info Info) (err error)
+ StopLogging(streamPath string) (err error)
+ Capabilities() (cap Capability, err error)
+ ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error)
+}
+
+// RegisterPluginGetter sets the plugingetter
+func RegisterPluginGetter(plugingetter getter.PluginGetter) {
+ pluginGetter = plugingetter
+}
+
+// getPlugin returns a Creator for the logging plugin with the given name,
+// looked up via the registered plugin getter.
+func getPlugin(name string, mode int) (Creator, error) {
+ p, err := pluginGetter.Get(name, extName, mode)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err)
+ }
+
+ d := &logPluginProxy{p.Client()}
+ return makePluginCreator(name, d, p.BasePath()), nil
+}
+
+func makePluginCreator(name string, l *logPluginProxy, basePath string) Creator {
+ return func(logCtx Info) (logger Logger, err error) {
+ defer func() {
+ if err != nil {
+ pluginGetter.Get(name, extName, getter.Release)
+ }
+ }()
+ root := filepath.Join(basePath, "run", "docker", "logging")
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, err
+ }
+
+ id := stringid.GenerateNonCryptoID()
+ a := &pluginAdapter{
+ driverName: name,
+ id: id,
+ plugin: l,
+ basePath: basePath,
+ fifoPath: filepath.Join(root, id),
+ logInfo: logCtx,
+ }
+
+ cap, err := a.plugin.Capabilities()
+ if err == nil {
+ a.capabilities = cap
+ }
+
+ stream, err := openPluginStream(a)
+ if err != nil {
+ return nil, err
+ }
+
+ a.stream = stream
+ a.enc = logdriver.NewLogEntryEncoder(a.stream)
+
+ if err := l.StartLogging(strings.TrimPrefix(a.fifoPath, basePath), logCtx); err != nil {
+ return nil, errors.Wrapf(err, "error creating logger")
+ }
+
+ if cap.ReadLogs {
+ return &pluginAdapterWithRead{a}, nil
+ }
+
+ return a, nil
+ }
+}
diff --git a/daemon/logger/plugin_unix.go b/daemon/logger/plugin_unix.go
new file mode 100644
index 0000000..f254c9c
--- /dev/null
+++ b/daemon/logger/plugin_unix.go
@@ -0,0 +1,20 @@
+// +build linux solaris freebsd
+
+package logger
+
+import (
+ "context"
+ "io"
+
+ "github.com/pkg/errors"
+ "github.com/tonistiigi/fifo"
+ "golang.org/x/sys/unix"
+)
+
+func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) {
+ f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name())
+ }
+ return f, nil
+}
diff --git a/daemon/logger/plugin_unsupported.go b/daemon/logger/plugin_unsupported.go
new file mode 100644
index 0000000..0a2036c
--- /dev/null
+++ b/daemon/logger/plugin_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!solaris,!freebsd
+
+package logger
+
+import (
+ "errors"
+ "io"
+)
+
+func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) {
+ return nil, errors.New("log plugin not supported")
+}
diff --git a/daemon/logger/proxy.go b/daemon/logger/proxy.go
new file mode 100644
index 0000000..53860eb
--- /dev/null
+++ b/daemon/logger/proxy.go
@@ -0,0 +1,107 @@
+package logger
+
+import (
+ "errors"
+ "io"
+)
+
+type client interface {
+ Call(string, interface{}, interface{}) error
+ Stream(string, interface{}) (io.ReadCloser, error)
+}
+
+type logPluginProxy struct {
+ client
+}
+
+type logPluginProxyStartLoggingRequest struct {
+ File string
+ Info Info
+}
+
+type logPluginProxyStartLoggingResponse struct {
+ Err string
+}
+
+func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) {
+ var (
+ req logPluginProxyStartLoggingRequest + ret logPluginProxyStartLoggingResponse + ) + + req.File = file + req.Info = info + if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyStopLoggingRequest struct { + File string +} + +type logPluginProxyStopLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StopLogging(file string) (err error) { + var ( + req logPluginProxyStopLoggingRequest + ret logPluginProxyStopLoggingResponse + ) + + req.File = file + if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyCapabilitiesResponse struct { + Cap Capability + Err string +} + +func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { + var ( + ret logPluginProxyCapabilitiesResponse + ) + + if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { + return + } + + cap = ret.Cap + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyReadLogsRequest struct { + Info Info + Config ReadConfig +} + +func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { + var ( + req logPluginProxyReadLogsRequest + ) + + req.Info = info + req.Config = config + return pp.Stream("LogDriver.ReadLogs", req) +} diff --git a/daemon/logger/ring.go b/daemon/logger/ring.go new file mode 100644 index 0000000..90769d7 --- /dev/null +++ b/daemon/logger/ring.go @@ -0,0 +1,218 @@ +package logger + +import ( + "errors" + "sync" + "sync/atomic" + + "github.com/Sirupsen/logrus" +) + +const ( + defaultRingMaxSize = 1e6 // 1MB +) + +// RingLogger is a ring buffer that implements the Logger interface. +// This is used when lossy logging is OK. +type RingLogger struct { + buffer *messageRing + l Logger + logInfo Info + closeFlag int32 +} + +type ringWithReader struct { + *RingLogger +} + +func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { + reader, ok := r.l.(LogReader) + if !ok { + // something is wrong if we get here + panic("expected log reader") + } + return reader.ReadLogs(cfg) +} + +func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { + l := &RingLogger{ + buffer: newRing(maxSize), + l: driver, + logInfo: logInfo, + } + go l.run() + return l +} + +// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping +// the passed in logger. 
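+//
+// Usage sketch (the size is arbitrary): wrap a concrete driver so that a slow
+// sink drops messages instead of blocking the container:
+//
+//    l := NewRingLogger(driver, info, 64*1024) // buffer at most ~64KiB of lines
+//    defer l.Close()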
+func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger {
+ if maxSize < 0 {
+ maxSize = defaultRingMaxSize
+ }
+ l := newRingLogger(driver, logInfo, maxSize)
+ if _, ok := driver.(LogReader); ok {
+ return &ringWithReader{l}
+ }
+ return l
+}
+
+// Log queues messages into the ring buffer
+func (r *RingLogger) Log(msg *Message) error {
+ if r.closed() {
+ return errClosed
+ }
+ return r.buffer.Enqueue(msg)
+}
+
+// Name returns the name of the underlying logger
+func (r *RingLogger) Name() string {
+ return r.l.Name()
+}
+
+func (r *RingLogger) closed() bool {
+ return atomic.LoadInt32(&r.closeFlag) == 1
+}
+
+func (r *RingLogger) setClosed() {
+ atomic.StoreInt32(&r.closeFlag, 1)
+}
+
+// Close closes the logger
+func (r *RingLogger) Close() error {
+ r.setClosed()
+ r.buffer.Close()
+ // empty out the queue
+ var logErr bool
+ for _, msg := range r.buffer.Drain() {
+ if logErr {
+ // some error logging a previous message, so re-insert to message pool
+ // and assume log driver is hosed
+ PutMessage(msg)
+ continue
+ }
+
+ if err := r.l.Log(msg); err != nil {
+ logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", err)
+ logErr = true
+ }
+ }
+ return r.l.Close()
+}
+
+// run consumes messages from the ring buffer and forwards them to the underlying
+// logger.
+// This is run in a goroutine when the RingLogger is created
+func (r *RingLogger) run() {
+ for {
+ if r.closed() {
+ return
+ }
+ msg, err := r.buffer.Dequeue()
+ if err != nil {
+ // buffer is closed
+ return
+ }
+ if err := r.l.Log(msg); err != nil {
+ logrus.WithField("driver", r.l.Name()).WithField("container", r.logInfo.ContainerID).Errorf("Error writing log message: %v", err)
+ }
+ }
+}
+
+type messageRing struct {
+ mu sync.Mutex
+ // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added
+ wait *sync.Cond
+
+ sizeBytes int64 // current buffer size
+ maxBytes int64 // maximum buffer size
+ queue []*Message
+ closed bool
+}
+
+func newRing(maxBytes int64) *messageRing {
+ queueSize := 1000
+ if maxBytes == 0 || maxBytes == 1 {
+ // With 0 or 1 max byte size, the maximum size of the queue would only ever be 1
+ // message long.
+ queueSize = 1
+ }
+
+ r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes}
+ r.wait = sync.NewCond(&r.mu)
+ return r
+}
+
+// Enqueue adds a message to the buffer queue.
+// If adding the message would exceed the maximum buffer size and the queue is
+// not empty, the new message is dropped.
+// If there are no messages in the queue and the message is still too big, it adds the message anyway.
+func (r *messageRing) Enqueue(m *Message) error {
+ mSize := int64(len(m.Line))
+
+ r.mu.Lock()
+ if r.closed {
+ r.mu.Unlock()
+ return errClosed
+ }
+ if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 {
+ r.wait.Signal()
+ r.mu.Unlock()
+ return nil
+ }
+
+ r.queue = append(r.queue, m)
+ r.sizeBytes += mSize
+ r.wait.Signal()
+ r.mu.Unlock()
+ return nil
+}
+
+// Dequeue pulls a message off the queue.
+// If there are no messages, it waits for one.
+// If the buffer is closed, it will return immediately.
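+// Dequeue blocks on the ring's condition variable; Close broadcasts on the
+// same condition variable, which wakes blocked callers so they can observe
+// errClosed.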
+
+// Dequeue pulls a message off the queue.
+// If there are no messages, it waits for one.
+// If the buffer is closed, it returns immediately.
+func (r *messageRing) Dequeue() (*Message, error) {
+ r.mu.Lock()
+ for len(r.queue) == 0 && !r.closed {
+ r.wait.Wait()
+ }
+
+ if r.closed {
+ r.mu.Unlock()
+ return nil, errClosed
+ }
+
+ msg := r.queue[0]
+ r.queue = r.queue[1:]
+ r.sizeBytes -= int64(len(msg.Line))
+ r.mu.Unlock()
+ return msg, nil
+}
+
+var errClosed = errors.New("closed")
+
+// Close closes the buffer ensuring no new messages can be added.
+// Any callers waiting to dequeue a message will be woken up.
+func (r *messageRing) Close() {
+ r.mu.Lock()
+ if r.closed {
+ r.mu.Unlock()
+ return
+ }
+
+ r.closed = true
+ r.wait.Broadcast()
+ r.mu.Unlock()
+}
+
+// Drain drains all messages from the queue.
+// This can be used after `Close()` to get any remaining messages that were in queue.
+func (r *messageRing) Drain() []*Message {
+ r.mu.Lock()
+ ls := make([]*Message, 0, len(r.queue))
+ ls = append(ls, r.queue...)
+ r.sizeBytes = 0
+ r.queue = r.queue[:0]
+ r.mu.Unlock()
+ return ls
+}
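+
+// Shutdown behavior, sketched: RingLogger.Close marks the logger closed,
+// closes the ring, and then drains whatever is still queued into the wrapped
+// driver, so messages accepted before Close are handed to the driver rather
+// than silently lost (driver below stands for any Logger):
+//
+//	rl := newRingLogger(driver, Info{}, 64)
+//	rl.Log(&Message{Line: []byte("bye")})
+//	rl.Close() // Drain() passes "bye" to driver.Log, then driver.Close runs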
%v", err) + } + if len(r.queue) != 1 { + t.Fatal("expected empty queue") + } + if m, err := r.Dequeue(); err == nil || m != nil { + t.Fatal("expected err on Dequeue after close") + } + + ls := r.Drain() + if len(ls) != 1 { + t.Fatalf("expected one message: %v", ls) + } + if string(ls[0].Line) != "hello" { + t.Fatalf("got unexpected message: %s", string(ls[0].Line)) + } +} + +func TestRingDrain(t *testing.T) { + r := newRing(5) + for i := 0; i < 5; i++ { + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + ls := r.Drain() + if len(ls) != 5 { + t.Fatal("got unexpected length after drain") + } + + for i := 0; i < 5; i++ { + if string(ls[i].Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message at position %d: %s", i, string(ls[i].Line)) + } + } + if r.sizeBytes != 0 { + t.Fatalf("expected buffer size to be 0 after drain, got: %d", r.sizeBytes) + } + + ls = r.Drain() + if len(ls) != 0 { + t.Fatalf("expected 0 messages on 2nd drain: %v", ls) + } + +} + +type nopLogger struct{} + +func (nopLogger) Name() string { return "nopLogger" } +func (nopLogger) Close() error { return nil } +func (nopLogger) Log(*Message) error { return nil } + +func BenchmarkRingLoggerThroughputNoReceiver(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputWithReceiverDelay0(b *testing.B) { + l := NewRingLogger(nopLogger{}, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func consumeWithDelay(delay time.Duration, c <-chan *Message) (cancel func()) { + started := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + close(started) + ticker := time.NewTicker(delay) + for range ticker.C { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-c: + } + } + }() + <-started + return cancel +} + +func BenchmarkRingLoggerThroughputConsumeDelay1(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(1*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay10(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(10*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay50(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(50*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i 
< b.N; i++ {
+ if err := l.Log(msg); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRingLoggerThroughputConsumeDelay100(b *testing.B) {
+ mockLog := &mockLogger{make(chan *Message)}
+ defer mockLog.Close()
+ l := NewRingLogger(mockLog, Info{}, -1)
+ msg := &Message{Line: []byte("hello humans and everyone else!")}
+ b.SetBytes(int64(len(msg.Line)))
+
+ cancel := consumeWithDelay(100*time.Millisecond, mockLog.c)
+ defer cancel()
+
+ for i := 0; i < b.N; i++ {
+ if err := l.Log(msg); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRingLoggerThroughputConsumeDelay300(b *testing.B) {
+ mockLog := &mockLogger{make(chan *Message)}
+ defer mockLog.Close()
+ l := NewRingLogger(mockLog, Info{}, -1)
+ msg := &Message{Line: []byte("hello humans and everyone else!")}
+ b.SetBytes(int64(len(msg.Line)))
+
+ cancel := consumeWithDelay(300*time.Millisecond, mockLog.c)
+ defer cancel()
+
+ for i := 0; i < b.N; i++ {
+ if err := l.Log(msg); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRingLoggerThroughputConsumeDelay500(b *testing.B) {
+ mockLog := &mockLogger{make(chan *Message)}
+ defer mockLog.Close()
+ l := NewRingLogger(mockLog, Info{}, -1)
+ msg := &Message{Line: []byte("hello humans and everyone else!")}
+ b.SetBytes(int64(len(msg.Line)))
+
+ cancel := consumeWithDelay(500*time.Millisecond, mockLog.c)
+ defer cancel()
+
+ for i := 0; i < b.N; i++ {
+ if err := l.Log(msg); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/daemon/logger/splunk/splunk.go b/daemon/logger/splunk/splunk.go
new file mode 100644
index 0000000..233a2db
--- /dev/null
+++ b/daemon/logger/splunk/splunk.go
@@ -0,0 +1,626 @@
+// Package splunk provides the log driver for forwarding server logs to a
+// Splunk HTTP Event Collector endpoint.
+package splunk
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/logger"
+ "github.com/docker/docker/daemon/logger/loggerutils"
+ "github.com/docker/docker/pkg/urlutil"
+)
+
+const (
+ driverName = "splunk"
+ splunkURLKey = "splunk-url"
+ splunkTokenKey = "splunk-token"
+ splunkSourceKey = "splunk-source"
+ splunkSourceTypeKey = "splunk-sourcetype"
+ splunkIndexKey = "splunk-index"
+ splunkCAPathKey = "splunk-capath"
+ splunkCANameKey = "splunk-caname"
+ splunkInsecureSkipVerifyKey = "splunk-insecureskipverify"
+ splunkFormatKey = "splunk-format"
+ splunkVerifyConnectionKey = "splunk-verify-connection"
+ splunkGzipCompressionKey = "splunk-gzip"
+ splunkGzipCompressionLevelKey = "splunk-gzip-level"
+ envKey = "env"
+ envRegexKey = "env-regex"
+ labelsKey = "labels"
+ tagKey = "tag"
+)
+
+const (
+ // How often we send messages if we do not reach the batch size
+ defaultPostMessagesFrequency = 5 * time.Second
+ // How big a batch of messages can be
+ defaultPostMessagesBatchSize = 1000
+ // Maximum number of messages we can store in the buffer
+ defaultBufferMaximum = 10 * defaultPostMessagesBatchSize
+ // Number of messages allowed to be queued in the channel
+ defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize
+)
+
+const (
+ envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY"
+ envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE"
+ envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX"
+ envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
+)
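+
+// For reference, a container is typically attached to this driver through the
+// log opts defined above; the endpoint and token below are placeholders:
+//
+//	docker run --log-driver=splunk \
+//		--log-opt splunk-url=https://splunkhost:8088 \
+//		--log-opt splunk-token=00000000-0000-0000-0000-000000000000 \
+//		--log-opt splunk-format=json \
+//		my-image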
+
+type splunkLoggerInterface interface {
+ logger.Logger
+ worker()
+}
+
+type splunkLogger struct {
+ client *http.Client
+ transport *http.Transport
+
+ url string
+ auth string
+ nullMessage *splunkMessage
+
+ // http compression
+ gzipCompression bool
+ gzipCompressionLevel int
+
+ // Advanced options
+ postMessagesFrequency time.Duration
+ postMessagesBatchSize int
+ bufferMaximum int
+
+ // For synchronization between the background worker and the logger:
+ // we use the channel to send messages to the worker goroutine; the
+ // remaining fields block Close until all messages are flushed to HEC.
+ stream chan *splunkMessage
+ lock sync.RWMutex
+ closed bool
+ closedCond *sync.Cond
+}
+
+type splunkLoggerInline struct {
+ *splunkLogger
+
+ nullEvent *splunkMessageEvent
+}
+
+type splunkLoggerJSON struct {
+ *splunkLoggerInline
+}
+
+type splunkLoggerRaw struct {
+ *splunkLogger
+
+ prefix []byte
+}
+
+type splunkMessage struct {
+ Event interface{} `json:"event"`
+ Time string `json:"time"`
+ Host string `json:"host"`
+ Source string `json:"source,omitempty"`
+ SourceType string `json:"sourcetype,omitempty"`
+ Index string `json:"index,omitempty"`
+}
+
+type splunkMessageEvent struct {
+ Line interface{} `json:"line"`
+ Source string `json:"source"`
+ Tag string `json:"tag,omitempty"`
+ Attrs map[string]string `json:"attrs,omitempty"`
+}
+
+const (
+ splunkFormatRaw = "raw"
+ splunkFormatJSON = "json"
+ splunkFormatInline = "inline"
+)
+
+func init() {
+ if err := logger.RegisterLogDriver(driverName, New); err != nil {
+ logrus.Fatal(err)
+ }
+ if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil {
+ logrus.Fatal(err)
+ }
+}
+
+// New creates splunk logger driver using configuration passed in context
+func New(info logger.Info) (logger.Logger, error) {
+ hostname, err := info.Hostname()
+ if err != nil {
+ return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
+ }
+
+ // Parse and validate Splunk URL
+ splunkURL, err := parseURL(info)
+ if err != nil {
+ return nil, err
+ }
+
+ // The Splunk token is a required parameter
+ splunkToken, ok := info.Config[splunkTokenKey]
+ if !ok {
+ return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
+ }
+
+ tlsConfig := &tls.Config{}
+
+ // Splunk uses autogenerated certificates by default;
+ // allow users to trust them by skipping verification
+ if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok {
+ insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.InsecureSkipVerify = insecureSkipVerify
+ }
+
+ // If a path to the root certificate is provided, load it
+ if caPath, ok := info.Config[splunkCAPathKey]; ok {
+ caCert, err := ioutil.ReadFile(caPath)
+ if err != nil {
+ return nil, err
+ }
+ caPool := x509.NewCertPool()
+ caPool.AppendCertsFromPEM(caCert)
+ tlsConfig.RootCAs = caPool
+ }
+
+ if caName, ok := info.Config[splunkCANameKey]; ok {
+ tlsConfig.ServerName = caName
+ }
+
+ gzipCompression := false
+ if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok {
+ gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ gzipCompressionLevel := gzip.DefaultCompression
+ if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok {
+ var err error
+ gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ gzipCompressionLevel = int(gzipCompressionLevel64)
+ if gzipCompressionLevel < 
gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
+ err := fmt.Errorf("Unsupported level '%s' for %s (supported values are between %d and %d)",
+ gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
+ return nil, err
+ }
+ }
+
+ transport := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+ client := &http.Client{
+ Transport: transport,
+ }
+
+ source := info.Config[splunkSourceKey]
+ sourceType := info.Config[splunkSourceTypeKey]
+ index := info.Config[splunkIndexKey]
+
+ var nullMessage = &splunkMessage{
+ Host: hostname,
+ Source: source,
+ SourceType: sourceType,
+ Index: index,
+ }
+
+ // Allow users to remove the tag from messages by setting it to an empty string
+ tag := ""
+ if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
+ tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ attrs, err := info.ExtraAttributes(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
+ postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
+ bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
+ streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
+ )
+
+ logger := &splunkLogger{
+ client: client,
+ transport: transport,
+ url: splunkURL.String(),
+ auth: "Splunk " + splunkToken,
+ nullMessage: nullMessage,
+ gzipCompression: gzipCompression,
+ gzipCompressionLevel: gzipCompressionLevel,
+ stream: make(chan *splunkMessage, streamChannelSize),
+ postMessagesFrequency: postMessagesFrequency,
+ postMessagesBatchSize: postMessagesBatchSize,
+ bufferMaximum: bufferMaximum,
+ }
+
+ // By default we verify the connection, but we allow users to skip that
+ verifyConnection := true
+ if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
+ var err error
+ verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if verifyConnection {
+ err = verifySplunkConnection(logger)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var splunkFormat string
+ if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok {
+ switch splunkFormatParsed {
+ case splunkFormatInline:
+ case splunkFormatJSON:
+ case splunkFormatRaw:
+ default:
+ return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed)
+ }
+ splunkFormat = splunkFormatParsed
+ } else {
+ splunkFormat = splunkFormatInline
+ }
+
+ var loggerWrapper splunkLoggerInterface
+
+ switch splunkFormat {
+ case splunkFormatInline:
+ nullEvent := &splunkMessageEvent{
+ Tag: tag,
+ Attrs: attrs,
+ }
+
+ loggerWrapper = &splunkLoggerInline{logger, nullEvent}
+ case splunkFormatJSON:
+ nullEvent := &splunkMessageEvent{
+ Tag: tag,
+ Attrs: attrs,
+ }
+
+ loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
+ case splunkFormatRaw:
+ var prefix bytes.Buffer
+ if tag != "" {
+ prefix.WriteString(tag)
+ prefix.WriteString(" ")
+ }
+ for key, value := range attrs {
+ prefix.WriteString(key)
+ prefix.WriteString("=")
+ prefix.WriteString(value)
+ prefix.WriteString(" ")
+ }
+
+ loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
+ default:
+ return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
+ }
+
+ go loggerWrapper.worker()
+
+ return loggerWrapper, nil
+}
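+
+// The three formats, by example: for a log line `{"a":"b"}` read from stdout
+// with tag "abc" (shapes as exercised by the driver tests):
+//
+//	inline: "event": {"line": "{\"a\":\"b\"}", "source": "stdout", "tag": "abc"}
+//	json:   "event": {"line": {"a": "b"}, "source": "stdout", "tag": "abc"}
+//	raw:    "event": "abc {\"a\":\"b\"}"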
+
+func (l *splunkLoggerInline) Log(msg *logger.Message) error {
+ message := l.createSplunkMessage(msg)
+
+ event := *l.nullEvent
+ event.Line = string(msg.Line)
+ event.Source = msg.Source
+
+ message.Event = &event
+ logger.PutMessage(msg)
+ return l.queueMessageAsync(message)
+}
+
+func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
+ message := l.createSplunkMessage(msg)
+ event := *l.nullEvent
+
+ var rawJSONMessage json.RawMessage
+ if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil {
+ event.Line = &rawJSONMessage
+ } else {
+ event.Line = string(msg.Line)
+ }
+
+ event.Source = msg.Source
+
+ message.Event = &event
+ logger.PutMessage(msg)
+ return l.queueMessageAsync(message)
+}
+
+func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
+ message := l.createSplunkMessage(msg)
+
+ message.Event = string(append(l.prefix, msg.Line...))
+ logger.PutMessage(msg)
+ return l.queueMessageAsync(message)
+}
+
+func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ if l.closedCond != nil {
+ return fmt.Errorf("%s: driver is closed", driverName)
+ }
+ l.stream <- message
+ return nil
+}
+
+func (l *splunkLogger) worker() {
+ timer := time.NewTicker(l.postMessagesFrequency)
+ var messages []*splunkMessage
+ for {
+ select {
+ case message, open := <-l.stream:
+ if !open {
+ l.postMessages(messages, true)
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ l.transport.CloseIdleConnections()
+ l.closed = true
+ l.closedCond.Signal()
+ return
+ }
+ messages = append(messages, message)
+ // Only send when we reach exactly the batch size; this also avoids
+ // firing postMessages on every new message after a previous try failed.
+ if len(messages)%l.postMessagesBatchSize == 0 {
+ messages = l.postMessages(messages, false)
+ }
+ case <-timer.C:
+ messages = l.postMessages(messages, false)
+ }
+ }
+}
+
+func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
+ messagesLen := len(messages)
+ for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
+ upperBound := i + l.postMessagesBatchSize
+ if upperBound > messagesLen {
+ upperBound = messagesLen
+ }
+ if err := l.tryPostMessages(messages[i:upperBound]); err != nil {
+ logrus.Error(err)
+ if messagesLen-i >= l.bufferMaximum || lastChance {
+ // If this is the last chance, print them all to the daemon log
+ if lastChance {
+ upperBound = messagesLen
+ }
+ // Not all messages were sent and the buffer has reached its maximum:
+ // log the messages we could not send and return the rest of the buffer
+ for j := i; j < upperBound; j++ {
+ if jsonEvent, err := json.Marshal(messages[j]); err != nil {
+ logrus.Error(err)
+ } else {
+ logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent)))
+ }
+ }
+ return messages[upperBound:messagesLen]
+ }
+ // Not all messages were sent; return the buffer starting from the first unsent message
+ return messages[i:messagesLen]
+ }
+ }
+ // All sent, return empty buffer
+ return messages[:0]
+}
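+
+// Batch arithmetic, illustrated (this mirrors TestBatching in the tests): with
+// the default batch size of 1000, logging 16000 messages produces 16 POST
+// requests, plus the initial connection-verification request, 17 in total.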
+
+func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error {
+ if len(messages) == 0 {
+ return nil
+ }
+ var buffer bytes.Buffer
+ var writer io.Writer
+ var gzipWriter *gzip.Writer
+ var err error
+ // If gzip compression is enabled, create a gzip writer with the configured
+ // compression level; otherwise use the plain buffer as the writer
+ if l.gzipCompression {
+ gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
+ if err != nil {
+ return err
+ }
+ writer = gzipWriter
+ } else {
+ writer = &buffer
+ }
+ for _, message := range messages {
+ jsonEvent, err := json.Marshal(message)
+ if err != nil {
+ return err
+ }
+ if _, err := writer.Write(jsonEvent); err != nil {
+ return err
+ }
+ }
+ // If gzip compression is enabled, close the gzip writer to flush it
+ if l.gzipCompression {
+ err = gzipWriter.Close()
+ if err != nil {
+ return err
+ }
+ }
+ req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes()))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", l.auth)
+ // Indicate that we are sending a gzip-compressed body
+ if l.gzipCompression {
+ req.Header.Set("Content-Encoding", "gzip")
+ }
+ res, err := l.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ var body []byte
+ body, err = ioutil.ReadAll(res.Body)
+ if err != nil {
+ return err
+ }
+ return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body)
+ }
+ io.Copy(ioutil.Discard, res.Body)
+ return nil
+}
+
+func (l *splunkLogger) Close() error {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ if l.closedCond == nil {
+ l.closedCond = sync.NewCond(&l.lock)
+ close(l.stream)
+ for !l.closed {
+ l.closedCond.Wait()
+ }
+ }
+ return nil
+}
+
+func (l *splunkLogger) Name() string {
+ return driverName
+}
+
+func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage {
+ message := *l.nullMessage
+ message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second))
+ return &message
+}
+
+// ValidateLogOpt checks that all options are supported by the splunk log driver
+func ValidateLogOpt(cfg map[string]string) error {
+ for key := range cfg {
+ switch key {
+ case splunkURLKey:
+ case splunkTokenKey:
+ case splunkSourceKey:
+ case splunkSourceTypeKey:
+ case splunkIndexKey:
+ case splunkCAPathKey:
+ case splunkCANameKey:
+ case splunkInsecureSkipVerifyKey:
+ case splunkFormatKey:
+ case splunkVerifyConnectionKey:
+ case splunkGzipCompressionKey:
+ case splunkGzipCompressionLevelKey:
+ case envKey:
+ case envRegexKey:
+ case labelsKey:
+ case tagKey:
+ default:
+ return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName)
+ }
+ }
+ return nil
+}
+
+func parseURL(info logger.Info) (*url.URL, error) {
+ splunkURLStr, ok := info.Config[splunkURLKey]
+ if !ok {
+ return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
+ }
+
+ splunkURL, err := url.Parse(splunkURLStr)
+ if err != nil {
+ return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey)
+ }
+
+ if !urlutil.IsURL(splunkURLStr) ||
+ !splunkURL.IsAbs() ||
+ (splunkURL.Path != "" && splunkURL.Path != "/") ||
+ splunkURL.RawQuery != "" ||
+ splunkURL.Fragment != "" {
+ return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
+ }
+
+ splunkURL.Path = "/services/collector/event/1.0"
+
+ return splunkURL, nil
+}
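+
+// parseURL accepts only a bare scheme://host:port endpoint and pins the HEC
+// path itself. For example (illustrative hosts):
+//
+//	splunk-url=https://splunkhost:8088     -> https://splunkhost:8088/services/collector/event/1.0
+//	splunk-url=https://splunkhost:8088/foo -> rejected (path must be empty or "/")
+//	splunk-url=https://splunkhost:8088?x=1 -> rejected (query string not allowed)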
+
+func verifySplunkConnection(l *splunkLogger) error {
+ req, err := http.NewRequest(http.MethodOptions, l.url, nil)
+ if err != nil {
+ return err
+ }
+ res, err := l.client.Do(req)
+ if err != nil {
+ return err
+ }
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+ if res.StatusCode != http.StatusOK {
+ var body []byte
+ body, err = ioutil.ReadAll(res.Body)
+ if err != nil {
+ return err
+ }
+ return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body)
+ }
+ return nil
+}
+
+func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
+ valueStr := os.Getenv(envName)
+ if valueStr == "" {
+ return defaultValue
+ }
+ parsedValue, err := time.ParseDuration(valueStr)
+ if err != nil {
+ logrus.Errorf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)
+ return defaultValue
+ }
+ return parsedValue
+}
+
+func getAdvancedOptionInt(envName string, defaultValue int) int {
+ valueStr := os.Getenv(envName)
+ if valueStr == "" {
+ return defaultValue
+ }
+ parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
+ if err != nil {
+ logrus.Errorf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)
+ return defaultValue
+ }
+ return int(parsedValue)
+}
diff --git a/daemon/logger/splunk/splunk_test.go b/daemon/logger/splunk/splunk_test.go
new file mode 100644
index 0000000..cbe9a55
--- /dev/null
+++ b/daemon/logger/splunk/splunk_test.go
@@ -0,0 +1,1306 @@
+package splunk
+
+import (
+ "compress/gzip"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/daemon/logger"
+)
+
+// Validate options
+func TestValidateLogOpt(t *testing.T) {
+ err := ValidateLogOpt(map[string]string{
+ splunkURLKey: "http://127.0.0.1",
+ splunkTokenKey: "2160C7EF-2CE9-4307-A180-F852B99CF417",
+ splunkSourceKey: "mysource",
+ splunkSourceTypeKey: "mysourcetype",
+ splunkIndexKey: "myindex",
+ splunkCAPathKey: "/usr/cert.pem",
+ splunkCANameKey: "ca_name",
+ splunkInsecureSkipVerifyKey: "true",
+ splunkFormatKey: "json",
+ splunkVerifyConnectionKey: "true",
+ splunkGzipCompressionKey: "true",
+ splunkGzipCompressionLevelKey: "1",
+ envKey: "a",
+ envRegexKey: "^foo",
+ labelsKey: "b",
+ tagKey: "c",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ValidateLogOpt(map[string]string{
+ "not-supported-option": "a",
+ })
+ if err == nil {
+ t.Fatal("Expecting error on unsupported options")
+ }
+}
+
+// The driver requires the user to specify the required options
+func TestNewMissedConfig(t *testing.T) {
+ info := logger.Info{
+ Config: map[string]string{},
+ }
+ _, err := New(info)
+ if err == nil {
+ t.Fatal("Logger driver should fail when no required parameters specified")
+ }
+}
+
+// The driver requires the user to specify splunk-url
+func TestNewMissedUrl(t *testing.T) {
+ info := logger.Info{
+ Config: map[string]string{
+ splunkTokenKey: "4642492F-D8BD-47F1-A005-0C08AE4657DF",
+ },
+ }
+ _, err := New(info)
+ if err.Error() != "splunk: splunk-url is expected" {
+ t.Fatal("Logger driver should fail when no required parameters specified")
+ }
+}
+
+// The driver requires the user to specify splunk-token
+func TestNewMissedToken(t *testing.T) {
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: "http://127.0.0.1:8088",
+ },
+ }
+ _, err := New(info)
+ if err.Error() != "splunk: splunk-token is expected" {
+ t.Fatal("Logger driver should fail when no required parameters specified")
+ }
+}
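+
+// The advanced options exercised by the tests below come from the daemon
+// environment; the values shown here are the documented defaults:
+//
+//	SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY=5s
+//	SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE=1000
+//	SPLUNK_LOGGING_DRIVER_BUFFER_MAX=10000
+//	SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE=4000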
+
+// Test default settings
+func TestDefault(t *testing.T) {
+ hec := NewHTTPEventCollectorMock(t)
+
+ go hec.Serve()
+
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: hec.URL(),
+ splunkTokenKey: hec.token,
+ },
+ ContainerID: "containeriid",
+ ContainerName: "container_name",
+ ContainerImageID: "contaimageid",
+ ContainerImageName: "container_image_name",
+ }
+
+ hostname, err := info.Hostname()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loggerDriver, err := New(info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if loggerDriver.Name() != driverName {
+ t.Fatal("Unexpected logger driver name")
+ }
+
+ if !hec.connectionVerified {
+ t.Fatal("By default connection should be verified")
+ }
+
+ splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline)
+ if !ok {
+ t.Fatal("Unexpected Splunk Logging Driver type")
+ }
+
+ if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" ||
+ splunkLoggerDriver.auth != "Splunk "+hec.token ||
+ splunkLoggerDriver.nullMessage.Host != hostname ||
+ splunkLoggerDriver.nullMessage.Source != "" ||
+ splunkLoggerDriver.nullMessage.SourceType != "" ||
+ splunkLoggerDriver.nullMessage.Index != "" ||
+ splunkLoggerDriver.gzipCompression != false ||
+ splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
+ splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
+ splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
+ cap(splunkLoggerDriver.stream) != defaultStreamChannelSize {
+ t.Fatal("Found non-default values set up in Splunk Logging Driver.")
+ }
+
+ message1Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil {
+ t.Fatal(err)
+ }
+ message2Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("notajson"), Source: "stdout", Timestamp: message2Time}); err != nil {
+ t.Fatal(err)
+ }
+
+ err = loggerDriver.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(hec.messages) != 2 {
+ t.Fatal("Expected two messages")
+ }
+
+ if *hec.gzipEnabled {
+ t.Fatal("Gzip should not be used")
+ }
+
+ message1 := hec.messages[0]
+ if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) ||
+ message1.Host != hostname ||
+ message1.Source != "" ||
+ message1.SourceType != "" ||
+ message1.Index != "" {
+ t.Fatalf("Unexpected values of message 1 %v", message1)
+ }
+
+ if event, err := message1.EventAsMap(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event["line"] != "{\"a\":\"b\"}" ||
+ event["source"] != "stdout" ||
+ event["tag"] != "containeriid" ||
+ len(event) != 3 {
+ t.Fatalf("Unexpected event in message %v", event)
+ }
+ }
+
+ message2 := hec.messages[1]
+ if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) ||
+ message2.Host != hostname ||
+ message2.Source != "" ||
+ message2.SourceType != "" ||
+ message2.Index != "" {
+ t.Fatalf("Unexpected values of message 2 %v", message2)
+ }
+
+ if event, err := message2.EventAsMap(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event["line"] != "notajson" ||
+ event["source"] != "stdout" ||
+ event["tag"] != "containeriid" ||
+ len(event) != 3 {
+ t.Fatalf("Unexpected event in message %v", event)
+ }
+ }
+
+ err = hec.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Verify inline format with non-default settings for most options
+func TestInlineFormatWithNonDefaultOptions(t *testing.T) {
+ hec := NewHTTPEventCollectorMock(t)
+
+ go hec.Serve()
+
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: hec.URL(),
+ splunkTokenKey: hec.token,
+ splunkSourceKey: "mysource",
+ splunkSourceTypeKey: "mysourcetype",
+ splunkIndexKey: "myindex",
+ splunkFormatKey: splunkFormatInline,
+ splunkGzipCompressionKey: "true",
+ tagKey: "{{.ImageName}}/{{.Name}}",
+ labelsKey: "a",
+ envRegexKey: "^foo",
+ },
+ ContainerID: "containeriid",
+ ContainerName: "/container_name",
+ 
ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + ContainerEnv: []string{"foo_finder=bar"}, + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "mysource" || + splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || + splunkLoggerDriver.nullMessage.Index != "myindex" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + messageTime := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("1"), Source: "stdout", Timestamp: messageTime}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 1 { + t.Fatal("Expected one message") + } + + if !*hec.gzipEnabled { + t.Fatal("Gzip should be used") + } + + message := hec.messages[0] + if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) || + message.Host != hostname || + message.Source != "mysource" || + message.SourceType != "mysourcetype" || + message.Index != "myindex" { + t.Fatalf("Unexpected values of message %v", message) + } + + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "1" || + event["source"] != "stdout" || + event["tag"] != "container_image_name/container_name" || + event["attrs"].(map[string]interface{})["a"] != "b" || + event["attrs"].(map[string]interface{})["foo_finder"] != "bar" || + len(event) != 4 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify JSON format +func TestJsonFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatJSON, + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerJSON) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + 
splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"].(map[string]interface{})["a"] != "b" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + // If message cannot be parsed as JSON - it should be sent as a line + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notjson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format +func TestRawFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := info.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + 
splunkLoggerDriver.nullMessage.Index != "" ||
+ splunkLoggerDriver.gzipCompression != false ||
+ splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
+ splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
+ splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
+ cap(splunkLoggerDriver.stream) != defaultStreamChannelSize ||
+ string(splunkLoggerDriver.prefix) != "containeriid " {
+ t.Fatal("Values do not match configuration.")
+ }
+
+ message1Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil {
+ t.Fatal(err)
+ }
+ message2Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil {
+ t.Fatal(err)
+ }
+
+ err = loggerDriver.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(hec.messages) != 2 {
+ t.Fatal("Expected two messages")
+ }
+
+ message1 := hec.messages[0]
+ if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) ||
+ message1.Host != hostname ||
+ message1.Source != "" ||
+ message1.SourceType != "" ||
+ message1.Index != "" {
+ t.Fatalf("Unexpected values of message 1 %v", message1)
+ }
+
+ if event, err := message1.EventAsString(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event != "containeriid {\"a\":\"b\"}" {
+ t.Fatalf("Unexpected event in message 1 %v", event)
+ }
+ }
+
+ message2 := hec.messages[1]
+ if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) ||
+ message2.Host != hostname ||
+ message2.Source != "" ||
+ message2.SourceType != "" ||
+ message2.Index != "" {
+ t.Fatalf("Unexpected values of message 2 %v", message2)
+ }
+
+ if event, err := message2.EventAsString(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event != "containeriid notjson" {
+ t.Fatalf("Unexpected event in message 2 %v", event)
+ }
+ }
+
+ err = hec.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Verify raw format with labels
+func TestRawFormatWithLabels(t *testing.T) {
+ hec := NewHTTPEventCollectorMock(t)
+
+ go hec.Serve()
+
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: hec.URL(),
+ splunkTokenKey: hec.token,
+ splunkFormatKey: splunkFormatRaw,
+ labelsKey: "a",
+ },
+ ContainerID: "containeriid",
+ ContainerName: "/container_name",
+ ContainerImageID: "contaimageid",
+ ContainerImageName: "container_image_name",
+ ContainerLabels: map[string]string{
+ "a": "b",
+ },
+ }
+
+ hostname, err := info.Hostname()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loggerDriver, err := New(info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !hec.connectionVerified {
+ t.Fatal("By default connection should be verified")
+ }
+
+ splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw)
+ if !ok {
+ t.Fatal("Unexpected Splunk Logging Driver type")
+ }
+
+ if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" ||
+ splunkLoggerDriver.auth != "Splunk "+hec.token ||
+ splunkLoggerDriver.nullMessage.Host != hostname ||
+ splunkLoggerDriver.nullMessage.Source != "" ||
+ splunkLoggerDriver.nullMessage.SourceType != "" ||
+ splunkLoggerDriver.nullMessage.Index != "" ||
+ splunkLoggerDriver.gzipCompression != false ||
+ splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
+ splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
+ splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
+ cap(splunkLoggerDriver.stream) 
!= defaultStreamChannelSize ||
+ string(splunkLoggerDriver.prefix) != "containeriid a=b " {
+ t.Fatal("Values do not match configuration.")
+ }
+
+ message1Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil {
+ t.Fatal(err)
+ }
+ message2Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil {
+ t.Fatal(err)
+ }
+
+ err = loggerDriver.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(hec.messages) != 2 {
+ t.Fatal("Expected two messages")
+ }
+
+ message1 := hec.messages[0]
+ if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) ||
+ message1.Host != hostname ||
+ message1.Source != "" ||
+ message1.SourceType != "" ||
+ message1.Index != "" {
+ t.Fatalf("Unexpected values of message 1 %v", message1)
+ }
+
+ if event, err := message1.EventAsString(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event != "containeriid a=b {\"a\":\"b\"}" {
+ t.Fatalf("Unexpected event in message 1 %v", event)
+ }
+ }
+
+ message2 := hec.messages[1]
+ if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) ||
+ message2.Host != hostname ||
+ message2.Source != "" ||
+ message2.SourceType != "" ||
+ message2.Index != "" {
+ t.Fatalf("Unexpected values of message 2 %v", message2)
+ }
+
+ if event, err := message2.EventAsString(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event != "containeriid a=b notjson" {
+ t.Fatalf("Unexpected event in message 2 %v", event)
+ }
+ }
+
+ err = hec.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Verify that the Splunk Logging Driver accepts tag="", which allows sending raw
+// messages exactly as we get them on stdout/stderr
+func TestRawFormatWithoutTag(t *testing.T) {
+ hec := NewHTTPEventCollectorMock(t)
+
+ go hec.Serve()
+
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: hec.URL(),
+ splunkTokenKey: hec.token,
+ splunkFormatKey: splunkFormatRaw,
+ tagKey: "",
+ },
+ ContainerID: "containeriid",
+ ContainerName: "/container_name",
+ ContainerImageID: "contaimageid",
+ ContainerImageName: "container_image_name",
+ }
+
+ hostname, err := info.Hostname()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loggerDriver, err := New(info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !hec.connectionVerified {
+ t.Fatal("By default connection should be verified")
+ }
+
+ splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw)
+ if !ok {
+ t.Fatal("Unexpected Splunk Logging Driver type")
+ }
+
+ if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" ||
+ splunkLoggerDriver.auth != "Splunk "+hec.token ||
+ splunkLoggerDriver.nullMessage.Host != hostname ||
+ splunkLoggerDriver.nullMessage.Source != "" ||
+ splunkLoggerDriver.nullMessage.SourceType != "" ||
+ splunkLoggerDriver.nullMessage.Index != "" ||
+ splunkLoggerDriver.gzipCompression != false ||
+ splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
+ splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
+ splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
+ cap(splunkLoggerDriver.stream) != defaultStreamChannelSize ||
+ string(splunkLoggerDriver.prefix) != "" {
+ t.Log(string(splunkLoggerDriver.prefix) + "a")
+ t.Fatal("Values do not match configuration.")
+ }
+
+ message1Time := time.Now()
+ if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), 
Source: "stdout", Timestamp: message1Time}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "{\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "notjson" { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that we will send messages in batches with default batching parameters, +// but change frequency to be sure that numOfRequests will match expected 17 requests +func TestBatching(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 16 batches + if hec.numOfRequests != 17 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that test is using time to fire events not rare than specified frequency +func TestFrequency(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "5ms"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: 
"container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + time.Sleep(15 * time.Millisecond) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 to verify that we have sent messages with required frequency, + // but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow + if hec.numOfRequests < 9 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Simulate behavior similar to first version of Splunk Logging Driver, when we were sending one message +// per request +func TestOneMessagePerRequest(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 messages + if hec.numOfRequests != 11 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Driver should not be created when HEC is unresponsive +func TestVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + 
ContainerName: "/container_name",
+ ContainerImageID: "contaimageid",
+ ContainerImageName: "container_image_name",
+ }
+
+ _, err := New(info)
+ if err == nil {
+ t.Fatal("Expecting driver to fail, when server is unresponsive")
+ }
+
+ err = hec.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Verify that the user can choose to skip verifying that Splunk HEC is working.
+// This test also exercises the retry logic.
+func TestSkipVerify(t *testing.T) {
+ hec := NewHTTPEventCollectorMock(t)
+ hec.simulateServerError = true
+ go hec.Serve()
+
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: hec.URL(),
+ splunkTokenKey: hec.token,
+ splunkVerifyConnectionKey: "false",
+ },
+ ContainerID: "containeriid",
+ ContainerName: "/container_name",
+ ContainerImageID: "contaimageid",
+ ContainerImageName: "container_image_name",
+ }
+
+ loggerDriver, err := New(info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if hec.connectionVerified {
+ t.Fatal("Connection should not be verified")
+ }
+
+ for i := 0; i < defaultStreamChannelSize*2; i++ {
+ if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if len(hec.messages) != 0 {
+ t.Fatal("No messages should be accepted at this point")
+ }
+
+ hec.simulateServerError = false
+
+ for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ {
+ if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ err = loggerDriver.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(hec.messages) != defaultStreamChannelSize*4 {
+ t.Fatal("Not all messages delivered")
+ }
+
+ for i, message := range hec.messages {
+ if event, err := message.EventAsMap(); err != nil {
+ t.Fatal(err)
+ } else {
+ if event["line"] != fmt.Sprintf("%d", i) {
+ t.Fatalf("Unexpected event in message %v", event)
+ }
+ }
+ }
+
+ err = hec.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Verify the logic for when the whole buffer has been filled
+func TestBufferMaximum(t *testing.T) {
+ if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.Setenv(envVarBufferMaximum, "10"); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil {
+ t.Fatal(err)
+ }
+
+ hec := NewHTTPEventCollectorMock(t)
+ hec.simulateServerError = true
+ go hec.Serve()
+
+ info := logger.Info{
+ Config: map[string]string{
+ splunkURLKey: hec.URL(),
+ splunkTokenKey: hec.token,
+ splunkVerifyConnectionKey: "false",
+ },
+ ContainerID: "containeriid",
+ ContainerName: "/container_name",
+ ContainerImageID: "contaimageid",
+ ContainerImageName: "container_image_name",
+ }
+
+ loggerDriver, err := New(info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if hec.connectionVerified {
+ t.Fatal("Connection should not be verified")
+ }
+
+ for i := 0; i < 11; i++ {
+ if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if len(hec.messages) != 0 {
+ t.Fatal("No messages should be accepted at this point")
+ }
+
+ hec.simulateServerError = false
+
+ err = loggerDriver.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(hec.messages) != 9 {
+ t.Fatalf("Expected # of messages %d, got %d", 9, len(hec.messages))
+ }
+
+ // The first two messages were written to the daemon log when the buffer was full
+ for i, message := range 
hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i+2) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Verify that we are not blocking close when HEC is down for the whole time +func TestServerAlwaysDown(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "4"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 5; i++ { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be sent") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Cannot send messages after we close driver +func TestCannotSendAfterClose(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(info) + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{Line: []byte("message1"), Source: "stdout", Timestamp: time.Now()}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{Line: []byte("message2"), Source: "stdout", Timestamp: time.Now()}); err == nil { + t.Fatal("Driver should not allow to send messages after close") + } + + if len(hec.messages) != 1 { + t.Fatal("Only one message should be sent") + } + + message := hec.messages[0] + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "message1" { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/daemon/logger/splunk/splunkhecmock_test.go b/daemon/logger/splunk/splunkhecmock_test.go new file mode 100644 index 0000000..e508948 --- /dev/null +++ 
b/daemon/logger/splunk/splunkhecmock_test.go @@ -0,0 +1,157 @@ +package splunk + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "testing" +) + +func (message *splunkMessage) EventAsString() (string, error) { + if val, ok := message.Event.(string); ok { + return val, nil + } + return "", fmt.Errorf("Cannot cast Event %v to string", message.Event) +} + +func (message *splunkMessage) EventAsMap() (map[string]interface{}, error) { + if val, ok := message.Event.(map[string]interface{}); ok { + return val, nil + } + return nil, fmt.Errorf("Cannot cast Event %v to map", message.Event) +} + +type HTTPEventCollectorMock struct { + tcpAddr *net.TCPAddr + tcpListener *net.TCPListener + + token string + simulateServerError bool + + test *testing.T + + connectionVerified bool + gzipEnabled *bool + messages []*splunkMessage + numOfRequests int +} + +func NewHTTPEventCollectorMock(t *testing.T) *HTTPEventCollectorMock { + tcpAddr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0, Zone: ""} + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + return &HTTPEventCollectorMock{ + tcpAddr: tcpAddr, + tcpListener: tcpListener, + token: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + simulateServerError: false, + test: t, + connectionVerified: false} +} + +func (hec *HTTPEventCollectorMock) URL() string { + return "http://" + hec.tcpListener.Addr().String() +} + +func (hec *HTTPEventCollectorMock) Serve() error { + return http.Serve(hec.tcpListener, hec) +} + +func (hec *HTTPEventCollectorMock) Close() error { + return hec.tcpListener.Close() +} + +func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + var err error + + hec.numOfRequests++ + + if hec.simulateServerError { + if request.Body != nil { + defer request.Body.Close() + } + writer.WriteHeader(http.StatusInternalServerError) + return + } + + switch request.Method { + case http.MethodOptions: + // Verify that options method is getting called only once + if hec.connectionVerified { + hec.test.Errorf("Connection should not be verified more than once. 
Got second request with %s method.", request.Method)
+		}
+		hec.connectionVerified = true
+		writer.WriteHeader(http.StatusOK)
+	case http.MethodPost:
+		// Always verify that the driver is using the correct path to HEC
+		if request.URL.String() != "/services/collector/event/1.0" {
+			hec.test.Errorf("Unexpected path %v", request.URL)
+		}
+		defer request.Body.Close()
+
+		if authorization, ok := request.Header["Authorization"]; !ok || authorization[0] != ("Splunk "+hec.token) {
+			hec.test.Error("Authorization header is invalid.")
+		}
+
+		gzipEnabled := false
+		if contentEncoding, ok := request.Header["Content-Encoding"]; ok && contentEncoding[0] == "gzip" {
+			gzipEnabled = true
+		}
+
+		if hec.gzipEnabled == nil {
+			hec.gzipEnabled = &gzipEnabled
+		} else if gzipEnabled != *hec.gzipEnabled {
+			// Nothing is inherently wrong with that, but the Splunk logging driver is not expected to change the encoding between requests
+			hec.test.Error("Driver should not change Content Encoding.")
+		}
+
+		var gzipReader *gzip.Reader
+		var reader io.Reader
+		if gzipEnabled {
+			gzipReader, err = gzip.NewReader(request.Body)
+			if err != nil {
+				hec.test.Fatal(err)
+			}
+			reader = gzipReader
+		} else {
+			reader = request.Body
+		}
+
+		// Read body
+		var body []byte
+		body, err = ioutil.ReadAll(reader)
+		if err != nil {
+			hec.test.Fatal(err)
+		}
+
+		// Parse messages (concatenated JSON objects)
+		messageStart := 0
+		for i := 0; i < len(body); i++ {
+			if i == len(body)-1 || (body[i] == '}' && body[i+1] == '{') {
+				var message splunkMessage
+				err = json.Unmarshal(body[messageStart:i+1], &message)
+				if err != nil {
+					hec.test.Log(string(body[messageStart : i+1]))
+					hec.test.Fatal(err)
+				}
+				hec.messages = append(hec.messages, &message)
+				messageStart = i + 1
+			}
+		}
+
+		if gzipEnabled {
+			gzipReader.Close()
+		}
+
+		writer.WriteHeader(http.StatusOK)
+	default:
+		hec.test.Errorf("Unexpected HTTP method %s", request.Method)
+		writer.WriteHeader(http.StatusBadRequest)
+	}
+}
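The mock above splits the POST body on `}{` boundaries by hand. For comparison, a decoder-based sketch (not part of the mock) that consumes the same stream of concatenated JSON objects:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Concatenated JSON events, as the Splunk HEC endpoint receives them.
	body := `{"event":{"line":"0"}}{"event":{"line":"1"}}`
	dec := json.NewDecoder(strings.NewReader(body))
	for {
		var msg map[string]interface{}
		if err := dec.Decode(&msg); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(msg["event"]) // map[line:0], then map[line:1]
	}
}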
diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go
new file mode 100644
index 0000000..42855e1
--- /dev/null
+++ b/daemon/logger/syslog/syslog.go
@@ -0,0 +1,266 @@
+// Package syslog provides the logdriver for forwarding server logs to syslog endpoints.
+package syslog
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	syslog "github.com/RackSec/srslog"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/pkg/urlutil"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+	name        = "syslog"
+	secureProto = "tcp+tls"
+)
+
+var facilities = map[string]syslog.Priority{
+	"kern":     syslog.LOG_KERN,
+	"user":     syslog.LOG_USER,
+	"mail":     syslog.LOG_MAIL,
+	"daemon":   syslog.LOG_DAEMON,
+	"auth":     syslog.LOG_AUTH,
+	"syslog":   syslog.LOG_SYSLOG,
+	"lpr":      syslog.LOG_LPR,
+	"news":     syslog.LOG_NEWS,
+	"uucp":     syslog.LOG_UUCP,
+	"cron":     syslog.LOG_CRON,
+	"authpriv": syslog.LOG_AUTHPRIV,
+	"ftp":      syslog.LOG_FTP,
+	"local0":   syslog.LOG_LOCAL0,
+	"local1":   syslog.LOG_LOCAL1,
+	"local2":   syslog.LOG_LOCAL2,
+	"local3":   syslog.LOG_LOCAL3,
+	"local4":   syslog.LOG_LOCAL4,
+	"local5":   syslog.LOG_LOCAL5,
+	"local6":   syslog.LOG_LOCAL6,
+	"local7":   syslog.LOG_LOCAL7,
+}
+
+type syslogger struct {
+	writer *syslog.Writer
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// rsyslog uses the appname part of the syslog message to fill in the %syslogtag% template
+// attribute in rsyslog.conf. To stay backward compatible with rfc3164,
+// the tag is also used as the appname
+func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	pid := os.Getpid()
+	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s",
+		p, 1, timestamp, hostname, tag, pid, tag, content)
+	return msg
+}
+
+// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances
+// for multiple syntaxes, rfc5424 is more restrictive: the maximum
+// resolution is limited to six digits of "TIME-SECFRAC" (microsecond resolution)
+func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00")
+	pid := os.Getpid()
+	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s",
+		p, 1, timestamp, hostname, tag, pid, tag, content)
+	return msg
+}
+
+// New creates a syslog logger using the configuration passed in on
+// the context. Supported context configuration variables are
+// syslog-address, syslog-facility, syslog-format.
+func New(info logger.Info) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(info.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(info.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto) + if err != nil { + return nil, err + } + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(info.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, tag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + if source == "stderr" { + return s.writer.Err(line) + } + return s.writer.Info(line) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix and unixgram socket validation + if url.Scheme == "unix" || url.Scheme == "unixgram" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility. 
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "env-regex": + case "labels": + case "syslog-address": + case "syslog-facility": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + if proto == secureProto { + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil + case "rfc5424micro": + if proto == secureProto { + return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/daemon/logger/syslog/syslog_test.go b/daemon/logger/syslog/syslog_test.go new file mode 100644 index 0000000..5015610 --- /dev/null +++ b/daemon/logger/syslog/syslog_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "reflect" + "testing" + + syslog "github.com/RackSec/srslog" +) + +func functionMatches(expectedFun interface{}, actualFun interface{}) bool { + return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() +} + +func TestParseLogFormat(t *testing.T) { + formatter, framer, err := parseLogFormat("rfc5424", "udp") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424", "tcp+tls") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "udp") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, 
formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("rfc5424micro", "tcp+tls")
+	if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) ||
+		!functionMatches(syslog.RFC5425MessageLengthFramer, framer) {
+		t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("rfc3164", "")
+	if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) ||
+		!functionMatches(syslog.DefaultFramer, framer) {
+		t.Fatal("Failed to parse rfc3164 format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("", "")
+	if err != nil || !functionMatches(syslog.UnixFormatter, formatter) ||
+		!functionMatches(syslog.DefaultFramer, framer) {
+		t.Fatal("Failed to parse empty format", err, formatter, framer)
+	}
+
+	formatter, framer, err = parseLogFormat("invalid", "")
+	if err == nil {
+		t.Fatal("Expected an error when parsing an invalid format", err, formatter, framer)
+	}
+}
+
+func TestValidateLogOptEmpty(t *testing.T) {
+	emptyConfig := make(map[string]string)
+	if err := ValidateLogOpt(emptyConfig); err != nil {
+		t.Fatal("Failed to parse empty config", err)
+	}
+}
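A note on the arithmetic parseFacility relies on: a syslog PRI value is facility<<3 | severity, which is why numeric facilities 0-23 are shifted left by three. A small sketch (standard syslog math, not code from this patch):

package main

import "fmt"

func main() {
	const (
		facilityLocal0 = 16 // "local0" in the facilities map
		severityInfo   = 6  // the level syslogger.Log uses for stdout lines
	)
	pri := facilityLocal0<<3 | severityInfo
	// Prints "<134>", the PRI prefix of the RFC 5424 lines the formatters emit.
	fmt.Printf("<%d>\n", pri)
}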
diff --git a/daemon/logs.go b/daemon/logs.go
new file mode 100644
index 0000000..96e1b8a
--- /dev/null
+++ b/daemon/logs.go
@@ -0,0 +1,175 @@
+package daemon
+
+import (
+	"errors"
+	"strconv"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	containertypes "github.com/docker/docker/api/types/container"
+	timetypes "github.com/docker/docker/api/types/time"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/logger"
+)
+
+// ContainerLogs copies the container's log channel to the channel provided in
+// the config. If ContainerLogs returns an error, no messages have been copied,
+// and the channel will be closed without data.
+//
+// If it returns nil, the config channel will be active and return log
+// messages until it runs out or the context is canceled.
+func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (<-chan *backend.LogMessage, error) {
+	lg := logrus.WithFields(logrus.Fields{
+		"module":    "daemon",
+		"method":    "(*Daemon).ContainerLogs",
+		"container": containerName,
+	})
+
+	if !(config.ShowStdout || config.ShowStderr) {
+		return nil, errors.New("You must choose at least one stream")
+	}
+	container, err := daemon.GetContainer(containerName)
+	if err != nil {
+		return nil, err
+	}
+
+	if container.RemovalInProgress || container.Dead {
+		return nil, errors.New("can not get logs from container which is dead or marked for removal")
+	}
+
+	if container.HostConfig.LogConfig.Type == "none" {
+		return nil, logger.ErrReadLogsNotSupported
+	}
+
+	cLog, cLogCreated, err := daemon.getLogger(container)
+	if err != nil {
+		return nil, err
+	}
+	if cLogCreated {
+		defer func() {
+			if err = cLog.Close(); err != nil {
+				logrus.Errorf("Error closing logger: %v", err)
+			}
+		}()
+	}
+
+	logReader, ok := cLog.(logger.LogReader)
+	if !ok {
+		return nil, logger.ErrReadLogsNotSupported
+	}
+
+	follow := config.Follow && !cLogCreated
+	tailLines, err := strconv.Atoi(config.Tail)
+	if err != nil {
+		tailLines = -1
+	}
+
+	var since time.Time
+	if config.Since != "" {
+		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
+		if err != nil {
+			return nil, err
+		}
+		since = time.Unix(s, n)
+	}
+
+	readConfig := logger.ReadConfig{
+		Since:  since,
+		Tail:   tailLines,
+		Follow: follow,
+	}
+
+	logs := logReader.ReadLogs(readConfig)
+
+	// past this point, we can't possibly return any errors, so we can just
+	// start a goroutine and return to tell the caller not to expect errors
+	// (if the caller wants to give up on logs, they have to cancel the context)
+	// this goroutine functions as a shim between the logger and the caller.
+	messageChan := make(chan *backend.LogMessage, 1)
+	go func() {
+		// set up some defers
+		defer logs.Close()
+
+		// close the messages channel. closing is the only way to signal above
+		// that we're done with logs (other than context cancel i guess).
+		defer close(messageChan)
+
+		lg.Debug("begin logs")
+		for {
+			select {
+			// i do not believe as the system is currently designed any error
+			// is possible, but we should be prepared to handle it anyway. if
+			// we do get an error, copy only the error field to a new object so
+			// we don't end up with partial data in the other fields
+			case err := <-logs.Err:
+				lg.Errorf("Error streaming logs: %v", err)
+				select {
+				case <-ctx.Done():
+				case messageChan <- &backend.LogMessage{Err: err}:
+				}
+				return
+			case <-ctx.Done():
+				lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
+				return
+			case msg, ok := <-logs.Msg:
+				// there is some kind of pool or ring buffer in the logger that
+				// produces these messages, and a possible future optimization
+				// might be to use that pool and reuse message objects
+				if !ok {
+					lg.Debug("end logs")
+					return
+				}
+				m := msg.AsLogMessage() // just a pointer conversion, does not copy data
+
+				// there could be a case where the reader stops accepting
+				// messages and the context is canceled. we need to check that
+				// here, or otherwise we risk blocking forever on the message
+				// send.
+				select {
+				case <-ctx.Done():
+					return
+				case messageChan <- m:
+				}
+			}
+		}
+	}()
+	return messageChan, nil
+}
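A sketch of how a caller might drain the channel ContainerLogs returns; the helper name is hypothetical, and the imports are the ones logs.go already uses plus fmt:

func drainContainerLogs(ctx context.Context, d *Daemon, name string) error {
	msgs, err := d.ContainerLogs(ctx, name, &types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
	if err != nil {
		return err // nothing was copied and the channel is closed
	}
	for m := range msgs {
		if m.Err != nil {
			return m.Err // an error event ends the stream
		}
		fmt.Printf("%s: %s", m.Source, m.Line)
	}
	return nil // channel closed: logs ran out or ctx was canceled
}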
+
+func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) {
+	container.Lock()
+	if container.State.Running {
+		l = container.LogDriver
+	}
+	container.Unlock()
+	if l == nil {
+		created = true
+		l, err = container.StartLogger()
+	}
+	return
+}
+
+// mergeAndVerifyLogConfig merges the daemon's log config into the container's log config if the container's log driver is not specified, then validates the result.
+func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error {
+	if cfg.Type == "" {
+		cfg.Type = daemon.defaultLogConfig.Type
+	}
+
+	if cfg.Config == nil {
+		cfg.Config = make(map[string]string)
+	}
+
+	if cfg.Type == daemon.defaultLogConfig.Type {
+		for k, v := range daemon.defaultLogConfig.Config {
+			if _, ok := cfg.Config[k]; !ok {
+				cfg.Config[k] = v
+			}
+		}
+	}
+
+	return logger.ValidateLogOpts(cfg.Type, cfg.Config)
+}
diff --git a/daemon/logs_test.go b/daemon/logs_test.go
new file mode 100644
index 0000000..0c36299
--- /dev/null
+++ b/daemon/logs_test.go
@@ -0,0 +1,15 @@
+package daemon
+
+import (
+	"testing"
+
+	containertypes "github.com/docker/docker/api/types/container"
+)
+
+func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) {
+	d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}}
+	cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type}
+	if err := d.mergeAndVerifyLogConfig(&cfg); err != nil {
+		t.Fatal(err)
+	}
+}
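The merge direction can be shown with a variant of the test above (a sketch; the function name is ours): a container config that names the daemon's default driver inherits any default options it does not set itself.

func ExampleMergeBehavior() {
	d := &Daemon{defaultLogConfig: containertypes.LogConfig{
		Type:   "json-file",
		Config: map[string]string{"max-file": "1"},
	}}
	cfg := containertypes.LogConfig{Type: "json-file"}
	if err := d.mergeAndVerifyLogConfig(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Config["max-file"])
	// Output: 1
}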
diff --git a/daemon/metrics.go b/daemon/metrics.go
new file mode 100644
index 0000000..bf9e49d
--- /dev/null
+++ b/daemon/metrics.go
@@ -0,0 +1,174 @@
+package daemon
+
+import (
+	"path/filepath"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/go-metrics"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const metricsPluginType = "MetricsCollector"
+
+var (
+	containerActions          metrics.LabeledTimer
+	containerStates           metrics.LabeledGauge
+	imageActions              metrics.LabeledTimer
+	networkActions            metrics.LabeledTimer
+	engineInfo                metrics.LabeledGauge
+	engineCpus                metrics.Gauge
+	engineMemory              metrics.Gauge
+	healthChecksCounter       metrics.Counter
+	healthChecksFailedCounter metrics.Counter
+
+	stateCtr *stateCounter
+)
+
+func init() {
+	ns := metrics.NewNamespace("engine", "daemon", nil)
+	containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
+	for _, a := range []string{
+		"start",
+		"changes",
+		"commit",
+		"create",
+		"delete",
+	} {
+		containerActions.WithValues(a).Update(0)
+	}
+
+	networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action")
+	engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"),
+		"version",
+		"commit",
+		"architecture",
+		"graphdriver",
+		"kernel", "os",
+		"os_type",
+		"daemon_id", // ID is a randomly generated unique identifier (e.g. UUID4)
+	)
+	engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus"))
+	engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes)
+	healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks")
+	healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks")
+	imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action")
+
+	stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state"))
+	ns.Add(stateCtr)
+
+	metrics.Register(ns)
+}
+
+type stateCounter struct {
+	mu     sync.Mutex
+	states map[string]string
+	desc   *prometheus.Desc
+}
+
+func newStateCounter(desc *prometheus.Desc) *stateCounter {
+	return &stateCounter{
+		states: make(map[string]string),
+		desc:   desc,
+	}
+}
+
+func (ctr *stateCounter) get() (running int, paused int, stopped int) {
+	ctr.mu.Lock()
+	defer ctr.mu.Unlock()
+
+	states := map[string]int{
+		"running": 0,
+		"paused":  0,
+		"stopped": 0,
+	}
+	for _, state := range ctr.states {
+		states[state]++
+	}
+	return states["running"], states["paused"], states["stopped"]
+}
+
+func (ctr *stateCounter) set(id, label string) {
+	ctr.mu.Lock()
+	ctr.states[id] = label
+	ctr.mu.Unlock()
+}
+
+func (ctr *stateCounter) del(id string) {
+	ctr.mu.Lock()
+	delete(ctr.states, id)
+	ctr.mu.Unlock()
+}
+
+func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) {
+	ch <- ctr.desc
+}
+
+func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) {
+	running, paused, stopped := ctr.get()
+	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running")
+	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused")
+	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped")
+}
+
+func (d *Daemon) cleanupMetricsPlugins() {
+	ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType)
+	var wg sync.WaitGroup
+	wg.Add(len(ls))
+
+	for _, p := range ls {
+		p := p // pin the loop variable so each goroutine stops its own plugin
+		go func() {
+			defer wg.Done()
+			pluginStopMetricsCollection(p)
+		}()
+	}
+	wg.Wait()
+
+	if d.metricsPluginListener != nil {
+		d.metricsPluginListener.Close()
+	}
+}
+
+type metricsPlugin struct {
+	plugingetter.CompatPlugin
+}
+
+func (p metricsPlugin) sock() string {
+	return "metrics.sock"
+}
+
+func (p metricsPlugin) sockBase() string {
+	return filepath.Join(p.BasePath(), "run", "docker")
+}
+
+func pluginStartMetricsCollection(p plugingetter.CompatPlugin) error {
+	type metricsPluginResponse struct {
+		Err string
+	}
+	var res metricsPluginResponse
+	if err := p.Client().Call(metricsPluginType+".StartMetrics", nil, &res); err != nil {
+		return errors.Wrap(err, "could not start metrics plugin")
+	}
+	if res.Err != "" {
+		return errors.New(res.Err)
+	}
+	return nil
+}
+
+func pluginStopMetricsCollection(p plugingetter.CompatPlugin) {
+	if err := p.Client().Call(metricsPluginType+".StopMetrics", nil, nil); err != nil {
+		logrus.WithError(err).WithField("name", p.Name()).Error("error stopping metrics collector")
+	}
+
+	mp := metricsPlugin{p}
+	sockPath := filepath.Join(mp.sockBase(), mp.sock())
+	if err := mount.Unmount(sockPath); err != nil {
+		if mounted, _ := mount.Mounted(sockPath); mounted {
+			logrus.WithError(err).WithField("name", p.Name()).WithField("socket",
sockPath).Error("error unmounting metrics socket for plugin") + } + } + return +} diff --git a/daemon/metrics_unix.go b/daemon/metrics_unix.go new file mode 100644 index 0000000..cda7355 --- /dev/null +++ b/daemon/metrics_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package daemon + +import ( + "net" + "net/http" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + metrics "github.com/docker/go-metrics" + "github.com/pkg/errors" +) + +func (daemon *Daemon) listenMetricsSock() (string, error) { + path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock") + syscall.Unlink(path) + l, err := net.Listen("unix", path) + if err != nil { + return "", errors.Wrap(err, "error setting up metrics plugin listener") + } + + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + http.Serve(l, mux) + }() + daemon.metricsPluginListener = l + return path, nil +} + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { + getter.Handle(metricsPluginType, func(name string, client *plugins.Client) { + // Use lookup since nothing in the system can really reference it, no need + // to protect against removal + p, err := getter.Get(name, metricsPluginType, plugingetter.Lookup) + if err != nil { + return + } + + mp := metricsPlugin{p} + sockBase := mp.sockBase() + if err := os.MkdirAll(sockBase, 0755); err != nil { + logrus.WithError(err).WithField("name", name).WithField("path", sockBase).Error("error creating metrics plugin base path") + return + } + + defer func() { + if err != nil { + os.RemoveAll(sockBase) + } + }() + + pluginSockPath := filepath.Join(sockBase, mp.sock()) + _, err = os.Stat(pluginSockPath) + if err == nil { + mount.Unmount(pluginSockPath) + } else { + logrus.WithField("path", pluginSockPath).Debugf("creating plugin socket") + f, err := os.OpenFile(pluginSockPath, os.O_CREATE, 0600) + if err != nil { + return + } + f.Close() + } + + if err := mount.Mount(sockPath, pluginSockPath, "none", "bind,ro"); err != nil { + logrus.WithError(err).WithField("name", name).Error("could not mount metrics socket to plugin") + return + } + + if err := pluginStartMetricsCollection(p); err != nil { + if err := mount.Unmount(pluginSockPath); err != nil { + if mounted, _ := mount.Mounted(pluginSockPath); mounted { + logrus.WithError(err).WithField("sock_path", pluginSockPath).Error("error unmounting metrics socket from plugin during cleanup") + } + } + logrus.WithError(err).WithField("name", name).Error("error while initializing metrics plugin") + } + }) +} diff --git a/daemon/metrics_unsupported.go b/daemon/metrics_unsupported.go new file mode 100644 index 0000000..64dc181 --- /dev/null +++ b/daemon/metrics_unsupported.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon + +import "github.com/docker/docker/pkg/plugingetter" + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { +} + +func (daemon *Daemon) listenMetricsSock() (string, error) { + return "", nil +} diff --git a/daemon/monitor.go b/daemon/monitor.go new file mode 100644 index 0000000..5993ae9 --- /dev/null +++ b/daemon/monitor.go @@ -0,0 +1,180 @@ +package daemon + +import ( + "errors" + "fmt" + "runtime" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" + 
"github.com/docker/docker/restartmanager" +) + +func (daemon *Daemon) setStateCounter(c *container.Container) { + switch c.StateString() { + case "paused": + stateCtr.set(c.ID, "paused") + case "running": + stateCtr.set(c.ID, "running") + default: + stateCtr.set(c.ID, "stopped") + } +} + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.updateHealthMonitor(c) + c.Lock() + defer c.Unlock() + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + + c.Lock() + c.StreamConfig.Wait() + c.Reset(false) + + var health types.Health + if c.Health != nil { + health = c.Health.Health + } + + // If daemon is being shutdown, don't let the container restart + restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt), health) + if err == nil && restart { + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + } else { + c.SetStopped(platformConstructExitStatus(e)) + defer daemon.autoRemove(c) + } + + // cancel healthcheck here, they will be automatically + // restarted if/when the container is started again + daemon.stopHealthchecks(c) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + + if err == nil && restart { + go func() { + err := <-wait + if err == nil { + // daemon.netController is initialized when daemon is restoring containers. + // But containerStart will use daemon.netController segment. + // So to avoid panic at startup process, here must wait util daemon restore done. + daemon.waitForStartupDone() + if err = daemon.containerStart(c, "", "", false); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } + } + if err != nil { + c.SetStopped(platformConstructExitStatus(e)) + defer daemon.autoRemove(c) + if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } + } + }() + } + + daemon.setStateCounter(c) + + defer c.Unlock() + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + return daemon.postRunProcessing(c, e) + case libcontainerd.StateExitProcess: + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.Lock() + defer execConfig.Unlock() + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.StreamConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. 
+			c.ExecCommands.Delete(execConfig.ID)
+		} else {
+			logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e)
+		}
+	case libcontainerd.StateStart, libcontainerd.StateRestore:
+		// Container is already locked in this case
+		c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart)
+		c.HasBeenManuallyStopped = false
+		c.HasBeenStartedBefore = true
+		daemon.setStateCounter(c)
+
+		daemon.initHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			c.Reset(false)
+			return err
+		}
+
+		daemon.LogContainerEvent(c, "start")
+	case libcontainerd.StatePause:
+		// Container is already locked in this case
+		c.Paused = true
+		daemon.setStateCounter(c)
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
+		daemon.LogContainerEvent(c, "pause")
+	case libcontainerd.StateResume:
+		// Container is already locked in this case
+		c.Paused = false
+		daemon.setStateCounter(c)
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
+		daemon.LogContainerEvent(c, "unpause")
+	}
+	return nil
+}
+
+func (daemon *Daemon) autoRemove(c *container.Container) {
+	c.Lock()
+	ar := c.HostConfig.AutoRemove
+	c.Unlock()
+	if !ar {
+		return
+	}
+
+	var err error
+	if err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err == nil {
+		return
+	}
+	if c := daemon.containers.Get(c.ID); c == nil {
+		return
+	}
+
+	if err != nil {
+		logrus.WithError(err).WithField("container", c.ID).Error("error removing container")
+	}
+}
diff --git a/daemon/monitor_linux.go b/daemon/monitor_linux.go
new file mode 100644
index 0000000..09f5af5
--- /dev/null
+++ b/daemon/monitor_linux.go
@@ -0,0 +1,19 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform-specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode:  int(e.ExitCode),
+		OOMKilled: e.OOMKilled,
+	}
+}
+
+// postRunProcessing performs any processing needed on the container after it has stopped.
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+	return nil
+}
diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go
new file mode 100644
index 0000000..5ccfada
--- /dev/null
+++ b/daemon/monitor_solaris.go
@@ -0,0 +1,18 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform-specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode: int(e.ExitCode),
+	}
+}
+
+// postRunProcessing performs any processing needed on the container after it has stopped.
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+	return nil
+}
diff --git a/daemon/monitor_windows.go b/daemon/monitor_windows.go
new file mode 100644
index 0000000..9648b1b
--- /dev/null
+++ b/daemon/monitor_windows.go
@@ -0,0 +1,46 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform-specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode: int(e.ExitCode),
+	}
+}
+
+// postRunProcessing performs any processing needed on the container after it has stopped.
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+	if e.ExitCode == 0 && e.UpdatePending {
+		spec, err := daemon.createSpec(container)
+		if err != nil {
+			return err
+		}
+
+		newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{
+			IsServicing: true,
+		}}
+
+		copts, err := daemon.getLibcontainerdCreateOptions(container)
+		if err != nil {
+			return err
+		}
+
+		if copts != nil {
+			newOpts = append(newOpts, copts...)
+		}
+
+		// Create a new servicing container, which will start, complete the update, and merge back the
+		// results if it succeeded, all as part of the below function call.
+		if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil {
+			container.SetExitCode(-1)
+			return fmt.Errorf("Post-run update servicing failed: %s", err)
+		}
+	}
+	return nil
+}
diff --git a/daemon/mounts.go b/daemon/mounts.go
new file mode 100644
index 0000000..35c6ed5
--- /dev/null
+++ b/daemon/mounts.go
@@ -0,0 +1,53 @@
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/container"
+	volumestore "github.com/docker/docker/volume/store"
+)
+
+func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
+	for _, config := range container.MountPoints {
+		if err := daemon.lazyInitializeVolume(container.ID, config); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error {
+	var rmErrors []string
+	for _, m := range container.MountPoints {
+		if m.Type != mounttypes.TypeVolume || m.Volume == nil {
+			continue
+		}
+		daemon.volumes.Dereference(m.Volume, container.ID)
+		if !rm {
+			continue
+		}
+
+		// Do not remove named mountpoints;
+		// these are mountpoints specified like `docker run -v :/foo`
+		if m.Spec.Source != "" {
+			continue
+		}
+
+		err := daemon.volumes.Remove(m.Volume)
+		// Ignore volume-in-use errors because a volume
+		// being referenced by another container is
+		// not an error, but an implementation detail.
+		// This prevents docker from logging "ERROR: Volume in use"
+		// where there is another container using the volume.
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/daemon/names.go b/daemon/names.go new file mode 100644 index 0000000..ec6ac29 --- /dev/null +++ b/daemon/names.go @@ -0,0 +1,112 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/stringid" +) + +var ( + validContainerNameChars = api.RestrictedNameChars + validContainerNamePattern = api.RestrictedNamePattern +) + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The container name %q is already in use by container %q. 
You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %q, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} diff --git a/daemon/network.go b/daemon/network.go new file mode 100644 index 0000000..366c2a5 --- /dev/null +++ b/daemon/network.go @@ -0,0 +1,567 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "sort" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + lncluster "github.com/docker/libnetwork/cluster" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + networktypes "github.com/docker/libnetwork/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows. +func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetworkByName(idName) + if err != nil && !isNoSuchNetworkError(err) { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + return daemon.GetNetworkByID(idName) +} + +func isNoSuchNetworkError(err error) bool { + _, ok := err.(libnetwork.ErrNoSuchNetwork) + return ok +} + +// GetNetworkByID function returns a network whose ID begins with the given prefix. +// It fails with an error if no matching, or more than one matching, networks are found. +func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { + list := daemon.GetNetworksByID(partialID) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(partialID) + } + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(partialID) + } + return list[0], nil +} + +// GetNetworkByName function returns a network for a given network name. +// If no network name is given, the default network is returned. 
+func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) {
+	c := daemon.netController
+	if c == nil {
+		return nil, libnetwork.ErrNoSuchNetwork(name)
+	}
+	if name == "" {
+		name = c.Config().Daemon.DefaultNetwork
+	}
+	return c.NetworkByName(name)
+}
+
+// GetNetworksByID returns the list of networks whose IDs begin with the given prefix
+func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network {
+	c := daemon.netController
+	if c == nil {
+		return nil
+	}
+	list := []libnetwork.Network{}
+	l := func(nw libnetwork.Network) bool {
+		if strings.HasPrefix(nw.ID(), partialID) {
+			list = append(list, nw)
+		}
+		return false
+	}
+	c.WalkNetworks(l)
+
+	return list
+}
+
+// getAllNetworks returns a list containing all networks
+func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
+	return daemon.netController.Networks()
+}
+
+type ingressJob struct {
+	create  *clustertypes.NetworkCreateRequest
+	ip      net.IP
+	jobDone chan struct{}
+}
+
+var (
+	ingressWorkerOnce  sync.Once
+	ingressJobsChannel chan *ingressJob
+	ingressID          string
+)
+
+func (daemon *Daemon) startIngressWorker() {
+	ingressJobsChannel = make(chan *ingressJob, 100)
+	go func() {
+		for {
+			select {
+			case r := <-ingressJobsChannel:
+				if r.create != nil {
+					daemon.setupIngress(r.create, r.ip, ingressID)
+					ingressID = r.create.ID
+				} else {
+					daemon.releaseIngress(ingressID)
+					ingressID = ""
+				}
+				close(r.jobDone)
+			}
+		}
+	}()
+}
+
+// enqueueIngressJob adds an ingress add/rm request to the worker queue.
+// It guarantees the worker is started.
+func (daemon *Daemon) enqueueIngressJob(job *ingressJob) {
+	ingressWorkerOnce.Do(daemon.startIngressWorker)
+	ingressJobsChannel <- job
+}
+
+// SetupIngress sets up ingress networking.
+// The function returns a channel which will signal the caller when the programming is completed.
+func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) {
+	ip, _, err := net.ParseCIDR(nodeIP)
+	if err != nil {
+		return nil, err
+	}
+	done := make(chan struct{})
+	daemon.enqueueIngressJob(&ingressJob{&create, ip, done})
+	return done, nil
+}
+
+// ReleaseIngress releases the ingress networking.
+// The function returns a channel which will signal the caller when the programming is completed.
+func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
+	done := make(chan struct{})
+	daemon.enqueueIngressJob(&ingressJob{nil, nil, done})
+	return done, nil
+}
+
+func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
+	controller := daemon.netController
+	controller.AgentInitWait()
+
+	if staleID != "" && staleID != create.ID {
+		daemon.releaseIngress(staleID)
+	}
+
+	if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
+		// If it is any error other than an already-exists
+		// error, log it and return.
+		if _, ok := err.(libnetwork.NetworkNameError); !ok {
+			logrus.Errorf("Failed creating ingress network: %v", err)
+			return
+		}
+		// Otherwise continue down the call to create or recreate sandbox.
+ } + + n, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + } + + sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) + if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + logrus.Errorf("Failed creating ingress sandbox: %v", err) + } + return + } + + ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) + if err != nil { + logrus.Errorf("Failed creating ingress endpoint: %v", err) + return + } + + if err := ep.Join(sb, nil); err != nil { + logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) + return + } + + if err := sb.EnableService(); err != nil { + logrus.Errorf("Failed enabling service for ingress sandbox") + } +} + +func (daemon *Daemon) releaseIngress(id string) { + controller := daemon.netController + if err := controller.SandboxDestroy("ingress-sbox"); err != nil { + logrus.Errorf("Failed to delete ingress sandbox: %v", err) + } + + if id == "" { + return + } + + n, err := controller.NetworkByID(id) + if err != nil { + logrus.Errorf("failed to retrieve ingress network %s: %v", id, err) + return + } + + for _, ep := range n.Endpoints() { + if err := ep.Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + return + } + } + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) + return + } + return +} + +// SetNetworkBootstrapKeys sets the bootstrap keys. +func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { + err := daemon.netController.SetKeys(keys) + if err == nil { + // Upon successful key setting dispatch the keys available event + daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable) + } + return err +} + +// UpdateAttachment notifies the attacher about the attachment config. +func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { + return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) + } + + return nil +} + +// WaitForDetachment makes the cluster manager wait for detachment of +// the container from the network. +func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) +} + +// CreateManagedNetwork creates an agent network. 
+func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { + _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) + return err +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { + resp, err := daemon.createNetwork(create, "", false) + if err != nil { + return nil, err + } + return resp, err +} + +func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { + if runconfig.IsPreDefinedNetwork(create.Name) && !agent { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) + return nil, apierrors.NewRequestForbiddenError(err) + } + + var warning string + nw, err := daemon.GetNetworkByName(create.Name) + if err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + return nil, err + } + } + if nw != nil { + // check if user defined CheckDuplicate, if set true, return err + // otherwise prepare a warning message + if create.CheckDuplicate { + return nil, libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + c := daemon.netController + driver := create.Driver + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{ + libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(create.Options), + libnetwork.NetworkOptionLabels(create.Labels), + libnetwork.NetworkOptionAttachable(create.Attachable), + libnetwork.NetworkOptionIngress(create.Ingress), + libnetwork.NetworkOptionScope(create.Scope), + } + + if create.ConfigOnly { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly()) + } + + if create.IPAM != nil { + ipam := create.IPAM + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) + } + + if create.Internal { + nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) + } + if agent { + nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) + nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) + } + + if create.ConfigFrom != nil { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network)) + } + + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) + if err != nil { + if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok { + return nil, errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + return nil, err + } + + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire) + if create.IPAM != nil { + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire) + } + daemon.LogNetworkEvent(n, "create") + + return &types.NetworkCreateResponse{ + ID: n.ID(), + Warning: warning, + }, nil +} + +func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { + var builtinDrivers []string + + if capability == driverapi.NetworkPluginEndpointType { + builtinDrivers = daemon.netController.BuiltinDrivers() + } else if capability == ipamapi.PluginEndpointType { + builtinDrivers = daemon.netController.BuiltinIPAMDrivers() + } + + for _, d := range builtinDrivers { + if d == driver { + return + } + } + + if daemon.PluginStore != nil { + _, err := daemon.PluginStore.Get(driver, capability, mode) + if err != nil { + logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + } + } +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// UpdateContainerServiceConfig updates a service configuration. +func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + container.NetworkSettings.Service = serviceConfig + return nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. +func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network connect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName, endpointConfig) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network disconnect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + if force { + return daemon.ForceEndpointDelete(containerName, networkName) + } + return err + } + return daemon.DisconnectFromNetwork(container, networkName, force) +} + +// GetNetworkDriverList returns the list of plugins drivers +// registered for network. 
+func (daemon *Daemon) GetNetworkDriverList() []string {
+	if !daemon.NetworkControllerEnabled() {
+		return nil
+	}
+
+	pluginList := daemon.netController.BuiltinDrivers()
+
+	managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType)
+
+	for _, plugin := range managedPlugins {
+		pluginList = append(pluginList, plugin.Name())
+	}
+
+	pluginMap := make(map[string]bool)
+	for _, plugin := range pluginList {
+		pluginMap[plugin] = true
+	}
+
+	networks := daemon.netController.Networks()
+
+	for _, network := range networks {
+		if !pluginMap[network.Type()] {
+			pluginList = append(pluginList, network.Type())
+			pluginMap[network.Type()] = true
+		}
+	}
+
+	sort.Strings(pluginList)
+
+	return pluginList
+}
+
+// DeleteManagedNetwork deletes an agent network.
+func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
+	return daemon.deleteNetwork(networkID, true)
+}
+
+// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
+func (daemon *Daemon) DeleteNetwork(networkID string) error {
+	return daemon.deleteNetwork(networkID, false)
+}
+
+func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
+	nw, err := daemon.FindNetwork(networkID)
+	if err != nil {
+		return err
+	}
+
+	if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
+		err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
+		return apierrors.NewRequestForbiddenError(err)
+	}
+
+	if dynamic && !nw.Info().Dynamic() {
+		if runconfig.IsPreDefinedNetwork(nw.Name()) {
+			// Predefined networks now support swarm services. Make this
+			// a no-op when cluster requests to remove the predefined network.
+			return nil
+		}
+		err := fmt.Errorf("%s is not a dynamic network", nw.Name())
+		return apierrors.NewRequestForbiddenError(err)
+	}
+
+	if err := nw.Delete(); err != nil {
+		return err
+	}
+
+	// If this is not a configuration-only network, we need to
+	// update the corresponding remote drivers' reference counts
+	if !nw.Info().ConfigOnly() {
+		daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release)
+		ipamType, _, _, _ := nw.Info().IpamConfig()
+		daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release)
+		daemon.LogNetworkEvent(nw, "destroy")
+	}
+
+	return nil
+}
+
+// GetNetworks returns a list of all networks
+func (daemon *Daemon) GetNetworks() []libnetwork.Network {
+	return daemon.getAllNetworks()
+}
+
+// clearAttachableNetworks removes the attachable networks
+// after disconnecting any connected container
+func (daemon *Daemon) clearAttachableNetworks() {
+	for _, n := range daemon.GetNetworks() {
+		if !n.Info().Attachable() {
+			continue
+		}
+		for _, ep := range n.Endpoints() {
+			epInfo := ep.Info()
+			if epInfo == nil {
+				continue
+			}
+			sb := epInfo.Sandbox()
+			if sb == nil {
+				continue
+			}
+			containerID := sb.ContainerID()
+			if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil {
+				logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v",
+					containerID, n.Name(), err)
+			}
+		}
+		if err := daemon.DeleteManagedNetwork(n.ID()); err != nil {
+			logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err)
+		}
+	}
+}
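The prefix-match contract of GetNetworkByID above distinguishes "no match" from "ambiguous match" by error type; a sketch of how a caller could tell them apart (the helper name is hypothetical):

func classifyLookup(daemon *Daemon, prefix string) string {
	_, err := daemon.GetNetworkByID(prefix)
	switch err.(type) {
	case nil:
		return "exactly one network ID begins with the prefix"
	case libnetwork.ErrNoSuchNetwork:
		return "no network ID begins with the prefix"
	case libnetwork.ErrInvalidID:
		return "more than one network ID begins with the prefix"
	default:
		return err.Error()
	}
}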
"github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go new file mode 100644 index 0000000..6d74301 --- /dev/null +++ b/daemon/oci_linux.go @@ -0,0 +1,838 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + daemonconfig "github.com/docker/docker/daemon/config" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +var ( + deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes, err := getCPUResources(r) + if err != nil { + return err + } + blkioWeight := r.BlkioWeight + + specResources := &specs.LinuxResources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.LinuxBlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.LinuxPids{ + Limit: r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed 
and created within the container. + var devs []specs.LinuxDevice + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, oci.Device(d)) + } + devPermissions = []specs.LinuxDeviceCgroup{ + { + Allow: true, + Access: "rwm", + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + + for _, deviceCgroupRule := range c.HostConfig.DeviceCgroupRules { + ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1) + if len(ss) == 0 || len(ss[0]) != 5 { + return fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule) + } + matches := ss[0] + + dPermissions := specs.LinuxDeviceCgroup{ + Allow: true, + Type: matches[1], + Access: matches[4], + } + if matches[2] == "*" { + major := int64(-1) + dPermissions.Major = &major + } else { + major, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Major = &major + } + if matches[3] == "*" { + minor := int64(-1) + dPermissions.Minor = &minor + } else { + minor, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Minor = &minor + } + devPermissions = append(devPermissions, dPermissions) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...)
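// A runnable, standalone sketch of what the deviceCgroupRuleRegex above
// accepts (illustrative code, not from the patch): "c 1:3 rwm" decomposes
// into type/major/minor/access, and "*" for major or minor maps to -1 ("any").
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	re := regexp.MustCompile(`^([acb]) ([0-9]+|\*):([0-9]+|\*) ([rwm]{1,3})$`)
	m := re.FindStringSubmatch("c 1:3 rwm")
	if m == nil {
		panic("rule did not match")
	}
	parse := func(s string) int64 {
		if s == "*" {
			return -1
		}
		n, _ := strconv.ParseInt(s, 10, 64)
		return n
	}
	fmt.Printf("type=%s major=%d minor=%d access=%s\n", m[1], parse(m[2]), parse(m[3]), m[4])
	// Output: type=c major=1 minor=3 access=rwm
}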
+ s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.LinuxRlimit + + // We want to leave the original HostConfig alone so make a copy here + hostConfig := *c.HostConfig + // Merge with the daemon defaults + daemon.mergeUlimits(&hostConfig) + for _, ul := range hostConfig.Ulimits { + rlimits = append(rlimits, specs.LinuxRlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) 
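// A short standalone sketch of the mapping setRlimits applies above
// (illustrative types, not the daemon's own): the docker ulimit name "nofile"
// becomes the OCI rlimit type "RLIMIT_NOFILE", with soft/hard carried over.
package main

import (
	"fmt"
	"strings"
)

type rlimit struct {
	Type string
	Soft uint64
	Hard uint64
}

func main() {
	name, soft, hard := "nofile", int64(1024), int64(4096)
	r := rlimit{
		Type: "RLIMIT_" + strings.ToUpper(name),
		Soft: uint64(soft),
		Hard: uint64(hard),
	}
	fmt.Printf("%+v\n", r) // {Type:RLIMIT_NOFILE Soft:1024 Hard:4096}
}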
+ var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Effective, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities.Effective = caplist + s.Process.Capabilities.Bounding = caplist + s.Process.Capabilities.Permitted = caplist + s.Process.Capabilities.Inheritable = caplist + return nil +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap := daemon.idMappings.UIDs() + if uidMap != nil { + userNS = true + ns := specs.LinuxNamespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(daemon.idMappings.GIDs()) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.LinuxNamespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.LinuxNamespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = c.NetworkSettings.SandboxKey + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.LinuxNamespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.LinuxNamespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc")) + } else { + ns := specs.LinuxNamespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsContainer() { + ns := specs.LinuxNamespace{Type: "pid"} + pc, err := daemon.getPidContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share a PID namespace, they must also share a user namespace + nsUser := specs.LinuxNamespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.PidMode.IsHost() { + oci.RemoveNamespace(s, specs.LinuxNamespaceType("pid")) + } else { + ns := specs.LinuxNamespace{Type: "pid"} + setNamespace(s, ns) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s 
[]idtools.IDMap) []specs.LinuxIDMapping { + var ids []specs.LinuxIDMapping + for _, item := range s { + ids = append(ids, specs.LinuxIDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of the directory passed in as an argument. +// Also return its optional mount fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find a parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure that the mount point on which path is mounted is shared. +func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure that the mount point on which path is mounted is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + }
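// A standalone sketch of the propagation check the two ensure* helpers above
// perform (assuming the optional-fields format described in proc(5)): a
// /proc/self/mountinfo entry's optional fields carry "shared:N" and/or
// "master:N" tags that reveal the mount's propagation mode.
package main

import (
	"fmt"
	"strings"
)

func propagation(optionalFields string) string {
	for _, opt := range strings.Split(optionalFields, " ") {
		if strings.HasPrefix(opt, "shared:") {
			return "shared"
		}
		if strings.HasPrefix(opt, "master:") {
			return "slave"
		}
	}
	return "private"
}

func main() {
	fmt.Println(propagation("shared:1")) // shared
	fmt.Println(propagation("master:2")) // slave
	fmt.Println(propagation(""))         // private
}

+ // Make sure source mount point is either shared or slave.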
+ optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overridden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + data := m.Data + options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + if data != "" { + options = append(options, strings.Split(data, ",")...) + } + + merged, err := mount.MergeTmpfsOptions(options) + if err != nil { + return err + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. 
+ pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap := daemon.idMappings.UIDs(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) + + // only add the custom init if it is specified and the container is running in its + // own private pid namespace. It does not make sense to add if it is running in the + // host namespace or another container's pid namespace where we already have an init + if c.HostConfig.PidMode.IsPrivate() { + if (c.HostConfig.Init != nil && *c.HostConfig.Init) || + (c.HostConfig.Init == nil && daemon.configStore.Init) { + s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) 
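// A tiny standalone sketch of the argv rewrite just above (illustrative): with
// the custom init enabled, an entrypoint of "echo hi" is launched as
// ["/dev/init", "--", "echo", "hi"], and the init binary is bind-mounted at
// /dev/init in the code that follows.
package main

import "fmt"

func withInit(path string, args []string) []string {
	return append([]string{"/dev/init", "--", path}, args...)
}

func main() {
	fmt.Println(withInit("echo", []string{"hi"})) // [/dev/init -- echo hi]
}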
+ var path string + if daemon.configStore.InitPath == "" { + path, err = exec.LookPath(daemonconfig.DefaultInitBinary) + if err != nil { + return err + } + } else { + path = daemon.configStore.InitPath + } + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/dev/init", + Type: "bind", + Source: path, + Options: []string{"bind", "ro"}, + }) + } + } + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != "" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + s.Linux.Sysctl = c.HostConfig.Sysctls + + p := s.Linux.CgroupsPath + if useSystemd { + initPath, err := cgroups.GetInitCgroup("cpu") + if err != nil { + return nil, err + } + p, err = cgroups.GetOwnCgroup("cpu") + if err != nil { + return nil, err + } + p = filepath.Join(initPath, p) + } + + // Clean path to guard against things like ../../../BAD + parentPath := filepath.Dir(p) + if !filepath.IsAbs(parentPath) { + parentPath = filepath.Clean("/" + parentPath) + } + + if err := daemon.initCgroupsPath(parentPath); err != nil { + return nil, fmt.Errorf("linux init cgroups path: %v", err) + } + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + if err := daemon.setupConfigDir(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + + ms = append(ms, c.IpcMounts()...) + + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + + if m := c.SecretMounts(); m != nil { + ms = append(ms, m...) + } + + ms = append(ms, c.ConfigMounts()...)
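// A standalone sketch of the two cgroup path layouts computed in createSpec
// above (illustrative values): systemd expects a "parent:scope-prefix:name"
// triple, while the cgroupfs driver uses a plain slash-separated path.
package main

import (
	"fmt"
	"path/filepath"
)

func cgroupsPath(useSystemd bool, parent, id string) string {
	if useSystemd {
		return parent + ":docker:" + id // e.g. system.slice:docker:<id>
	}
	return filepath.Join(parent, id) // e.g. /docker/<id>
}

func main() {
	fmt.Println(cgroupsPath(true, "system.slice", "abc123"))
	fmt.Println(cgroupsPath(false, "/docker", "abc123"))
}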
+ + sort.Sort(mounts(ms)) + if err := setMounts(daemon, &s, c, ms); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + for _, ns := range s.Linux.Namespaces { + if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + if err != nil { + return nil, err + } + + s.Hooks = &specs.Hooks{ + Prestart: []specs.Hook{{ + Path: target, // FIXME: cross-platform + Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, + }}, + } + } + } + + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. + if err := ensureDefaultAppArmorProfile(); err != nil { + return nil, err + } + } + + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*specs.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} + +// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + ulimits := c.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + c.Ulimits = ulimits +} diff --git a/daemon/oci_solaris.go b/daemon/oci_solaris.go new file mode 100644 index 0000000..610efe1 --- /dev/null +++ b/daemon/oci_solaris.go @@ -0,0 +1,187 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "sort" + "strconv" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/libnetwork" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + mem := getMemoryResources(r) + s.Solaris.CappedMemory = &mem + + capCPU := getCPUResources(r) + s.Solaris.CappedCPU = &capCPU + + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + return 0, 0, nil, nil +} + +func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) { + var ( + linkName string + lowerLink string + defRouter string + ) + + epInfo := ep.Info() + if epInfo == nil { + return specs.Anet{}, fmt.Errorf("invalid endpoint") + } + + nw, err :=
daemon.GetNetworkByName(ep.Network()) + if err != nil { + return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err) + } + + // Evaluate default router, linkname and lowerlink for interface endpoint + switch nw.Type() { + case "bridge": + defRouter = epInfo.Gateway().String() + linkName = "net0" // Should always be net0 for a container + + // TODO We construct lowerlink here exactly as done for solaris bridge + // initialization. Need modular code to reuse. + options := nw.Info().DriverOptions() + nwName := options["com.docker.network.bridge.name"] + lastChar := nwName[len(nwName)-1:] + if _, err = strconv.Atoi(lastChar); err != nil { + lowerLink = nwName + "_0" + } else { + lowerLink = nwName + } + + case "overlay": + defRouter = "" + linkName = "net1" + + // TODO Follows generateVxlanName() in solaris overlay. + id := nw.ID() + if len(nw.ID()) > 12 { + id = nw.ID()[:12] + } + lowerLink = "vx_" + id + "_0" + } + + runzanet := specs.Anet{ + Linkname: linkName, + Lowerlink: lowerLink, + Allowedaddr: epInfo.Iface().Address().String(), + Configallowedaddr: "true", + Defrouter: defRouter, + Linkprotection: "mac-nospoof, ip-nospoof", + Macaddress: epInfo.Iface().MacAddress().String(), + } + + return runzanet, nil +} + +func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error { + var anets []specs.Anet + + sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) + if err != nil { + return fmt.Errorf("Could not obtain sandbox for container") + } + + // Populate interfaces required for each endpoint + for _, ep := range sb.Endpoints() { + runzanet, err := daemon.getRunzAnet(ep) + if err != nil { + return fmt.Errorf("Failed to get interface information for endpoint %s: %v", ep.ID(), err) + } + anets = append(anets, runzanet) + } + + s.Solaris.Anet = anets + if anets != nil { + s.Solaris.Milestone = "svc:/milestone/container:default" + } + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: filepath.Dir(c.BaseFS), + Readonly: c.HostConfig.ReadonlyRootfs, + } + if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { + return err + } + cwd := c.Config.WorkingDir + s.Process.Args = append([]string{c.Path}, c.Args...) + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("runtime spec resources: %v", err) + } + + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("spec user: %v", err) + } + + if err := daemon.setNetworkInterface(&s, c); err != nil { + return nil, err + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + ms = append(ms, c.IpcMounts()...) + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...)
+ sort.Sort(mounts(ms)) + + return (*specs.Spec)(&s), nil +} + +// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig. +// It does nothing on non-Linux platforms. +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/daemon/oci_windows.go b/daemon/oci_windows.go new file mode 100644 index 0000000..9f44c66 --- /dev/null +++ b/daemon/oci_windows.go @@ -0,0 +1,208 @@ +package daemon + +import ( + "syscall" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + img, err := daemon.GetImage(string(c.ImageID)) + if err != nil { + return nil, err + } + + s := oci.DefaultOSSpec(img.OS) + + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return nil, err + } + + // Note, unlike Unix, we do NOT call into SetupWorkingDirectory as + // this is done in VMCompute. Further, we couldn't do it for Hyper-V + // containers anyway. + + // In base spec + s.Hostname = c.FullHostname() + + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + if err := daemon.setupConfigDir(c); err != nil { + return nil, err + } + + // In s.Mounts + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + + var isHyperV bool + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. + isHyperV = c.HostConfig.Isolation.IsHyperV() + } + + // If the container has not been started, and has configs or secrets, + // create symlinks to each config and secret. If it has been + // started before, the symlinks should have already been created. Also, it + // is important to not mount a Hyper-V container that has been started + // before, to protect the host from the container; for example, from + // malicious mutation of NTFS data structures. + if !c.HasBeenStartedBefore && (len(c.SecretReferences) > 0 || len(c.ConfigReferences) > 0) { + // The container file system is mounted before this function is called, + // except for Hyper-V containers, so mount it here in that case. + if isHyperV { + if err := daemon.Mount(c); err != nil { + return nil, err + } + defer daemon.Unmount(c) + } + if err := c.CreateSecretSymlinks(); err != nil { + return nil, err + } + if err := c.CreateConfigSymlinks(); err != nil { + return nil, err + } + } + + if m := c.SecretMounts(); m != nil { + mounts = append(mounts, m...) + } + + if m := c.ConfigMounts(); m != nil { + mounts = append(mounts, m...) + } + + for _, mount := range mounts { + m := specs.Mount{ + Source: mount.Source, + Destination: mount.Destination, + } + if !mount.Writable { + m.Options = append(m.Options, "ro") + } + s.Mounts = append(s.Mounts, m) + } + + // In s.Process + s.Process.Args = append([]string{c.Path}, c.Args...)
+ if !c.Config.ArgsEscaped && img.OS == "windows" { + s.Process.Args = escapeArgs(s.Process.Args) + } + + s.Process.Cwd = c.Config.WorkingDir + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + if c.Config.Tty { + s.Process.Terminal = c.Config.Tty + s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] + s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] + } + s.Process.User.Username = c.Config.User + + if img.OS == "windows" { + daemon.createSpecWindowsFields(c, &s, isHyperV) + } else { + // TODO @jhowardmsft LCOW Support. Modify this check when running in dual-mode + if system.LCOWSupported() && img.OS == "linux" { + daemon.createSpecLinuxFields(c, &s) + } + } + + return (*specs.Spec)(&s), nil +} + +// Sets the Windows-specific fields of the OCI spec +func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.Spec, isHyperV bool) { + if len(s.Process.Cwd) == 0 { + // We default to C:\ to work around the oddity of the case that the + // default directory for cmd running as LocalSystem (or + // ContainerAdministrator) is c:\windows\system32. Hence docker run + // cmd will by default end in c:\windows\system32, rather + // than 'root' (/) on Linux. The oddity is that if you have a dockerfile + // which has no WORKDIR and has a COPY file ., . will be interpreted + // as c:\. Hence, setting it to default of c:\ makes for consistency. + s.Process.Cwd = `C:\` + } + + s.Root.Readonly = false // Windows does not support a read-only root filesystem + if !isHyperV { + s.Root.Path = c.BaseFS // This is not set for Hyper-V containers + } + + // In s.Windows.Resources + cpuShares := uint16(c.HostConfig.CPUShares) + cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100 + cpuCount := uint64(c.HostConfig.CPUCount) + if c.HostConfig.NanoCPUs > 0 { + if isHyperV { + cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9) + leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 { + cpuCount++ + cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000)) + if cpuMaximum < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuMaximum = 1 + } + } + } else { + cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000)) + if cpuMaximum < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuMaximum = 1 + } + } + } + memoryLimit := uint64(c.HostConfig.Memory) + s.Windows.Resources = &specs.WindowsResources{ + CPU: &specs.WindowsCPUResources{ + Maximum: &cpuMaximum, + Shares: &cpuShares, + Count: &cpuCount, + }, + Memory: &specs.WindowsMemoryResources{ + Limit: &memoryLimit, + }, + Storage: &specs.WindowsStorageResources{ + Bps: &c.HostConfig.IOMaximumBandwidth, + Iops: &c.HostConfig.IOMaximumIOps, + }, + } +}
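// A worked standalone sketch of the Hyper-V NanoCPU conversion in
// createSpecWindowsFields above (illustrative): 2.5 CPUs (2,500,000,000
// NanoCPUs) rounds the count up to 3 and caps each to 8333/10000 of a core,
// so the container still totals roughly 2.5 CPUs.
package main

import "fmt"

func hyperVCPU(nanoCPUs int64) (count uint64, maximum uint16) {
	count = uint64(nanoCPUs / 1e9)
	if leftover := nanoCPUs % 1e9; leftover != 0 {
		count++
		maximum = uint16(nanoCPUs / int64(count) / (1e9 / 10000))
		if maximum < 1 {
			maximum = 1 // requested NanoCPUs so small it rounded to 0
		}
	}
	return count, maximum
}

func main() {
	count, maximum := hyperVCPU(2500000000)
	fmt.Println(count, maximum) // 3 8333
}

+
+// Sets the Linux-specific fields of the OCI spec
+// TODO: @jhowardmsft LCOW Support. We need to do a lot more pulling in what can
+// be pulled in from oci_linux.go.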
+func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spec) { + if len(s.Process.Cwd) == 0 { + s.Process.Cwd = `/` + } + s.Root.Path = "rootfs" + s.Root.Readonly = c.HostConfig.ReadonlyRootfs +} + +func escapeArgs(args []string) []string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = syscall.EscapeArg(a) + } + return escapedArgs +} + +// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig. +// It does nothing on non-Linux platforms. +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/daemon/pause.go b/daemon/pause.go new file mode 100644 index 0000000..dbfafbc --- /dev/null +++ b/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot pause a container that is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot pause a container that is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot pause a container that is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/daemon/prune.go b/daemon/prune.go new file mode 100644 index 0000000..9a363e2 --- /dev/null +++ b/daemon/prune.go @@ -0,0 +1,468 @@ +package daemon + +import ( + "fmt" + "regexp" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" + digest "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + // errPruneRunning is returned when a prune request is received while + // one is in progress + errPruneRunning = fmt.Errorf("a prune operation is already running") + + containersAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, + } + volumesAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + } + imagesAcceptedFilters = map[string]bool{ + "dangling": true, + "label": true, + "label!": true, + "until": true, + } + networksAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, + } +) + +// ContainersPrune removes unused containers +func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + if
!atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + rep := &types.ContainersPruneReport{} + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(containersAcceptedFilters) + if err != nil { + return nil, err + } + + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err + } + + allContainers := daemon.List() + for _, c := range allContainers { + select { + case <-ctx.Done(): + logrus.Warnf("ContainersPrune operation cancelled: %#v", *rep) + return rep, ctx.Err() + default: + } + + if !c.IsRunning() { + if !until.IsZero() && c.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, c.Config.Labels) { + continue + } + cSize, _ := daemon.getSize(c.ID) + // TODO: sets RmLink to true? + err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + if err != nil { + logrus.Warnf("failed to prune container %s: %v", c.ID, err) + continue + } + if cSize > 0 { + rep.SpaceReclaimed += uint64(cSize) + } + rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) + } + } + + return rep, nil +} + +// VolumesPrune removes unused local volumes +func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(volumesAcceptedFilters) + if err != nil { + return nil, err + } + + rep := &types.VolumesPruneReport{} + + pruneVols := func(v volume.Volume) error { + select { + case <-ctx.Done(): + logrus.Warnf("VolumesPrune operation cancelled: %#v", *rep) + return ctx.Err() + default: + } + + name := v.Name() + refs := daemon.volumes.Refs(v) + + if len(refs) == 0 { + detailedVolume, ok := v.(volume.DetailedVolume) + if ok { + if !matchLabels(pruneFilters, detailedVolume.Labels()) { + return nil + } + } + vSize, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("could not determine size of volume %s: %v", name, err) + } + err = daemon.volumes.Remove(v) + if err != nil { + logrus.Warnf("could not remove volume %s: %v", name, err) + return nil + } + rep.SpaceReclaimed += uint64(vSize) + rep.VolumesDeleted = append(rep.VolumesDeleted, name) + } + + return nil + } + + err = daemon.traverseLocalVolumes(pruneVols) + + return rep, err +} + +// ImagesPrune removes unused images +func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + // TODO @jhowardmsft LCOW Support: This will need revisiting later. 
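// A compact standalone sketch of the single-flight guard each prune entry
// point above uses (illustrative code): CompareAndSwapInt32 admits exactly one
// concurrent prune; every other caller fails fast with errPruneRunning.
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var pruneRunning int32

func prune() error {
	if !atomic.CompareAndSwapInt32(&pruneRunning, 0, 1) {
		return errors.New("a prune operation is already running")
	}
	defer atomic.StoreInt32(&pruneRunning, 0)
	// ... the actual pruning work would happen here ...
	return nil
}

func main() {
	fmt.Println(prune()) // <nil>
}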
+ platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(imagesAcceptedFilters) + if err != nil { + return nil, err + } + + rep := &types.ImagesPruneReport{} + + danglingOnly := true + if pruneFilters.Include("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling")) + } + } + + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err + } + + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = daemon.stores[platform].imageStore.Heads() + } else { + allImages = daemon.stores[platform].imageStore.Map() + } + allContainers := daemon.List() + imageRefs := map[string]bool{} + for _, c := range allContainers { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + imageRefs[c.ID] = true + } + } + + // Filter intermediary images and get their unique size + allLayers := daemon.stores[platform].layerStore.Map() + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + dgst := digest.Digest(id) + if len(daemon.stores[platform].referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 { + continue + } + if !until.IsZero() && img.Created.After(until) { + continue + } + if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) { + continue + } + topImages[id] = img + } + } + + canceled := false +deleteImagesLoop: + for id := range topImages { + select { + case <-ctx.Done(): + // we still want to calculate freed size and return the data + canceled = true + break deleteImagesLoop + default: + } + + dgst := digest.Digest(id) + hex := dgst.Hex() + if _, ok := imageRefs[hex]; ok { + continue + } + + deletedImages := []types.ImageDeleteResponseItem{} + refs := daemon.stores[platform].referenceStore.References(dgst) + if len(refs) > 0 { + shouldDelete := !danglingOnly + if !shouldDelete { + hasTag := false + for _, ref := range refs { + if _, ok := ref.(reference.NamedTagged); ok { + hasTag = true + break + } + } + + // Only delete if it's untagged (i.e. repo:) + shouldDelete = !hasTag + } + + if shouldDelete { + for _, ref := range refs { + imgDel, err := daemon.ImageDelete(ref.String(), false, true) + if err != nil { + logrus.Warnf("could not delete reference %s: %v", ref.String(), err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + } + } else { + imgDel, err := daemon.ImageDelete(hex, false, true) + if err != nil { + logrus.Warnf("could not delete image %s: %v", hex, err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + + rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) 
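// A standalone sketch of how the "dangling" prune filter above is interpreted
// (illustrative): "true"/"1" keeps the default of pruning only dangling
// images, "false"/"0" widens the prune to all unused images, and anything
// else is rejected.
package main

import "fmt"

func danglingOnly(filter string) (bool, error) {
	switch filter {
	case "", "true", "1": // unset defaults to dangling-only
		return true, nil
	case "false", "0":
		return false, nil
	}
	return false, fmt.Errorf("Invalid filter 'dangling=%s'", filter)
}

func main() {
	for _, f := range []string{"", "false", "bogus"} {
		only, err := danglingOnly(f)
		fmt.Println(f, only, err)
	}
}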
+ } + + // Compute how much space was freed + for _, d := range rep.ImagesDeleted { + if d.Deleted != "" { + chid := layer.ChainID(d.Deleted) + if l, ok := allLayers[chid]; ok { + diffSize, err := l.DiffSize() + if err != nil { + logrus.Warnf("failed to get layer %s size: %v", chid, err) + continue + } + rep.SpaceReclaimed += uint64(diffSize) + } + } + } + + if canceled { + logrus.Warnf("ImagesPrune operation cancelled: %#v", *rep) + return nil, ctx.Err() + } + + return rep, nil +} + +// localNetworksPrune removes unused local networks +func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filters.Args) *types.NetworksPruneReport { + rep := &types.NetworksPruneReport{} + + until, _ := getUntilFromPruneFilters(pruneFilters) + + // When the function returns true, the walk will stop. + l := func(nw libnetwork.Network) bool { + select { + case <-ctx.Done(): + return true + default: + } + if nw.Info().ConfigOnly() { + return false + } + if !until.IsZero() && nw.Info().Created().After(until) { + return false + } + if !matchLabels(pruneFilters, nw.Info().Labels()) { + return false + } + nwName := nw.Name() + if runconfig.IsPreDefinedNetwork(nwName) { + return false + } + if len(nw.Endpoints()) > 0 { + return false + } + if err := daemon.DeleteNetwork(nw.ID()); err != nil { + logrus.Warnf("could not remove local network %s: %v", nwName, err) + return false + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) + return false + } + daemon.netController.WalkNetworks(l) + return rep +} + +// clusterNetworksPrune removes unused cluster networks +func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + + until, _ := getUntilFromPruneFilters(pruneFilters) + + cluster := daemon.GetCluster() + + if !cluster.IsManager() { + return rep, nil + } + + networks, err := cluster.GetNetworks() + if err != nil { + return rep, err + } + networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) + for _, nw := range networks { + select { + case <-ctx.Done(): + return rep, ctx.Err() + default: + if nw.Ingress { + // Routing-mesh network removal has to be explicitly invoked by user + continue + } + if !until.IsZero() && nw.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, nw.Labels) { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. 
is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) + } + } + return rep, nil +} + +// NetworksPrune removes unused networks +func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(networksAcceptedFilters) + if err != nil { + return nil, err + } + + if _, err := getUntilFromPruneFilters(pruneFilters); err != nil { + return nil, err + } + + rep := &types.NetworksPruneReport{} + + localRep := daemon.localNetworksPrune(ctx, pruneFilters) + rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) + + select { + case <-ctx.Done(): + logrus.Warnf("NetworksPrune operation cancelled: %#v", *rep) + return nil, ctx.Err() + default: + } + + return rep, nil +} + +func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { + until := time.Time{} + if !pruneFilters.Include("until") { + return until, nil + } + untilFilters := pruneFilters.Get("until") + if len(untilFilters) > 1 { + return until, fmt.Errorf("more than one until filter specified") + } + ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) + if err != nil { + return until, err + } + until = time.Unix(seconds, nanoseconds) + return until, nil +} + +func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { + if !pruneFilters.MatchKVList("label", labels) { + return false + } + // By default MatchKVList will return true if field (like 'label!') does not exist + // So we have to add additional Include("label!") check + if pruneFilters.Include("label!") { + if pruneFilters.MatchKVList("label!", labels) { + return false + } + } + return true +} diff --git a/daemon/reload.go b/daemon/reload.go new file mode 100644 index 0000000..e474559 --- /dev/null +++ b/daemon/reload.go @@ -0,0 +1,312 @@ +package daemon + +import ( + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/libcontainerd" +) + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Platform runtime +// - Daemon debug log level +// - Daemon max concurrent downloads +// - Daemon max concurrent uploads +// - Daemon shutdown timeout (in seconds) +// - Cluster discovery (reconfigure and restart) +// - Daemon labels +// - Insecure registries +// - Registry mirrors +// - Daemon live restore +func (daemon *Daemon) Reload(conf *config.Config) (err error) { + daemon.configStore.Lock() + attributes := map[string]string{} + + defer func() { + // we're unlocking here, because + // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() + // holds that lock too. 
+ daemon.configStore.Unlock() + if err == nil { + daemon.LogDaemonEventWithAttributes("reload", attributes) + } + }() + + daemon.reloadPlatform(conf, attributes) + daemon.reloadDebug(conf, attributes) + daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) + daemon.reloadShutdownTimeout(conf, attributes) + + if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { + return err + } + if err := daemon.reloadLabels(conf, attributes); err != nil { + return err + } + if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { + return err + } + if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { + return err + } + if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { + return err + } + if err := daemon.reloadLiveRestore(conf, attributes); err != nil { + return err + } + return nil +} + +// reloadDebug updates configuration with Debug option +// and updates the passed attributes +func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { + // update corresponding configuration + if conf.IsValueSet("debug") { + daemon.configStore.Debug = conf.Debug + } + // prepare reload event attributes with updatable configurations + attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) +} + +// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent +// download and upload options and updates the passed attributes +func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { + // If no value is set for max-concurrent-downloads we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { + *daemon.configStore.MaxConcurrentDownloads = *conf.MaxConcurrentDownloads + } else { + maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads + daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads + } + logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) + if daemon.downloadManager != nil { + daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) + } + + // prepare reload event attributes with updatable configurations + attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) +
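// A standalone sketch of the "always reset" rule the reload helpers above
// follow (illustrative names): an explicitly-set value wins, otherwise the
// field snaps back to the compiled-in default instead of keeping a value left
// over from an earlier reload.
package main

import "fmt"

const defaultMaxConcurrentDownloads = 3

func reloadValue(isSet bool, requested int) int {
	if isSet {
		return requested
	}
	return defaultMaxConcurrentDownloads
}

func main() {
	fmt.Println(reloadValue(true, 10)) // 10: explicitly configured
	fmt.Println(reloadValue(false, 0)) // 3: unset falls back to the default
}

+ // If no value is set for max-concurrent-uploads we assume it is the default value
+ // We always "reset" as the cost is lightweight and easy to maintain.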
+ if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { + *daemon.configStore.MaxConcurrentUploads = *conf.MaxConcurrentUploads + } else { + maxConcurrentUploads := config.DefaultMaxConcurrentUploads + daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads + } + logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) + if daemon.uploadManager != nil { + daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) + } + + // prepare reload event attributes with updatable configurations + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) +} + +// reloadShutdownTimeout updates configuration with daemon shutdown timeout option +// and updates the passed attributes +func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { + // update corresponding configuration + if conf.IsValueSet("shutdown-timeout") { + daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout + logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) + } + + // prepare reload event attributes with updatable configurations + attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) +} + +// reloadClusterDiscovery updates configuration with cluster discovery options +// and updates the passed attributes +func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { + defer func() { + // prepare reload event attributes with updatable configurations + attributes["cluster-store"] = conf.ClusterStore + attributes["cluster-advertise"] = conf.ClusterAdvertise + + attributes["cluster-store-opts"] = "{}" + if daemon.configStore.ClusterOpts != nil { + opts, err2 := json.Marshal(conf.ClusterOpts) + if err != nil { + err = err2 + } + attributes["cluster-store-opts"] = string(opts) + } + }() + + newAdvertise := conf.ClusterAdvertise + newClusterStore := daemon.configStore.ClusterStore + if conf.IsValueSet("cluster-advertise") { + if conf.IsValueSet("cluster-store") { + newClusterStore = conf.ClusterStore + } + newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) + if err != nil && err != discovery.ErrDiscoveryDisabled { + return err + } + } + + if daemon.clusterProvider != nil { + if err := conf.IsSwarmCompatible(); err != nil { + return err + } + } + + // check discovery modifications + if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { + return nil + } + + // enable discovery for the first time if it was not previously enabled + if daemon.discoveryWatcher == nil { + discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) + if err != nil { + return fmt.Errorf("failed to initialize discovery: %v", err) + } + daemon.discoveryWatcher = discoveryWatcher + } else if err == discovery.ErrDiscoveryDisabled { + // disable discovery if it was previously enabled and it's disabled now + daemon.discoveryWatcher.Stop() + } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { + // reload discovery + return err + } + + daemon.configStore.ClusterStore = newClusterStore + daemon.configStore.ClusterOpts = conf.ClusterOpts + daemon.configStore.ClusterAdvertise = newAdvertise + + if daemon.netController == nil { + return nil + } + netOptions, err := daemon.networkOptions(daemon.configStore, 
daemon.PluginStore, nil) + if err != nil { + logrus.WithError(err).Warnf("failed to get options with network controller") + return nil + } + err = daemon.netController.ReloadConfiguration(netOptions...) + if err != nil { + logrus.Warnf("Failed to reload configuration with network controller: %v", err) + } + return nil +} + +// reloadLabels updates configuration with engine labels +// and updates the passed attributes +func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("labels") { + daemon.configStore.Labels = conf.Labels + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.Labels != nil { + labels, err := json.Marshal(daemon.configStore.Labels) + if err != nil { + return err + } + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + + return nil +} + +// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options +// and updates the passed attributes. +func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { + // Update corresponding configuration. + if conf.IsValueSet("allow-nondistributable-artifacts") { + daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts + if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { + return err + } + } + + // Prepare reload event attributes with updatable configurations. + if daemon.configStore.AllowNondistributableArtifacts != nil { + v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) + if err != nil { + return err + } + attributes["allow-nondistributable-artifacts"] = string(v) + } else { + attributes["allow-nondistributable-artifacts"] = "[]" + } + + return nil +} + +// reloadInsecureRegistries updates configuration with insecure registry option +// and updates the passed attributes +func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("insecure-registries") { + daemon.configStore.InsecureRegistries = conf.InsecureRegistries + if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.InsecureRegistries != nil { + insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) + if err != nil { + return err + } + attributes["insecure-registries"] = string(insecureRegistries) + } else { + attributes["insecure-registries"] = "[]" + } + + return nil +} + +// reloadRegistryMirrors updates configuration with registry mirror options +// and updates the passed attributes +func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("registry-mirrors") { + daemon.configStore.Mirrors = conf.Mirrors + if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.Mirrors != nil { + mirrors, err := json.Marshal(daemon.configStore.Mirrors) + if err != nil { + return err + } + attributes["registry-mirrors"] = string(mirrors) + } else { + 
attributes["registry-mirrors"] = "[]" + } + + return nil +} + +// reloadLiveRestore updates configuration with live retore option +// and updates the passed attributes +func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled + if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(conf.LiveRestoreEnabled)); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + return nil +} diff --git a/daemon/reload_test.go b/daemon/reload_test.go new file mode 100644 index 0000000..bf11b6b --- /dev/null +++ b/daemon/reload_test.go @@ -0,0 +1,474 @@ +// +build !solaris + +package daemon + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/registry" +) + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{}, + } + + // Initialize daemon with some registries. + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + AllowNondistributableArtifacts: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // This will be removed during reload. + "docker1.com", + "docker2.com", // This will be removed during reload. + }, + }) + + registries := []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.33:5000", // This will be added during reload. + "docker1.com", + "docker3.com", // This will be added during reload. 
+
+func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) {
+	daemon := &Daemon{
+		configStore: &config.Config{},
+	}
+
+	// Initialize daemon with some registries.
+	daemon.RegistryService = registry.NewService(registry.ServiceOptions{
+		AllowNondistributableArtifacts: []string{
+			"127.0.0.0/8",
+			"10.10.1.11:5000",
+			"10.10.1.22:5000", // This will be removed during reload.
+			"docker1.com",
+			"docker2.com", // This will be removed during reload.
+		},
+	})
+
+	registries := []string{
+		"127.0.0.0/8",
+		"10.10.1.11:5000",
+		"10.10.1.33:5000", // This will be added during reload.
+		"docker1.com",
+		"docker3.com", // This will be added during reload.
+	}
+
+	newConfig := &config.Config{
+		CommonConfig: config.CommonConfig{
+			ServiceOptions: registry.ServiceOptions{
+				AllowNondistributableArtifacts: registries,
+			},
+			ValuesSet: map[string]interface{}{
+				"allow-nondistributable-artifacts": registries,
+			},
+		},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+
+	actual := []string{}
+	serviceConfig := daemon.RegistryService.ServiceConfig()
+	for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs {
+		actual = append(actual, value.String())
+	}
+	for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames {
+		actual = append(actual, value)
+	}
+
+	sort.Strings(registries)
+	sort.Strings(actual)
+	if !reflect.DeepEqual(registries, actual) {
+		t.Fatalf("expected %v, got %v\n", registries, actual)
+	}
+}
+
+func TestDaemonReloadMirrors(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.RegistryService = registry.NewService(registry.ServiceOptions{
+		InsecureRegistries: []string{},
+		Mirrors: []string{
+			"https://mirror.test1.com",
+			"https://mirror.test2.com", // this will be removed when reloading
+			"https://mirror.test3.com", // this will be removed when reloading
+		},
+	})
+
+	daemon.configStore = &config.Config{}
+
+	type pair struct {
+		valid   bool
+		mirrors []string
+		after   []string
+	}
+
+	loadMirrors := []pair{
+		{
+			valid:   false,
+			mirrors: []string{"10.10.1.11:5000"}, // this mirror is invalid
+			after:   []string{},
+		},
+		{
+			valid:   false,
+			mirrors: []string{"mirror.test1.com"}, // this mirror is invalid
+			after:   []string{},
+		},
+		{
+			valid:   false,
+			mirrors: []string{"10.10.1.11:5000", "mirror.test1.com"}, // mirrors are invalid
+			after:   []string{},
+		},
+		{
+			valid:   true,
+			mirrors: []string{"https://mirror.test1.com", "https://mirror.test4.com"},
+			after:   []string{"https://mirror.test1.com/", "https://mirror.test4.com/"},
+		},
+	}
+
+	for _, value := range loadMirrors {
+		valuesSets := make(map[string]interface{})
+		valuesSets["registry-mirrors"] = value.mirrors
+
+		newConfig := &config.Config{
+			CommonConfig: config.CommonConfig{
+				ServiceOptions: registry.ServiceOptions{
+					Mirrors: value.mirrors,
+				},
+				ValuesSet: valuesSets,
+			},
+		}
+
+		err := daemon.Reload(newConfig)
+		if !value.valid && err == nil {
+			// mirrors should be invalid, should be a non-nil error
+			t.Fatalf("Expected daemon reload error with invalid mirrors: %s, while get nil", value.mirrors)
+		}
+
+		if value.valid {
+			if err != nil {
+				// mirrors should be valid, should be no error
+				t.Fatal(err)
+			}
+			registryService := daemon.RegistryService.ServiceConfig()
+
+			if len(registryService.Mirrors) != len(value.after) {
+				t.Fatalf("Expected %d daemon mirrors %s while get %d with %s",
+					len(value.after),
+					value.after,
+					len(registryService.Mirrors),
+					registryService.Mirrors)
+			}
+
+			dataMap := map[string]struct{}{}
+
+			for _, mirror := range registryService.Mirrors {
+				if _, exist := dataMap[mirror]; !exist {
+					dataMap[mirror] = struct{}{}
+				}
+			}
+
+			for _, address := range value.after {
+				if _, exist := dataMap[address]; !exist {
+					t.Fatalf("Expected %s in daemon mirrors, while get none", address)
+				}
+			}
+		}
+	}
+}
+
+func TestDaemonReloadInsecureRegistries(t *testing.T) {
+	daemon := &Daemon{}
+	// initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000"
+	daemon.RegistryService = registry.NewService(registry.ServiceOptions{
+		InsecureRegistries: []string{
+			"127.0.0.0/8",
+			"10.10.1.11:5000",
+			"10.10.1.22:5000", // this will be removed when reloading
"docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &config.Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + "10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. + registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. 
+	for _, r := range insecureRegistries {
+		if value, ok := dataMap[r]; !ok {
+			t.Fatalf("Expected daemon insecure registry %s, got none", r)
+		} else if value != 1 {
+			t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value)
+		}
+	}
+
+	// assert if "10.10.1.22:5000" is removed when reloading
+	if value, ok := dataMap["10.10.1.22:5000"]; ok {
+		t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value)
+	}
+
+	// assert if "docker2.com" is removed when reloading
+	if value, ok := dataMap["docker2.com"]; ok {
+		t.Fatalf("Expected no insecure registry of docker2.com, got %d", value)
+	}
+}
+
+func TestDaemonReloadNotAffectOthers(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &config.Config{
+		CommonConfig: config.CommonConfig{
+			Labels: []string{"foo:bar"},
+			Debug:  true,
+		},
+	}
+
+	valuesSets := make(map[string]interface{})
+	valuesSets["labels"] = "foo:baz"
+	newConfig := &config.Config{
+		CommonConfig: config.CommonConfig{
+			Labels:    []string{"foo:baz"},
+			ValuesSet: valuesSets,
+		},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+
+	label := daemon.configStore.Labels[0]
+	if label != "foo:baz" {
+		t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
+	}
+	debug := daemon.configStore.Debug
+	if !debug {
+		t.Fatal("Expected debug 'enabled', got 'disabled'")
+	}
+}
+
+func TestDaemonDiscoveryReload(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterStore:     "memory://127.0.0.1",
+			ClusterAdvertise: "127.0.0.1:3333",
+		},
+	}
+
+	if err := daemon.initDiscovery(daemon.configStore); err != nil {
+		t.Fatal(err)
+	}
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "3333"},
+	}
+
+	select {
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	case <-daemon.discoveryWatcher.ReadyCh():
+	}
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+
+	valuesSets := make(map[string]interface{})
+	valuesSets["cluster-store"] = "memory://127.0.0.1:2222"
+	valuesSets["cluster-advertise"] = "127.0.0.1:5555"
+	newConfig := &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterStore:     "memory://127.0.0.1:2222",
+			ClusterAdvertise: "127.0.0.1:5555",
+			ValuesSet:        valuesSets,
+		},
+	}
+
+	expected = discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	case <-daemon.discoveryWatcher.ReadyCh():
+	}
+
+	ch, errCh = daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
+
+func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &config.Config{}
+
+	valuesSet := make(map[string]interface{})
+	valuesSet["cluster-store"] = "memory://127.0.0.1:2222"
+	valuesSet["cluster-advertise"] = "127.0.0.1:5555"
+	newConfig := &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterStore:     "memory://127.0.0.1:2222",
+			ClusterAdvertise: "127.0.0.1:5555",
+			ValuesSet:        valuesSet,
+		},
+	}
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	case <-daemon.discoveryWatcher.ReadyCh():
+	}
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
+
+func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterStore: "memory://127.0.0.1",
+		},
+	}
+	valuesSets := make(map[string]interface{})
+	valuesSets["cluster-advertise"] = "127.0.0.1:5555"
+	newConfig := &config.Config{
+		CommonConfig: config.CommonConfig{
+			ClusterAdvertise: "127.0.0.1:5555",
+			ValuesSet:        valuesSets,
+		},
+	}
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-daemon.discoveryWatcher.ReadyCh():
+	case <-time.After(10 * time.Second):
+		t.Fatal("Timeout waiting for discovery")
+	}
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
diff --git a/daemon/rename.go b/daemon/rename.go
new file mode 100644
index 0000000..ad21593
--- /dev/null
+++ b/daemon/rename.go
@@ -0,0 +1,123 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	dockercontainer "github.com/docker/docker/container"
+	"github.com/docker/libnetwork"
+)
+
+// ContainerRename changes the name of a container, using the oldName
+// to find the container. An error is returned if newName is already
+// reserved.
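+// A minimal caller sketch (hypothetical; error handling abbreviated):
+//
+//	if err := daemon.ContainerRename("old_name", "new_name"); err != nil {
+//		logrus.Errorf("rename failed: %v", err)
+//	}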
+func (daemon *Daemon) ContainerRename(oldName, newName string) error {
+	var (
+		sid string
+		sb  libnetwork.Sandbox
+	)
+
+	if oldName == "" || newName == "" {
+		return errors.New("Neither old nor new names may be empty")
+	}
+
+	if newName[0] != '/' {
+		newName = "/" + newName
+	}
+
+	container, err := daemon.GetContainer(oldName)
+	if err != nil {
+		return err
+	}
+
+	oldName = container.Name
+	oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint
+
+	if oldName == newName {
+		return errors.New("Renaming a container with the same name as its current name")
+	}
+
+	container.Lock()
+	defer container.Unlock()
+
+	links := map[string]*dockercontainer.Container{}
+	for k, v := range daemon.linkIndex.children(container) {
+		if !strings.HasPrefix(k, oldName) {
+			return fmt.Errorf("Linked container %s does not match parent %s", k, oldName)
+		}
+		links[strings.TrimPrefix(k, oldName)] = v
+	}
+
+	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
+		return fmt.Errorf("Error when allocating new name: %v", err)
+	}
+
+	for k, v := range links {
+		daemon.nameIndex.Reserve(newName+k, v.ID)
+		daemon.linkIndex.link(container, v, newName+k)
+	}
+
+	container.Name = newName
+	container.NetworkSettings.IsAnonymousEndpoint = false
+
+	defer func() {
+		if err != nil {
+			container.Name = oldName
+			container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
+			daemon.reserveName(container.ID, oldName)
+			for k, v := range links {
+				daemon.nameIndex.Reserve(oldName+k, v.ID)
+				daemon.linkIndex.link(container, v, oldName+k)
+				daemon.linkIndex.unlink(newName+k, v, container)
+				daemon.nameIndex.Release(newName + k)
+			}
+			daemon.releaseName(newName)
+		}
+	}()
+
+	for k, v := range links {
+		daemon.linkIndex.unlink(oldName+k, v, container)
+		daemon.nameIndex.Release(oldName + k)
+	}
+	daemon.releaseName(oldName)
+	if err = container.CheckpointTo(daemon.containersReplica); err != nil {
+		return err
+	}
+
+	attributes := map[string]string{
+		"oldName": oldName,
+	}
+
+	if !container.Running {
+		daemon.LogContainerEventWithAttributes(container, "rename", attributes)
+		return nil
+	}
+
+	defer func() {
+		if err != nil {
+			container.Name = oldName
+			container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
+			if e := container.CheckpointTo(daemon.containersReplica); e != nil {
+				logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
+			}
+		}
+	}()
+
+	sid = container.NetworkSettings.SandboxID
+	if sid != "" && daemon.netController != nil {
+		sb, err = daemon.netController.SandboxByID(sid)
+		if err != nil {
+			return err
+		}
+
+		err = sb.Rename(strings.TrimPrefix(container.Name, "/"))
+		if err != nil {
+			return err
+		}
+	}
+
+	daemon.LogContainerEventWithAttributes(container, "rename", attributes)
+	return nil
+}
diff --git a/daemon/resize.go b/daemon/resize.go
new file mode 100644
index 0000000..7473538
--- /dev/null
+++ b/daemon/resize.go
@@ -0,0 +1,40 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/libcontainerd"
+)
+
+// ContainerResize changes the size of the TTY of the process running
+// in the container with the given name to the given height and width.
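+// For example (illustrative), resizing a container's TTY to 24 rows by 80
+// columns:
+//
+//	err := daemon.ContainerResize("my_container", 24, 80) // height, width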
+func (daemon *Daemon) ContainerResize(name string, height, width int) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if !container.IsRunning() {
+		return errNotRunning{container.ID}
+	}
+
+	if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil {
+		attributes := map[string]string{
+			"height": fmt.Sprintf("%d", height),
+			"width":  fmt.Sprintf("%d", width),
+		}
+		daemon.LogContainerEventWithAttributes(container, "resize", attributes)
+	}
+	return err
+}
+
+// ContainerExecResize changes the size of the TTY of the process
+// running in the exec with the given name to the given height and
+// width.
+func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
+	ec, err := daemon.getExecConfig(name)
+	if err != nil {
+		return err
+	}
+	return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height)
+}
diff --git a/daemon/restart.go b/daemon/restart.go
new file mode 100644
index 0000000..9f2ef56
--- /dev/null
+++ b/daemon/restart.go
@@ -0,0 +1,70 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+)
+
+// ContainerRestart stops and starts a container. It attempts to
+// gracefully stop the container within the given timeout, forcefully
+// stopping it if the timeout is exceeded. If given a negative
+// timeout, ContainerRestart will wait forever until a graceful
+// stop. Returns an error if the container cannot be found, or if
+// there is an underlying error at any stage of the restart.
+func (daemon *Daemon) ContainerRestart(name string, seconds *int) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+	if seconds == nil {
+		stopTimeout := container.StopTimeout()
+		seconds = &stopTimeout
+	}
+	if err := daemon.containerRestart(container, *seconds); err != nil {
+		return fmt.Errorf("Cannot restart container %s: %v", name, err)
+	}
+	return nil
+}
+
+// containerRestart attempts to gracefully stop and then start the
+// container. When stopping, wait for the given duration in seconds to
+// gracefully stop, before forcefully terminating the container. If
+// given a negative duration, wait forever for a graceful stop.
+func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error {
+	// Avoid unnecessarily unmounting and then directly mounting
+	// the container when the container stops and then starts
+	// again
+	if err := daemon.Mount(container); err == nil {
+		defer daemon.Unmount(container)
+	}
+
+	if container.IsRunning() {
+		// set AutoRemove flag to false before stop so the container won't be
+		// removed during restart process
+		autoRemove := container.HostConfig.AutoRemove
+
+		container.HostConfig.AutoRemove = false
+		err := daemon.containerStop(container, seconds)
+		// restore AutoRemove irrespective of whether the stop worked or not
+		container.HostConfig.AutoRemove = autoRemove
+		// containerStop will write HostConfig to disk, we shall restore AutoRemove
+		// in disk too
+		if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil {
+			logrus.Errorf("Write container to disk error: %v", toDiskErr)
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+
+	if err := daemon.containerStart(container, "", "", true); err != nil {
+		return err
+	}
+
+	daemon.LogContainerEvent(container, "restart")
+	return nil
+}
diff --git a/daemon/search.go b/daemon/search.go
new file mode 100644
index 0000000..5d2ac5d
--- /dev/null
+++ b/daemon/search.go
@@ -0,0 +1,94 @@
+package daemon
+
+import (
+	"fmt"
+	"strconv"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/dockerversion"
+)
+
+var acceptedSearchFilterTags = map[string]bool{
+	"is-automated": true,
+	"is-official":  true,
+	"stars":        true,
+}
+
+// SearchRegistryForImages queries the registry for images matching
+// term. authConfig is used to login.
+func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int,
+	authConfig *types.AuthConfig,
+	headers map[string][]string) (*registrytypes.SearchResults, error) {
+
+	searchFilters, err := filters.FromParam(filtersArgs)
+	if err != nil {
+		return nil, err
+	}
+	if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil {
+		return nil, err
+	}
+
+	var isAutomated, isOfficial bool
+	var hasStarFilter = 0
+	if searchFilters.Include("is-automated") {
+		if searchFilters.UniqueExactMatch("is-automated", "true") {
+			isAutomated = true
+		} else if !searchFilters.UniqueExactMatch("is-automated", "false") {
+			return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated"))
+		}
+	}
+	if searchFilters.Include("is-official") {
+		if searchFilters.UniqueExactMatch("is-official", "true") {
+			isOfficial = true
+		} else if !searchFilters.UniqueExactMatch("is-official", "false") {
+			return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official"))
+		}
+	}
+	if searchFilters.Include("stars") {
+		hasStars := searchFilters.Get("stars")
+		for _, hasStar := range hasStars {
+			iHasStar, err := strconv.Atoi(hasStar)
+			if err != nil {
+				return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar)
+			}
+			if iHasStar > hasStarFilter {
+				hasStarFilter = iHasStar
+			}
+		}
+	}
+
+	unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers)
+	if err != nil {
+		return nil, err
+	}
+
+	filteredResults := []registrytypes.SearchResult{}
+	for _, result := range unfilteredResult.Results {
+		if searchFilters.Include("is-automated") {
+			if isAutomated != result.IsAutomated {
+				continue
+			}
+		}
+		if searchFilters.Include("is-official") {
+			if isOfficial != result.IsOfficial {
+				continue
+			}
+		}
+		if searchFilters.Include("stars") {
+			if result.StarCount < hasStarFilter {
+				continue
+			}
+		}
+		filteredResults = append(filteredResults, result)
+	}
+
+	return &registrytypes.SearchResults{
+		Query:      unfilteredResult.Query,
+		NumResults: len(filteredResults),
+		Results:    filteredResults,
+	}, nil
+}
diff --git a/daemon/search_test.go b/daemon/search_test.go
new file mode 100644
index 0000000..8523719
--- /dev/null
+++ b/daemon/search_test.go
@@ -0,0 +1,358 @@
+package daemon
+
+import (
+	"errors"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/registry"
+)
+
+type FakeService struct {
+	registry.DefaultService
+
+	shouldReturnError bool
+
+	term    string
+	results []registrytypes.SearchResult
+}
+
+func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
+	if s.shouldReturnError {
+		return nil, errors.New("Search unknown error")
+	}
+	return &registrytypes.SearchResults{
+		Query:      s.term,
+		NumResults: len(s.results),
+		Results:    s.results,
+	}, nil
+}
+
+func TestSearchRegistryForImagesErrors(t *testing.T) {
+	errorCases := []struct {
+		filtersArgs       string
+		shouldReturnError bool
+		expectedError     string
+	}{
+		{
+			expectedError:     "Search unknown error",
+			shouldReturnError: true,
+		},
+		{
+			filtersArgs:   "invalid json",
+			expectedError: "invalid character 'i' looking for beginning of value",
+		},
+		{
+			filtersArgs:   `{"type":{"custom":true}}`,
+			expectedError: "Invalid filter 'type'",
+		},
+		{
filtersArgs: `{"is-automated":{"invalid":true}}`, + expectedError: "Invalid filter 'is-automated=[invalid]'", + }, + { + filtersArgs: `{"is-automated":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-automated", + }, + { + filtersArgs: `{"is-official":{"invalid":true}}`, + expectedError: "Invalid filter 'is-official=[invalid]'", + }, + { + filtersArgs: `{"is-official":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-official", + }, + { + filtersArgs: `{"stars":{"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + { + filtersArgs: `{"stars":{"1":true,"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + } + for index, e := range errorCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + shouldReturnError: e.shouldReturnError, + }, + } + _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) + if err == nil { + t.Errorf("%d: expected an error, got nothing", index) + } + if !strings.Contains(err.Error(), e.expectedError) { + t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) + } + } +} + +func TestSearchRegistryForImages(t *testing.T) { + term := "term" + successCases := []struct { + filtersArgs string + registryResults []registrytypes.SearchResult + expectedResults []registrytypes.SearchResult + }{ + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{}, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: 
"description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + }, + { + filtersArgs: `{"stars":{"0":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + IsOfficial: true, + IsAutomated: true, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + IsOfficial: false, + IsAutomated: true, + }, + { + Name: "name2", + Description: "description2", + StarCount: 1, + IsOfficial: true, + IsAutomated: false, + }, + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + }, + } + for index, s := range successCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + term: term, + results: s.registryResults, + }, + } + results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) + if err != nil { + t.Errorf("%d: %v", index, err) + } + if results.Query != term { + t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) + } + if results.NumResults != len(s.expectedResults) { + t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) + } + for _, result := range results.Results { + found := false + for _, expectedResult := range s.expectedResults { + if expectedResult.Name == result.Name && + expectedResult.Description == result.Description && + expectedResult.IsAutomated == result.IsAutomated && + expectedResult.IsOfficial == result.IsOfficial && + expectedResult.StarCount == result.StarCount { + found = true + break + } + } + if !found { + t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) + } + } + } +} diff --git a/daemon/seccomp_disabled.go b/daemon/seccomp_disabled.go new file mode 100644 index 0000000..ff1127b --- /dev/null +++ b/daemon/seccomp_disabled.go @@ -0,0 +1,19 @@ +// +build linux,!seccomp + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/opencontainers/runtime-spec/specs-go" 
+)
+
+var supportsSeccomp = false
+
+func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
+	if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
+		return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile")
+	}
+	return nil
+}
diff --git a/daemon/seccomp_linux.go b/daemon/seccomp_linux.go
new file mode 100644
index 0000000..472e313
--- /dev/null
+++ b/daemon/seccomp_linux.go
@@ -0,0 +1,55 @@
+// +build linux,seccomp
+
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/profiles/seccomp"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+var supportsSeccomp = true
+
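+// setSeccomp resolves the profile to apply in precedence order (a summary of
+// the body below): a profile set on the container, then the daemon-level
+// profile, then the bundled default; privileged containers and "unconfined"
+// skip seccomp entirely.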
+func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error {
+	var profile *specs.LinuxSeccomp
+	var err error
+
+	if c.HostConfig.Privileged {
+		return nil
+	}
+
+	if !daemon.seccompEnabled {
+		if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
+			return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.")
+		}
+		logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.")
+		c.SeccompProfile = "unconfined"
+	}
+	if c.SeccompProfile == "unconfined" {
+		return nil
+	}
+	if c.SeccompProfile != "" {
+		profile, err = seccomp.LoadProfile(c.SeccompProfile, rs)
+		if err != nil {
+			return err
+		}
+	} else {
+		if daemon.seccompProfile != nil {
+			profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs)
+			if err != nil {
+				return err
+			}
+		} else {
+			profile, err = seccomp.GetDefaultProfile(rs)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	rs.Linux.Seccomp = profile
+	return nil
+}
diff --git a/daemon/seccomp_unsupported.go b/daemon/seccomp_unsupported.go
new file mode 100644
index 0000000..b3691e9
--- /dev/null
+++ b/daemon/seccomp_unsupported.go
@@ -0,0 +1,5 @@
+// +build !linux
+
+package daemon
+
+var supportsSeccomp = false
diff --git a/daemon/secrets.go b/daemon/secrets.go
new file mode 100644
index 0000000..90fa99e
--- /dev/null
+++ b/daemon/secrets.go
@@ -0,0 +1,23 @@
+package daemon
+
+import (
+	"github.com/Sirupsen/logrus"
+	swarmtypes "github.com/docker/docker/api/types/swarm"
+)
+
+// SetContainerSecretReferences sets the container secret references needed
+func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error {
+	if !secretsSupported() && len(refs) > 0 {
+		logrus.Warn("secrets are not supported on this platform")
+		return nil
+	}
+
+	c, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	c.SecretReferences = refs
+
+	return nil
+}
diff --git a/daemon/secrets_linux.go b/daemon/secrets_linux.go
new file mode 100644
index 0000000..fca4e12
--- /dev/null
+++ b/daemon/secrets_linux.go
@@ -0,0 +1,7 @@
+// +build linux
+
+package daemon
+
+func secretsSupported() bool {
+	return true
+}
diff --git a/daemon/secrets_unsupported.go b/daemon/secrets_unsupported.go
new file mode 100644
index 0000000..d55e862
--- /dev/null
+++ b/daemon/secrets_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux,!windows
+
+package daemon
+
+func secretsSupported() bool {
+	return false
+}
diff --git a/daemon/secrets_windows.go b/daemon/secrets_windows.go
new file mode 100644
index 0000000..9054354
--- /dev/null
+++ b/daemon/secrets_windows.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package daemon
+
+func secretsSupported() bool {
+	return true
+}
diff --git a/daemon/selinux_linux.go b/daemon/selinux_linux.go
new file mode 100644
index 0000000..fb2578b
--- /dev/null
+++ b/daemon/selinux_linux.go
@@ -0,0 +1,17 @@
+// +build linux
+
+package daemon
+
+import "github.com/opencontainers/selinux/go-selinux"
+
+func selinuxSetDisabled() {
+	selinux.SetDisabled()
+}
+
+func selinuxFreeLxcContexts(label string) {
+	selinux.ReleaseLabel(label)
+}
+
+func selinuxEnabled() bool {
+	return selinux.GetEnabled()
+}
diff --git a/daemon/selinux_unsupported.go b/daemon/selinux_unsupported.go
new file mode 100644
index 0000000..25a56ad
--- /dev/null
+++ b/daemon/selinux_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package daemon
+
+func selinuxSetDisabled() {
+}
+
+func selinuxFreeLxcContexts(label string) {
+}
+
+func selinuxEnabled() bool {
+	return false
+}
diff --git a/daemon/start.go b/daemon/start.go
new file mode 100644
index 0000000..e57aadf
--- /dev/null
+++ b/daemon/start.go
@@ -0,0 +1,228 @@
+package daemon
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+	"strings"
+	"syscall"
+	"time"
+
+	"google.golang.org/grpc"
+
+	"github.com/Sirupsen/logrus"
+	apierrors "github.com/docker/docker/api/errors"
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/container"
+)
+
+// ContainerStart starts a container.
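+// A minimal caller sketch (hypothetical; hostConfig belongs at create time,
+// and no checkpoint is used):
+//
+//	err := daemon.ContainerStart("my_container", nil, "", "")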
+func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
+	if checkpoint != "" && !daemon.HasExperimental() {
+		return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode"))
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if container.IsPaused() {
+		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
+	}
+
+	if container.IsRunning() {
+		err := fmt.Errorf("Container already started")
+		return apierrors.NewErrorWithStatusCode(err, http.StatusNotModified)
+	}
+
+	// Windows does not have the backwards compatibility issue here.
+	if runtime.GOOS != "windows" {
+		// This is kept for backward compatibility - hostconfig should be passed when
+		// creating a container, not during start.
+		if hostConfig != nil {
+			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
+			oldNetworkMode := container.HostConfig.NetworkMode
+			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
+				return err
+			}
+			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
+				return err
+			}
+			if err := daemon.setHostConfig(container, hostConfig); err != nil {
+				return err
+			}
+			newNetworkMode := container.HostConfig.NetworkMode
+			if string(oldNetworkMode) != string(newNetworkMode) {
+				// if the user has changed the network mode on starting, clean up the
+				// old networks. It is a deprecated feature and has been removed in Docker 1.12
+				container.NetworkSettings.Networks = nil
+				container.Lock()
+				if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+					container.Unlock()
+					return err
+				}
+				container.Unlock()
+			}
+			container.InitDNSHostConfig()
+		}
+	} else {
+		if hostConfig != nil {
+			return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create")
+		}
+	}
+
+	// check if hostConfig is in line with the current system settings.
+	// It may happen that cgroups are unmounted or the like.
+	if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil {
+		return err
+	}
+	// Adapt for old containers in case we have updates in this function and
+	// old containers never have chance to call the new function in create stage.
+	if hostConfig != nil {
+		if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
+			return err
+		}
+	}
+
+	return daemon.containerStart(container, checkpoint, checkpointDir, true)
+}
+
+// containerStart prepares the container to run by setting up everything the
+// container needs, such as storage and networking, as well as links
+// between containers. The container is left waiting for a signal to
+// begin running.
+func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) {
+	start := time.Now()
+	container.Lock()
+	defer container.Unlock()
+
+	if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false
+		return nil
+	}
+
+	if container.RemovalInProgress || container.Dead {
+		return fmt.Errorf("Container is marked for removal and cannot be started.")
+	}
+
+	// if we encounter an error during start we need to ensure that any other
+	// setup has been cleaned up properly
+	defer func() {
+		if err != nil {
+			container.SetError(err)
+			// if no one else has set it, make sure we don't leave it at zero
+			if container.ExitCode() == 0 {
+				container.SetExitCode(128)
+			}
+			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+				logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
+			}
+			container.Reset(false)
+
+			daemon.Cleanup(container)
+			// if the container's AutoRemove flag is set, remove it after clean up
+			if container.HostConfig.AutoRemove {
+				container.Unlock()
+				if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+					logrus.Errorf("can't remove container %s: %v", container.ID, err)
+				}
+				container.Lock()
+			}
+		}
+	}()
+
+	if err := daemon.conditionalMountOnStart(container); err != nil {
+		return err
+	}
+
+	if err := daemon.initializeNetworking(container); err != nil {
+		return err
+	}
+
+	spec, err := daemon.createSpec(container)
+	if err != nil {
+		return err
+	}
+
+	createOptions, err := daemon.getLibcontainerdCreateOptions(container)
+	if err != nil {
+		return err
+	}
+
+	if resetRestartManager {
+		container.ResetRestartManager(true)
+	}
+
+	if checkpointDir == "" {
+		checkpointDir = container.CheckpointDir()
+	}
+
+	if err := daemon.saveApparmorConfig(container); err != nil {
+		return err
+	}
+
+	if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil {
+		errDesc := grpc.ErrorDesc(err)
+		contains := func(s1, s2 string) bool {
+			return strings.Contains(strings.ToLower(s1), s2)
+		}
+		logrus.Errorf("Create container failed with error: %s", errDesc)
+		// if we receive an internal error from the initial start of a container then lets
+		// return it instead of entering the restart loop
+		// (set to 127 for container cmd not found/does not exist)
+		if contains(errDesc, container.Path) &&
+			(contains(errDesc, "executable file not found") ||
+				contains(errDesc, "no such file or directory") ||
+				contains(errDesc, "system cannot find the file specified")) {
+			container.SetExitCode(127)
+		}
+		// set to 126 for container cmd can't be invoked errors
+		if contains(errDesc, syscall.EACCES.Error()) {
+			container.SetExitCode(126)
+		}
+
+		// attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts
+		if contains(errDesc, syscall.ENOTDIR.Error()) {
+			errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type"
+			container.SetExitCode(127)
+		}
+
+		return fmt.Errorf("%s", errDesc)
+	}
+
+	containerActions.WithValues("start").UpdateSince(start)
+
+	return nil
+}
+
+// Cleanup releases any network resources allocated to the container along with any rules
+// around how containers are linked together. It also unmounts the container's root filesystem.
+func (daemon *Daemon) Cleanup(container *container.Container) {
+	daemon.releaseNetwork(container)
+
+	container.UnmountIpcMounts(detachMounted)
+
+	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
+		// FIXME: remove once reference counting for graphdrivers has been refactored
+		// Ensure that all the mounts are gone
+		if mountid, err := daemon.stores[container.Platform].layerStore.GetMountID(container.ID); err == nil {
+			daemon.cleanupMountsByID(mountid)
+		}
+	}
+
+	if err := container.UnmountSecrets(); err != nil {
+		logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
+	}
+
+	for _, eConfig := range container.ExecCommands.Commands() {
+		daemon.unregisterExecCommand(container, eConfig)
+	}
+
+	if container.BaseFS != "" {
+		if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
+			logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
+		}
+	}
+	container.CancelAttachContext()
+}
diff --git a/daemon/start_unix.go b/daemon/start_unix.go
new file mode 100644
index 0000000..12ecdab
--- /dev/null
+++ b/daemon/start_unix.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// getLibcontainerdCreateOptions callers must hold a lock on the container
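+// For example (illustrative), with a daemon.json runtime entry such as
+//
+//	{ "runtimes": { "custom": { "path": "/usr/local/bin/my-runc" } } }
+//
+// a container whose HostConfig.Runtime is "custom" is launched with that
+// binary.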
+func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
+	createOptions := []libcontainerd.CreateOption{}
+
+	// Ensure a runtime has been assigned to this container
+	if container.HostConfig.Runtime == "" {
+		container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
+		container.CheckpointTo(daemon.containersReplica)
+	}
+
+	rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime)
+	if rt == nil {
+		return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime)
+	}
+	if UsingSystemd(daemon.configStore) {
+		rt.Args = append(rt.Args, "--systemd-cgroup=true")
+	}
+	createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args))
+
+	return createOptions, nil
+}
diff --git a/daemon/start_windows.go b/daemon/start_windows.go
new file mode 100644
index 0000000..da70a16
--- /dev/null
+++ b/daemon/start_windows.go
@@ -0,0 +1,210 @@
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/libcontainerd"
+	"golang.org/x/sys/windows/registry"
+)
+
+const (
+	credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
+	credentialSpecFileLocation     = "CredentialSpecs"
+)
+
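+// getLibcontainerdCreateOptions assembles the Windows-specific create
+// options: Hyper-V isolation, the RW layer folder and parent layer paths,
+// HNS network endpoints, and any credential specs. (Summary comment added
+// for clarity; see the body below.)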
+func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
+	createOptions := []libcontainerd.CreateOption{}
+
+	// Are we going to run as a Hyper-V container?
+	hvOpts := &libcontainerd.HyperVIsolationOption{}
+	if container.HostConfig.Isolation.IsDefault() {
+		// Container is set to use the default, so take the default from the daemon configuration
+		hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV()
+	} else {
+		// Container is requesting an isolation mode. Honour it.
+		hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV()
+	}
+
+	dnsSearch := daemon.getDNSSearchSettings(container)
+
+	// Generate the layer folder of the layer options
+	layerOpts := &libcontainerd.LayerOption{}
+	m, err := container.RWLayer.Metadata()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get layer metadata - %s", err)
+	}
+	layerOpts.LayerFolderPath = m["dir"]
+
+	// Generate the layer paths of the layer options
+	img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err)
+	}
+	// Get the layer path for each layer.
+	max := len(img.RootFS.DiffIDs)
+	for i := 1; i <= max; i++ {
+		img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
+		layerPath, err := layer.GetLayerPath(daemon.stores[container.Platform].layerStore, img.RootFS.ChainID())
+		if err != nil {
+			return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[container.Platform].layerStore, img.RootFS.ChainID(), err)
+		}
+		// Reverse order, expecting parent most first
+		layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...)
+	}
+
+	// Get endpoints for the libnetwork allocated networks to the container
+	var epList []string
+	AllowUnqualifiedDNSQuery := false
+	gwHNSID := ""
+	if container.NetworkSettings != nil {
+		for n := range container.NetworkSettings.Networks {
+			sn, err := daemon.FindNetwork(n)
+			if err != nil {
+				continue
+			}
+
+			ep, err := container.GetEndpointInNetwork(sn)
+			if err != nil {
+				continue
+			}
+
+			data, err := ep.DriverInfo()
+			if err != nil {
+				continue
+			}
+
+			if data["GW_INFO"] != nil {
+				gwInfo := data["GW_INFO"].(map[string]interface{})
+				if gwInfo["hnsid"] != nil {
+					gwHNSID = gwInfo["hnsid"].(string)
+				}
+			}
+
+			if data["hnsid"] != nil {
+				epList = append(epList, data["hnsid"].(string))
+			}
+
+			if data["AllowUnqualifiedDNSQuery"] != nil {
+				AllowUnqualifiedDNSQuery = true
+			}
+		}
+	}
+
+	if gwHNSID != "" {
+		epList = append(epList, gwHNSID)
+	}
+
+	// Read and add credentials from the security options if a credential spec has been provided.
+	if container.HostConfig.SecurityOpt != nil {
+		for _, sOpt := range container.HostConfig.SecurityOpt {
+			sOpt = strings.ToLower(sOpt)
+			if !strings.Contains(sOpt, "=") {
+				return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt)
+			}
+			var splitsOpt []string
+			splitsOpt = strings.SplitN(sOpt, "=", 2)
+			if len(splitsOpt) != 2 {
+				return nil, fmt.Errorf("invalid security option: %s", sOpt)
+			}
+			if splitsOpt[0] != "credentialspec" {
+				return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0])
+			}
+
+			credentialsOpts := &libcontainerd.CredentialsOption{}
+			var (
+				match   bool
+				csValue string
+				err     error
+			)
+			if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match {
+				if csValue == "" {
+					return nil, fmt.Errorf("no value supplied for file:// credential spec security option")
+				}
+				if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil {
+					return nil, err
+				}
+			} else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match {
+				if csValue == "" {
+					return nil, fmt.Errorf("no value supplied for registry:// credential spec security option")
+				}
+				if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil {
+					return nil, err
+				}
+			} else {
+				return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value")
+			}
+			createOptions = append(createOptions, credentialsOpts)
+		}
+	}
+
+	// Now add the remaining options.
+	createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore})
+	createOptions = append(createOptions, hvOpts)
+	createOptions = append(createOptions, layerOpts)
+
+	var networkSharedContainerID string
+	if container.HostConfig.NetworkMode.IsContainer() {
+		networkSharedContainerID = container.NetworkSharedContainerID
+	}
+	createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{
+		Endpoints:                epList,
+		AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery,
+		DNSSearchList:            dnsSearch,
+		NetworkSharedContainerID: networkSharedContainerID,
+	})
+	return createOptions, nil
+}
+
+// getCredentialSpec is a helper function to get the value of a credential spec supplied
+// on the CLI, stripping the prefix
+func getCredentialSpec(prefix, value string) (bool, string) {
+	if strings.HasPrefix(value, prefix) {
+		return true, strings.TrimPrefix(value, prefix)
+	}
+	return false, ""
+}
+
+// readCredentialSpecRegistry is a helper function to read a credential spec from
+// the registry. If the key or value is not found, or cannot be read, an error
+// is returned.
+func readCredentialSpecRegistry(id, name string) (string, error) {
+	var (
+		k   registry.Key
+		err error
+		val string
+	)
+	if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil {
+		return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation)
+	}
+	if val, _, err = k.GetStringValue(name); err != nil {
+		if err == registry.ErrNotExist {
+			return "", fmt.Errorf("credential spec %q for container %s was not found", name, id)
+		}
+		return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id)
+	}
+	return val, nil
+}
+
+// readCredentialSpecFile is a helper function to read a credential spec from
+// a file. If the file cannot be read, an error is returned.
+func readCredentialSpecFile(id, root, location string) (string, error) {
+	if filepath.IsAbs(location) {
+		return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute")
+	}
+	base := filepath.Join(root, credentialSpecFileLocation)
+	full := filepath.Join(base, location)
+	if !strings.HasPrefix(full, base) {
+		return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base)
+	}
+	bcontents, err := ioutil.ReadFile(full)
+	if err != nil {
+		return "", fmt.Errorf("credential spec '%s' for container %s could not be read: %q", full, id, err)
+	}
+	return string(bcontents[:]), nil
+}
diff --git a/daemon/stats.go b/daemon/stats.go
new file mode 100644
index 0000000..926f32e
--- /dev/null
+++ b/daemon/stats.go
@@ -0,0 +1,160 @@
+package daemon
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"runtime"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/api/types/versions/v1p20"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+// ContainerStats writes information about the container to the stream
+// given in the config object.
+func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error {
+	if runtime.GOOS == "solaris" {
+		return fmt.Errorf("%+v does not support stats", runtime.GOOS)
+	}
+	// Engine API version (used for backwards compatibility)
+	apiVersion := config.Version
+
+	container, err := daemon.GetContainer(prefixOrName)
+	if err != nil {
+		return err
+	}
+
+	// If the container is either not running or restarting and requires no stream, return an empty stats.
+	if (!container.IsRunning() || container.IsRestarting()) && !config.Stream {
+		return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{
+			Name: container.Name,
+			ID:   container.ID})
+	}
+
+	outStream := config.OutStream
+	if config.Stream {
+		wf := ioutils.NewWriteFlusher(outStream)
+		defer wf.Close()
+		wf.Flush()
+		outStream = wf
+	}
+
+	var preCPUStats types.CPUStats
+	var preRead time.Time
+	getStatJSON := func(v interface{}) *types.StatsJSON {
+		ss := v.(types.StatsJSON)
+		ss.Name = container.Name
+		ss.ID = container.ID
+		ss.PreCPUStats = preCPUStats
+		ss.PreRead = preRead
+		preCPUStats = ss.CPUStats
+		preRead = ss.Read
+		return &ss
+	}
+
+	enc := json.NewEncoder(outStream)
+
+	updates := daemon.subscribeToContainerStats(container)
+	defer daemon.unsubscribeToContainerStats(container, updates)
+
+	noStreamFirstFrame := true
+	for {
+		select {
+		case v, ok := <-updates:
+			if !ok {
+				return nil
+			}
+
+			var statsJSON interface{}
+			statsJSONPost120 := getStatJSON(v)
+			if versions.LessThan(apiVersion, "1.21") {
+				if runtime.GOOS == "windows" {
+					return errors.New("API versions pre v1.21 do not support stats on Windows")
+				}
+				var (
+					rxBytes   uint64
+					rxPackets uint64
+					rxErrors  uint64
+					rxDropped uint64
+					txBytes   uint64
+					txPackets uint64
+					txErrors  uint64
+					txDropped uint64
+				)
+				for _, v := range statsJSONPost120.Networks {
+					rxBytes += v.RxBytes
+					rxPackets += v.RxPackets
+					rxErrors += v.RxErrors
+					rxDropped += v.RxDropped
+					txBytes += v.TxBytes
+					txPackets += v.TxPackets
+					txErrors += v.TxErrors
+					txDropped += v.TxDropped
+				}
+				statsJSON = &v1p20.StatsJSON{
+					Stats: statsJSONPost120.Stats,
+					Network: types.NetworkStats{
+						RxBytes:   rxBytes,
+						RxPackets: rxPackets,
+						RxErrors:  rxErrors,
+						RxDropped: rxDropped,
+						TxBytes:   txBytes,
+						TxPackets: txPackets,
+						TxErrors:  txErrors,
+						TxDropped: txDropped,
+					},
+				}
+			} else {
+				statsJSON = statsJSONPost120
+			}
+
+			if !config.Stream && noStreamFirstFrame {
+				// prime the cpu stats so they aren't 0 in the final output
+				noStreamFirstFrame = false
+				continue
+			}
+
+			if err := enc.Encode(statsJSON); err != nil {
+				return err
+			}
+
+			if !config.Stream {
+				return nil
+			}
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
+
+func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} {
+	return daemon.statsCollector.Collect(c)
+}
+
+func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) {
+	daemon.statsCollector.Unsubscribe(c, ch)
+}
+
+// GetContainerStats collects all the stats published by a container
+func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) {
+	stats, err := daemon.stats(container)
+	if err != nil {
+		return nil, err
+	}
+
+	// We already have the network stats on Windows directly from HCS.
+	if !container.Config.NetworkDisabled && runtime.GOOS != "windows" {
+		if stats.Networks, err = daemon.getNetworkStats(container); err != nil {
+			return nil, err
+		}
+	}
+
+	return stats, nil
+}
diff --git a/daemon/stats/collector.go b/daemon/stats/collector.go
new file mode 100644
index 0000000..71bcff4
--- /dev/null
+++ b/daemon/stats/collector.go
@@ -0,0 +1,116 @@
+// +build !solaris
+
+package stats
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/pubsub"
+)
+
+// Collect registers the container with the collector and adds it to
+// the event loop for collection on the specified interval returning
+// a channel for the subscriber to receive on.
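+// A subscriber sketch (hypothetical; the daemon wires this up through
+// subscribeToContainerStats in daemon/stats.go):
+//
+//	ch := collector.Collect(c)
+//	defer collector.Unsubscribe(c, ch)
+//	for v := range ch {
+//		stats := v.(types.StatsJSON)
+//		_ = stats // consume each published sample
+//	}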
+	// it will grow enough in the first iteration
+	var pairs []publishersPair
+
+	for range time.Tick(s.interval) {
+		// it does not make sense in the first iteration,
+		// but saves allocations in further iterations
+		pairs = pairs[:0]
+
+		s.m.Lock()
+		for container, publisher := range s.publishers {
+			// copy pointers here to release the lock ASAP
+			pairs = append(pairs, publishersPair{container, publisher})
+		}
+		s.m.Unlock()
+		if len(pairs) == 0 {
+			continue
+		}
+
+		systemUsage, err := s.getSystemCPUUsage()
+		if err != nil {
+			logrus.Errorf("collecting system cpu usage: %v", err)
+			continue
+		}
+
+		onlineCPUs, err := s.getNumberOnlineCPUs()
+		if err != nil {
+			logrus.Errorf("collecting system online cpu count: %v", err)
+			continue
+		}
+
+		for _, pair := range pairs {
+			stats, err := s.supervisor.GetContainerStats(pair.container)
+			if err != nil {
+				if _, ok := err.(notRunningErr); !ok {
+					logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
+					continue
+				}
+
+				// publish empty stats containing only name and ID if not running
+				pair.publisher.Publish(types.StatsJSON{
+					Name: pair.container.Name,
+					ID:   pair.container.ID,
+				})
+				continue
+			}
+			// FIXME: move to containerd on Linux (not Windows)
+			stats.CPUStats.SystemUsage = systemUsage
+			stats.CPUStats.OnlineCPUs = onlineCPUs
+
+			pair.publisher.Publish(*stats)
+		}
+	}
+}
+
+type notRunningErr interface {
+	error
+	ContainerIsRunning() bool
+}
diff --git a/daemon/stats/collector_solaris.go b/daemon/stats/collector_solaris.go
new file mode 100644
index 0000000..3699d08
--- /dev/null
+++ b/daemon/stats/collector_solaris.go
@@ -0,0 +1,29 @@
+package stats
+
+import (
+	"github.com/docker/docker/container"
+)
+
+// platformNewStatsCollector performs platform specific initialisation of the
+// Collector structure. This is a no-op on Solaris.
+func platformNewStatsCollector(s *Collector) {
+}
+
+// Collect registers the container with the collector and adds it to
+// the event loop for collection on the specified interval returning
+// a channel for the subscriber to receive on.
+// Currently not supported on Solaris
+func (s *Collector) Collect(c *container.Container) chan interface{} {
+	return nil
+}
+
+// StopCollection closes the channels for all subscribers and removes
+// the container from metrics collection.
+// Currently not supported on Solaris
+func (s *Collector) StopCollection(c *container.Container) {
+}
+
+// Unsubscribe removes a specific subscriber from receiving updates for a container's stats.
+// Currently not supported on Solaris
+func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) {
+}
diff --git a/daemon/stats/collector_unix.go b/daemon/stats/collector_unix.go
new file mode 100644
index 0000000..cd522e0
--- /dev/null
+++ b/daemon/stats/collector_unix.go
@@ -0,0 +1,83 @@
+// +build !windows,!solaris
+
+package stats
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/opencontainers/runc/libcontainer/system"
+)
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+// platformNewStatsCollector performs platform specific initialisation of the
+// Collector structure.
+func platformNewStatsCollector(s *Collector) {
+	s.clockTicksPerSecond = uint64(system.GetClockTicks())
+}
+
+const nanoSecondsPerSecond = 1e9
+
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. An error is returned if the format of the underlying
+// file does not match.
+//
+// Uses /proc/stat as defined by Linux. Looks for the cpu
+// statistics line and then sums up the first seven fields
+// provided. See `man 5 proc` for details on specific field
+// information.
+func (s *Collector) getSystemCPUUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		s.bufReader.Reset(nil)
+		f.Close()
+	}()
+	s.bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = s.bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("invalid number of cpu fields")
+			}
+			var totalClockTicks uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
+				}
+				totalClockTicks += v
+			}
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				s.clockTicksPerSecond, nil
+		}
+	}
+	return 0, fmt.Errorf("invalid stat format: error trying to parse the '/proc/stat' file")
+}
+
+func (s *Collector) getNumberOnlineCPUs() (uint32, error) {
+	i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
+	// According to POSIX - errno is undefined after successful
+	// sysconf, and can be non-zero in several cases, so look for
+	// error in returned value not in errno.
+	// (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
+	if i == -1 {
+		return 0, err
+	}
+	return uint32(i), nil
+}
diff --git a/daemon/stats/collector_windows.go b/daemon/stats/collector_windows.go
new file mode 100644
index 0000000..5fb27ce
--- /dev/null
+++ b/daemon/stats/collector_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+
+package stats
+
+// platformNewStatsCollector performs platform specific initialisation of the
+// Collector structure. This is a no-op on Windows.
+func platformNewStatsCollector(s *Collector) {
+}
+
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. This is a no-op on Windows and always returns 0 with no
+// error.
+func (s *Collector) getSystemCPUUsage() (uint64, error) {
+	return 0, nil
+}
+
+func (s *Collector) getNumberOnlineCPUs() (uint32, error) {
+	return 0, nil
+}
diff --git a/daemon/stats/types.go b/daemon/stats/types.go
new file mode 100644
index 0000000..e48783c
--- /dev/null
+++ b/daemon/stats/types.go
@@ -0,0 +1,42 @@
+package stats
+
+import (
+	"bufio"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/pubsub"
+)
+
+type supervisor interface {
+	// GetContainerStats collects all the stats related to a container
+	GetContainerStats(container *container.Container) (*types.StatsJSON, error)
+}
+
+// NewCollector creates a stats collector that will poll the supervisor with the specified interval
+func NewCollector(supervisor supervisor, interval time.Duration) *Collector {
+	s := &Collector{
+		interval:   interval,
+		supervisor: supervisor,
+		publishers: make(map[*container.Container]*pubsub.Publisher),
+		bufReader:  bufio.NewReaderSize(nil, 128),
+	}
+
+	platformNewStatsCollector(s)
+
+	return s
+}
+
+// Collector manages and provides container resource stats
+type Collector struct {
+	m          sync.Mutex
+	supervisor supervisor
+	interval   time.Duration
+	publishers map[*container.Container]*pubsub.Publisher
+	bufReader  *bufio.Reader
+
+	// The following fields are not set on Windows currently.
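+	// clockTicksPerSecond comes from sysconf(_SC_CLK_TCK) (via
+	// system.GetClockTicks in collector_unix.go) and is used to convert
+	// the cumulative clock ticks read from /proc/stat into nanoseconds.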
+	clockTicksPerSecond uint64
+}
diff --git a/daemon/stats_collector.go b/daemon/stats_collector.go
new file mode 100644
index 0000000..7daf26f
--- /dev/null
+++ b/daemon/stats_collector.go
@@ -0,0 +1,26 @@
+package daemon
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/docker/docker/daemon/stats"
+	"github.com/docker/docker/pkg/system"
+)
+
+// newStatsCollector returns a new statsCollector that collects
+// stats for a registered container at the specified interval.
+// The collector allows non-running containers to be added
+// and will start processing stats when they are started.
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *stats.Collector {
+	// FIXME(vdemeester) move this elsewhere
+	if runtime.GOOS == "linux" {
+		meminfo, err := system.ReadMemInfo()
+		if err == nil && meminfo.MemTotal > 0 {
+			daemon.machineMemory = uint64(meminfo.MemTotal)
+		}
+	}
+	s := stats.NewCollector(daemon, interval)
+	go s.Run()
+	return s
+}
diff --git a/daemon/stats_unix.go b/daemon/stats_unix.go
new file mode 100644
index 0000000..d875607
--- /dev/null
+++ b/daemon/stats_unix.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+)
+
+// Resolve the network SandboxID in case the container reuses another container's network stack
+func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) {
+	curr := c
+	for curr.HostConfig.NetworkMode.IsContainer() {
+		containerID := curr.HostConfig.NetworkMode.ConnectedContainer()
+		connected, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return "", fmt.Errorf("Could not get container for %s", containerID)
+		}
+		curr = connected
+	}
+	return curr.NetworkSettings.SandboxID, nil
+}
+
+func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) {
+	sandboxID, err := daemon.getNetworkSandboxID(c)
+	if err != nil {
+		return nil, err
+	}
+
+	sb, err := daemon.netController.SandboxByID(sandboxID)
+	if err != nil {
+		return nil, err
+	}
+
+	lnstats, err := sb.Statistics()
+	if err != nil {
+		return nil, err
+	}
+
+	stats := make(map[string]types.NetworkStats)
+	// Convert libnetwork nw stats into api stats
+	for ifName, ifStats := range lnstats {
+		stats[ifName] = types.NetworkStats{
+			RxBytes:   ifStats.RxBytes,
+			RxPackets: ifStats.RxPackets,
+			RxErrors:  ifStats.RxErrors,
+			RxDropped: ifStats.RxDropped,
+			TxBytes:   ifStats.TxBytes,
+			TxPackets: ifStats.TxPackets,
+			TxErrors:  ifStats.TxErrors,
+			TxDropped: ifStats.TxDropped,
+		}
+	}
+
+	return stats, nil
+}
diff --git a/daemon/stats_windows.go b/daemon/stats_windows.go
new file mode 100644
index 0000000..f8e6f6f
--- /dev/null
+++ b/daemon/stats_windows.go
@@ -0,0 +1,11 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/container"
+)
+
+// Windows network stats are obtained directly through HCS, hence this is a no-op.
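+// It returns an empty (non-nil) map so that callers can range over or
+// merge the result without a nil check.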
+func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + return make(map[string]types.NetworkStats), nil +} diff --git a/daemon/stop.go b/daemon/stop.go new file mode 100644 index 0000000..6a4776d --- /dev/null +++ b/daemon/stop.go @@ -0,0 +1,91 @@ +package daemon + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + containerpkg "github.com/docker/docker/container" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. +func (daemon *Daemon) ContainerStop(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerStop(container, *seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + daemon.stopHealthchecks(container) + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // it's probably because it's already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we force kill it. + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + } + + // 2. Wait for the process to exit on its own + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. 
If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + // Wait without a timeout, ignore result. + _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/daemon/top_unix.go b/daemon/top_unix.go new file mode 100644 index 0000000..4d94d4a --- /dev/null +++ b/daemon/top_unix.go @@ -0,0 +1,155 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" +) + +func validatePSArgs(psArgs string) error { + // NOTE: \\s does not detect unicode whitespaces. + // So we use fieldsASCII instead of strings.Fields in parsePSOutput. + // See https://github.com/docker/docker/pull/24358 + re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") + for _, group := range re.FindAllStringSubmatch(psArgs, -1) { + if len(group) >= 3 { + k := group[1] + v := group[2] + if k != "pid" { + return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) + } + } + } + return nil +} + +// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces +func fieldsASCII(s string) []string { + fn := func(r rune) bool { + switch r { + case '\t', '\n', '\f', '\r', ' ': + return true + } + return false + } + return strings.FieldsFunc(s, fn) +} + +func appendProcess2ProcList(procList *container.ContainerTopOKBody, fields []string) { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) +} + +func hasPid(pids []int, pid int) bool { + for _, i := range pids { + if i == pid { + return true + } + } + return false +} + +func parsePSOutput(output []byte, pids []int) (*container.ContainerTopOKBody, error) { + procList := &container.ContainerTopOKBody{} + + lines := strings.Split(string(output), "\n") + procList.Titles = fieldsASCII(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + // fixing #30580, be able to display thread line also when "m" option used + // in "docker top" client command + preContainedPidFlag := false + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := fieldsASCII(line) + + var ( + p int + err error + ) + + if fields[pidIndex] == "-" { + if preContainedPidFlag { + appendProcess2ProcList(procList, fields) + } + continue + } + p, err = strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + if hasPid(pids, p) { + preContainedPidFlag = true + appendProcess2ProcList(procList, fields) + continue + } + preContainedPidFlag = false + } + return procList, nil +} + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. 
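+// Arguments that alias a non-PID column to a header beginning with "PID"
+// (e.g. "-o uid=PID") are rejected by validatePSArgs, since parsePSOutput
+// relies on the PID column to match processes to the container.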
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) { + if psArgs == "" { + psArgs = "-ef" + } + + if err := validatePSArgs(psArgs); err != nil { + return nil, err + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + procList, err := parsePSOutput(output, pids) + if err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/daemon/top_unix_test.go b/daemon/top_unix_test.go new file mode 100644 index 0000000..9a3749f --- /dev/null +++ b/daemon/top_unix_test.go @@ -0,0 +1,79 @@ +//+build !windows + +package daemon + +import ( + "testing" +) + +func TestContainerTopValidatePSArgs(t *testing.T) { + tests := map[string]bool{ + "ae -o uid=PID": true, + "ae -o \"uid= PID\"": true, // ascii space (0x20) + "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) + "ae o uid=PID": true, + "aeo uid=PID": true, + "ae -O uid=PID": true, + "ae -o pid=PID2 -o uid=PID": true, + "ae -o pid=PID": false, + "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this + "aeo pid=PID": false, + "ae": false, + "": false, + } + for psArgs, errExpected := range tests { + err := validatePSArgs(psArgs) + t.Logf("tested %q, got err=%v", psArgs, err) + if errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, psArgs) + } + if !errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, psArgs) + } + } +} + +func TestContainerTopParsePSOutput(t *testing.T) { + tests := []struct { + output []byte + pids []int + errExpected bool + }{ + {[]byte(` PID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, false}, + {[]byte(` UID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, true}, + // unicode space (U+2003, 0xe2 0x80 0x83) + {[]byte(` PID COMMAND + 42 foo + 43 bar + - - + 100 baz +`), []int{42, 43}, true}, + // the first space is U+2003, the second one is ascii. + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + } + + for _, f := range tests { + _, err := parsePSOutput(f.output, f.pids) + t.Logf("tested %q, got err=%v", string(f.output), err) + if f.errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, string(f.output)) + } + if !f.errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) + } + } +} diff --git a/daemon/top_windows.go b/daemon/top_windows.go new file mode 100644 index 0000000..000720b --- /dev/null +++ b/daemon/top_windows.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "errors" + "fmt" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +// ContainerTop handles `docker top` client requests. +// Future considerations: +// -- Windows users are far more familiar with CPU% total. +// Further, users on Windows rarely see user/kernel CPU stats split. +// The kernel returns everything in terms of 100ns. 
To obtain CPU%, we could do something like docker stats does, which takes two samples and computes the difference. Unfortunately this would slow the stat call down and require two kernel calls. So instead, we do something similar to Linux and display the CPU as combined HH:MM:SS.mmm.
+// -- Perhaps we could add an argument to display "raw" stats
+// -- "Memory" is an extremely overloaded term in Windows. Hence we do what
+//    task manager does and use the private working set as the memory counter.
+//    We could return more info for those who really understand how memory
+//    management works in Windows if we introduced "raw" stats (above).
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*containertypes.ContainerTopOKBody, error) {
+	// There is no real equivalent to the Linux 'ps' command on Windows
+	if psArgs != "" {
+		return nil, errors.New("Windows does not support arguments to top")
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	s, err := daemon.containerd.Summary(container.ID)
+	if err != nil {
+		return nil, err
+	}
+	procList := &containertypes.ContainerTopOKBody{}
+	procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"}
+
+	for _, j := range s {
+		d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds
+		procList.Processes = append(procList.Processes, []string{
+			j.ImageName,
+			fmt.Sprint(j.ProcessId),
+			fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000),
+			units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))})
+	}
+	return procList, nil
+}
diff --git a/daemon/unpause.go b/daemon/unpause.go
new file mode 100644
index 0000000..e66b386
--- /dev/null
+++ b/daemon/unpause.go
@@ -0,0 +1,38 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+)
+
+// ContainerUnpause unpauses a container
+func (daemon *Daemon) ContainerUnpause(name string) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if err := daemon.containerUnpause(container); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// containerUnpause resumes the container execution after the container is paused.
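+// The container lock is held across the containerd Resume call so the
+// paused state cannot change underneath it.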
+func (daemon *Daemon) containerUnpause(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot unpause a container which is not paused
+	if !container.Paused {
+		return fmt.Errorf("Container %s is not paused", container.ID)
+	}
+
+	if err := daemon.containerd.Resume(container.ID); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
+	}
+
+	return nil
+}
diff --git a/daemon/update.go b/daemon/update.go
new file mode 100644
index 0000000..a65cbd5
--- /dev/null
+++ b/daemon/update.go
@@ -0,0 +1,86 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/container"
+)
+
+// ContainerUpdate updates configuration of the container
+func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
+	var warnings []string
+
+	warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
+	if err != nil {
+		return container.ContainerUpdateOKBody{Warnings: warnings}, err
+	}
+
+	if err := daemon.update(name, hostConfig); err != nil {
+		return container.ContainerUpdateOKBody{Warnings: warnings}, err
+	}
+
+	return container.ContainerUpdateOKBody{Warnings: warnings}, nil
+}
+
+func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
+	if hostConfig == nil {
+		return nil
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	restoreConfig := false
+	backupHostConfig := *container.HostConfig
+	defer func() {
+		if restoreConfig {
+			container.Lock()
+			container.HostConfig = &backupHostConfig
+			container.CheckpointTo(daemon.containersReplica)
+			container.Unlock()
+		}
+	}()
+
+	if container.RemovalInProgress || container.Dead {
+		return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be updated"))
+	}
+
+	container.Lock()
+	if err := container.UpdateContainer(hostConfig); err != nil {
+		restoreConfig = true
+		container.Unlock()
+		return errCannotUpdate(container.ID, err)
+	}
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		restoreConfig = true
+		container.Unlock()
+		return errCannotUpdate(container.ID, err)
+	}
+	container.Unlock()
+
+	// if the restart policy changed, we need to update the container monitor
+	if hostConfig.RestartPolicy.Name != "" {
+		container.UpdateMonitor(hostConfig.RestartPolicy)
+	}
+
+	// If the container is not running, updating the hostConfig struct is enough;
+	// resources will be updated when the container is started again.
+	// If the container is running (including paused), we need to push the updated
+	// configs to the real world.
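+	// A restarting container is deliberately skipped here; its resources
+	// are picked up from the updated HostConfig on the next start.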
+ if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/daemon/update_linux.go b/daemon/update_linux.go new file mode 100644 index 0000000..c128967 --- /dev/null +++ b/daemon/update_linux.go @@ -0,0 +1,32 @@ +// +build linux + +package daemon + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint64(resources.BlkioWeight) + r.CpuShares = uint64(resources.CPUShares) + if resources.NanoCPUs != 0 { + r.CpuPeriod = uint64(100 * time.Millisecond / time.Microsecond) + r.CpuQuota = uint64(resources.NanoCPUs) * r.CpuPeriod / 1e9 + } else { + r.CpuPeriod = uint64(resources.CPUPeriod) + r.CpuQuota = uint64(resources.CPUQuota) + } + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint64(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint64(resources.MemorySwap) + } + r.MemoryReservation = uint64(resources.MemoryReservation) + r.KernelMemoryLimit = uint64(resources.KernelMemory) + return r +} diff --git a/daemon/update_solaris.go b/daemon/update_solaris.go new file mode 100644 index 0000000..f3b545c --- /dev/null +++ b/daemon/update_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/daemon/update_windows.go b/daemon/update_windows.go new file mode 100644 index 0000000..0146626 --- /dev/null +++ b/daemon/update_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/daemon/volumes.go b/daemon/volumes.go new file mode 100644 index 0000000..6f24f05 --- /dev/null +++ b/daemon/volumes.go @@ -0,0 +1,395 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/Sirupsen/logrus" + dockererrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. 
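+	// It is surfaced, for example, when data would be copied into a
+	// volume that is mounted read-only.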
+	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
+)
+
+type mounts []container.Mount
+
+// volumeToAPIType converts a volume.Volume to the type used by the Engine API
+func volumeToAPIType(v volume.Volume) *types.Volume {
+	createdAt, _ := v.CreatedAt()
+	tv := &types.Volume{
+		Name:      v.Name(),
+		Driver:    v.DriverName(),
+		CreatedAt: createdAt.Format(time.RFC3339),
+	}
+	if v, ok := v.(volume.DetailedVolume); ok {
+		tv.Labels = v.Labels()
+		tv.Options = v.Options()
+		tv.Scope = v.Scope()
+	}
+
+	return tv
+}
+
+// Len returns the number of mounts. Used in sorting.
+func (m mounts) Len() int {
+	return len(m)
+}
+
+// Less returns true if the number of parts (a/b/c would be 3 parts) in the
+// mount indexed by parameter 1 is less than that of the mount indexed by
+// parameter 2. Used in sorting.
+func (m mounts) Less(i, j int) bool {
+	return m.parts(i) < m.parts(j)
+}
+
+// Swap swaps two items in an array of mounts. Used in sorting
+func (m mounts) Swap(i, j int) {
+	m[i], m[j] = m[j], m[i]
+}
+
+// parts returns the number of parts in the destination of a mount. Used in sorting.
+func (m mounts) parts(i int) int {
+	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
+}
+
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
+// It follows this sequence to decide what to mount in each final destination:
+//
+// 1. Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+// 4. Cleanup old volumes that are about to be reassigned.
+func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
+	binds := map[string]bool{}
+	mountPoints := map[string]*volume.MountPoint{}
+	defer func() {
+		// clean up the container mount points if we return with an error
+		if retErr != nil {
+			for _, m := range mountPoints {
+				if m.Volume == nil {
+					continue
+				}
+				daemon.volumes.Dereference(m.Volume, container.ID)
+			}
+		}
+	}()
+
+	dereferenceIfExists := func(destination string) {
+		if v, ok := mountPoints[destination]; ok {
+			logrus.Debugf("Duplicate mount point '%s'", destination)
+			if v.Volume != nil {
+				daemon.volumes.Dereference(v.Volume, container.ID)
+			}
+		}
+	}
+
+	// 1. Read already configured mount points.
+	for destination, point := range container.MountPoints {
+		mountPoints[destination] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := volume.ParseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := &volume.MountPoint{
+				Type:        m.Type,
+				Name:        m.Name,
+				Source:      m.Source,
+				RW:          m.RW && volume.ReadWrite(mode),
+				Driver:      m.Driver,
+				Destination: m.Destination,
+				Propagation: m.Propagation,
+				Spec:        m.Spec,
+				CopyData:    false,
+			}
+
+			if len(cp.Source) == 0 {
+				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+			dereferenceIfExists(cp.Destination)
+			mountPoints[cp.Destination] = cp
+		}
+	}
+
+	// 3.
Read bind mounts + for _, b := range hostConfig.Binds { + bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + if err != nil { + return err + } + + // #10618 + _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] + if binds[bind.Destination] || tmpfsExists { + return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) + } + + if bind.Type == mounttypes.TypeVolume { + // create the volume + v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // bind.Name is an already existing volume, we need to use that here + bind.Driver = v.DriverName() + if bind.Driver == volume.DefaultDriverName { + setBindModeIfNull(bind) + } + } + + binds[bind.Destination] = true + dereferenceIfExists(bind.Destination) + mountPoints[bind.Destination] = bind + } + + for _, cfg := range hostConfig.Mounts { + mp, err := volume.ParseMountSpec(cfg) + if err != nil { + return dockererrors.NewBadRequestError(err) + } + + if binds[mp.Destination] { + return fmt.Errorf("Duplicate mount point '%s'", cfg.Target) + } + + if mp.Type == mounttypes.TypeVolume { + var v volume.Volume + if cfg.VolumeOptions != nil { + var driverOpts map[string]string + if cfg.VolumeOptions.DriverConfig != nil { + driverOpts = cfg.VolumeOptions.DriverConfig.Options + } + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels) + } else { + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil) + } + if err != nil { + return err + } + + mp.Volume = v + mp.Name = v.Name() + mp.Driver = v.DriverName() + + // only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow + if cv, ok := v.(interface { + CachedPath() string + }); ok { + mp.Source = cv.CachedPath() + } + } + + binds[mp.Destination] = true + dereferenceIfExists(mp.Destination) + mountPoints[mp.Destination] = mp + } + + container.Lock() + + // 4. Cleanup old volumes that are about to be reassigned. + for _, m := range mountPoints { + if m.BackwardsCompatible() { + if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { + daemon.volumes.Dereference(mp.Volume, container.ID) + } + } + } + container.MountPoints = mountPoints + + container.Unlock() + + return nil +} + +// lazyInitializeVolume initializes a mountpoint's volume if needed. +// This happens after a daemon restart. +func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { + if len(m.Driver) > 0 && m.Volume == nil { + v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) + if err != nil { + return err + } + m.Volume = v + } + return nil +} + +// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13 +// mount configurations +// The container lock should not be held when calling this function. 
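+// (backportMountSpec takes the lock itself, and recursively backports and
+// briefly locks any volumes-from source containers.)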
+// Changes are only made in-memory and may make changes to containers referenced +// by `container.HostConfig.VolumesFrom` +func (daemon *Daemon) backportMountSpec(container *container.Container) { + container.Lock() + defer container.Unlock() + + maybeUpdate := make(map[string]bool) + for _, mp := range container.MountPoints { + if mp.Spec.Source != "" && mp.Type != "" { + continue + } + maybeUpdate[mp.Destination] = true + } + if len(maybeUpdate) == 0 { + return + } + + mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts)) + for _, m := range container.HostConfig.Mounts { + mountSpecs[m.Target] = true + } + + binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds)) + for _, rawSpec := range container.HostConfig.Binds { + mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) + if err != nil { + logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport") + continue + } + binds[mp.Destination] = mp + } + + volumesFrom := make(map[string]volume.MountPoint) + for _, fromSpec := range container.HostConfig.VolumesFrom { + from, _, err := volume.ParseVolumesFrom(fromSpec) + if err != nil { + logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") + continue + } + fromC, err := daemon.GetContainer(from) + if err != nil { + logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container") + continue + } + + // make sure from container's specs have been backported + daemon.backportMountSpec(fromC) + + fromC.Lock() + for t, mp := range fromC.MountPoints { + volumesFrom[t] = *mp + } + fromC.Unlock() + } + + needsUpdate := func(containerMount, other *volume.MountPoint) bool { + if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) { + return true + } + return false + } + + // main + for _, cm := range container.MountPoints { + if !maybeUpdate[cm.Destination] { + continue + } + // nothing to backport if from hostconfig.Mounts + if mountSpecs[cm.Destination] { + continue + } + + if mp, exists := binds[cm.Destination]; exists { + if needsUpdate(cm, mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Name != "" { + if mp, exists := volumesFrom[cm.Destination]; exists { + if needsUpdate(cm, &mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type + } + continue + } + + if cm.Type != "" { + // probably specified via the hostconfig.Mounts + continue + } + + // anon volume + cm.Type = mounttypes.TypeVolume + cm.Spec.Type = mounttypes.TypeVolume + } else { + if cm.Type != "" { + // already updated + continue + } + + cm.Type = mounttypes.TypeBind + cm.Spec.Type = mounttypes.TypeBind + cm.Spec.Source = cm.Source + if cm.Propagation != "" { + cm.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: cm.Propagation, + } + } + } + + cm.Spec.Target = cm.Destination + cm.Spec.ReadOnly = !cm.RW + } +} + +func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { + localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return fmt.Errorf("can't retrieve local volume driver: %v", err) + } + vols, err := localVolumeDriver.List() + if err != nil { + return fmt.Errorf("can't retrieve local volumes: %v", err) + } + + for _, v := range vols { + name := v.Name() + vol, err := daemon.volumes.Get(name) + if err != nil { + logrus.Warnf("failed to retrieve volume %s from store: %v", name, err) + } else { + // 
daemon.volumes.Get will return DetailedVolume
+			v = vol
+		}
+
+		err = fn(v)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go
new file mode 100644
index 0000000..450d17f
--- /dev/null
+++ b/daemon/volumes_unit_test.go
@@ -0,0 +1,39 @@
+package daemon
+
+import (
+	"testing"
+
+	"github.com/docker/docker/volume"
+)
+
+func TestParseVolumesFrom(t *testing.T) {
+	cases := []struct {
+		spec    string
+		expID   string
+		expMode string
+		fail    bool
+	}{
+		{"", "", "", true},
+		{"foobar", "foobar", "rw", false},
+		{"foobar:rw", "foobar", "rw", false},
+		{"foobar:ro", "foobar", "ro", false},
+		{"foobar:baz", "", "", true},
+	}
+
+	for _, c := range cases {
+		id, mode, err := volume.ParseVolumesFrom(c.spec)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
+			}
+			continue
+		}
+
+		if id != c.expID {
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec)
+		}
+		if mode != c.expMode {
+			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
+		}
+	}
+}
diff --git a/daemon/volumes_unix.go b/daemon/volumes_unix.go
new file mode 100644
index 0000000..0a4cbf8
--- /dev/null
+++ b/daemon/volumes_unix.go
@@ -0,0 +1,232 @@
+// +build !windows
+
+// TODO(amitkris): We need to split this file for solaris.
+
+package daemon
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
+	"github.com/pkg/errors"
+)
+
+// setupMounts iterates through each of the mount points for a container and
+// calls Setup() on each. It also looks to see if it is a network mount such as
+// /etc/resolv.conf, and if it is not, appends it to the array of mounts.
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mounts []container.Mount
+	// TODO: tmpfs mounts should be part of Mountpoints
+	tmpfsMounts := make(map[string]bool)
+	tmpfsMountInfo, err := c.TmpfsMounts()
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range tmpfsMountInfo {
+		tmpfsMounts[m.Destination] = true
+	}
+	for _, m := range c.MountPoints {
+		if tmpfsMounts[m.Destination] {
+			continue
+		}
+		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
+			return nil, err
+		}
+		// If the daemon is being shutdown, we should not let a container start if it is trying to
+		// mount the socket the daemon is listening on. During daemon shutdown, the socket
+		// (/var/run/docker.sock by default) doesn't exist anymore, causing the call to m.Setup to
+		// create a directory instead. This in turn will prevent the daemon from restarting.
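+		// checkfunc below enforces this: while the daemon is shutting
+		// down, any mount whose source matches one of the daemon's
+		// listening sockets is rejected.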
+		checkfunc := func(m *volume.MountPoint) error {
+			if _, exist := daemon.hosts[m.Source]; exist && daemon.IsShuttingDown() {
+				return fmt.Errorf("Could not mount %q to container while the daemon is shutting down", m.Source)
+			}
+			return nil
+		}
+
+		path, err := m.Setup(c.MountLabel, daemon.idMappings.RootPair(), checkfunc)
+		if err != nil {
+			return nil, err
+		}
+		if !c.TrySetNetworkMount(m.Destination, path) {
+			mnt := container.Mount{
+				Source:      path,
+				Destination: m.Destination,
+				Writable:    m.RW,
+				Propagation: string(m.Propagation),
+			}
+			if m.Volume != nil {
+				attributes := map[string]string{
+					"driver":      m.Volume.DriverName(),
+					"container":   c.ID,
+					"destination": m.Destination,
+					"read/write":  strconv.FormatBool(m.RW),
+					"propagation": string(m.Propagation),
+				}
+				daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes)
+			}
+			mounts = append(mounts, mnt)
+		}
+	}
+
+	mounts = sortMounts(mounts)
+	netMounts := c.NetworkMounts()
+	// if we are going to mount any of the network files from container
+	// metadata, the ownership must be set properly for potential container
+	// remapped root (user namespaces)
+	rootIDs := daemon.idMappings.RootPair()
+	for _, mount := range netMounts {
+		if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil {
+			return nil, err
+		}
+	}
+	return append(mounts, netMounts...), nil
+}
+
+// sortMounts sorts an array of mounts in lexicographic order. This ensures that
+// when mounting, the mounts don't shadow other mounts. For example, if mounting
+// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first.
+func sortMounts(m []container.Mount) []container.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+// setBindModeIfNull is platform specific processing to ensure the
+// shared mode is set to 'z' if it is null. This is called in the case
+// of processing a named volume and not a typical bind.
+func setBindModeIfNull(bind *volume.MountPoint) {
+	if bind.Mode == "" {
+		bind.Mode = "z"
+	}
+}
+
+// migrateVolume links the contents of a volume created pre Docker 1.7
+// into the location expected by the local driver.
+// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data.
+// It preserves the volume json configuration generated pre Docker 1.7 to be able to
+// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility.
+func migrateVolume(id, vfs string) error {
+	l, err := volumedrivers.GetDriver(volume.DefaultDriverName)
+	if err != nil {
+		return err
+	}
+
+	newDataPath := l.(*local.Root).DataPath(id)
+	fi, err := os.Stat(newDataPath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	if fi != nil && fi.IsDir() {
+		return nil
+	}
+
+	return os.Symlink(vfs, newDataPath)
+}
+
+// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7.
+// It reads the container configuration and creates valid mount points for the old volumes.
+func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// Inspect old structures only when we're upgrading from old versions
+	// to versions >= 1.7 and the MountPoints have not been populated with volumes data.
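+	// The pre-1.7 config format stored destination -> host path pairs in
+	// Volumes, with the read-write flags kept separately in VolumesRW: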
+	type volumes struct {
+		Volumes   map[string]string
+		VolumesRW map[string]bool
+	}
+	cfgPath, err := container.ConfigPath()
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(cfgPath)
+	if err != nil {
+		return errors.Wrap(err, "could not open container config")
+	}
+	defer f.Close()
+	var cv volumes
+	if err := json.NewDecoder(f).Decode(&cv); err != nil {
+		return errors.Wrap(err, "could not decode container config")
+	}
+
+	if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 {
+		for destination, hostPath := range cv.Volumes {
+			vfsPath := filepath.Join(daemon.root, "vfs", "dir")
+			rw := cv.VolumesRW != nil && cv.VolumesRW[destination]
+
+			if strings.HasPrefix(hostPath, vfsPath) {
+				id := filepath.Base(hostPath)
+				v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil)
+				if err != nil {
+					return err
+				}
+				if err := migrateVolume(id, hostPath); err != nil {
+					return err
+				}
+				container.AddMountPointWithVolume(destination, v, true)
+			} else { // Bind mount
+				m := volume.MountPoint{Source: hostPath, Destination: destination, RW: rw}
+				container.MountPoints[destination] = &m
+			}
+		}
+	}
+	return nil
+}
+
+func (daemon *Daemon) mountVolumes(container *container.Container) error {
+	mounts, err := daemon.setupMounts(container)
+	if err != nil {
+		return err
+	}
+
+	for _, m := range mounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			return err
+		}
+
+		var stat os.FileInfo
+		stat, err = os.Stat(m.Source)
+		if err != nil {
+			return err
+		}
+		if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
+			return err
+		}
+
+		opts := "rbind,ro"
+		if m.Writable {
+			opts = "rbind,rw"
+		}
+
+		if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil {
+			return err
+		}
+
+		// mountVolumes() seems to be called for temporary mounts
+		// outside the container. Soon these will be unmounted with
+		// the lazy unmount option and, given we have mounted the rbind,
+		// all the submounts will propagate if these are shared. If
+		// the daemon is running in the host namespace and has / as shared
+		// then these unmounts will propagate and unmount the original
+		// mount as well. So make all these mounts rprivate.
+		// Do not use the propagation property of the volume as that should
+		// apply only when mounting happens inside the container.
+ if err := mount.MakeRPrivate(dest); err != nil { + return err + } + } + + return nil +} diff --git a/daemon/volumes_unix_test.go b/daemon/volumes_unix_test.go new file mode 100644 index 0000000..dcd6977 --- /dev/null +++ b/daemon/volumes_unix_test.go @@ -0,0 +1,256 @@ +// +build !windows + +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +func TestBackportMountSpec(t *testing.T) { + d := Daemon{containers: container.NewMemoryStore()} + + c := &container.Container{ + State: &container.State{}, + MountPoints: map[string]*volume.MountPoint{ + "/apple": {Destination: "/apple", Source: "/var/lib/docker/volumes/12345678", Name: "12345678", RW: true, CopyData: true}, // anonymous volume + "/banana": {Destination: "/banana", Source: "/var/lib/docker/volumes/data", Name: "data", RW: true, CopyData: true}, // named volume + "/cherry": {Destination: "/cherry", Source: "/var/lib/docker/volumes/data", Name: "data", CopyData: true}, // RO named volume + "/dates": {Destination: "/dates", Source: "/var/lib/docker/volumes/data", Name: "data"}, // named volume nocopy + "/elderberry": {Destination: "/elderberry", Source: "/var/lib/docker/volumes/data", Name: "data"}, // masks anon vol + "/fig": {Destination: "/fig", Source: "/data", RW: true}, // RW bind + "/guava": {Destination: "/guava", Source: "/data", RW: false, Propagation: "shared"}, // RO bind + propagation + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, // volumes-from + + // partially configured mountpoint due to #32613 + // specifically, `mp.Spec.Source` is not set + "/honeydew": { + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/honeydew", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + }, + + // from hostconfig.Mounts + "/jambolan": { + Type: mounttypes.TypeVolume, + Destination: "/jambolan", + Source: "/var/lib/docker/volumes/data", + RW: true, + Name: "data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/jambolan", Source: "data"}, + }, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/banana", + "data:/cherry:ro", + "data:/dates:ro,nocopy", + "data:/elderberry:ro,nocopy", + "/data:/fig", + "/data:/guava:ro,shared", + "data:/honeydew:nocopy", + }, + VolumesFrom: []string{"1:ro"}, + Mounts: []mounttypes.Mount{ + {Type: mounttypes.TypeVolume, Target: "/jambolan"}, + }, + }, + Config: &containertypes.Config{Volumes: map[string]struct{}{ + "/apple": {}, + "/elderberry": {}, + }}, + } + + d.containers.Add("1", &container.Container{ + State: &container.State{}, + ID: "1", + MountPoints: map[string]*volume.MountPoint{ + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/kumquat:ro", + }, + }, + }) + + type expected struct { + mp *volume.MountPoint + comment string + } + + pretty := func(mp *volume.MountPoint) string { + b, err := json.MarshalIndent(mp, "\t", " ") + if err != nil { + return fmt.Sprintf("%#v", mp) + } + return string(b) + } + + for _, x := range []expected{ + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/apple", + RW: true, + Name: "12345678", + Source: 
"/var/lib/docker/volumes/12345678", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "", + Target: "/apple", + }, + }, + comment: "anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/banana", + RW: true, + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/banana", + }, + }, + comment: "named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/cherry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/cherry", + ReadOnly: true, + }, + }, + comment: "read-only named volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/dates", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/dates", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "named volume with nocopy", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/elderberry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/elderberry", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "masks an anonymous volume", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/fig", + Source: "/data", + RW: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/fig", + }, + }, + comment: "bind mount with read/write", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/guava", + Source: "/data", + RW: false, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/guava", + ReadOnly: true, + BindOptions: &mounttypes.BindOptions{Propagation: "shared"}, + }, + }, + comment: "bind mount with read/write + shared propgation", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Source: "/var/lib/docker/volumes/data", + RW: true, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/honeydew", + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + { + mp: &(*c.MountPoints["/jambolan"]), // copy the mountpoint, expect no changes + comment: "volume defined in mounts API", + }, + { + mp: &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/kumquat", + Source: "/var/lib/docker/volumes/data", + RW: false, + Name: "data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/kumquat", + ReadOnly: true, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + } { + + mp := c.MountPoints[x.mp.Destination] + d.backportMountSpec(c) + + if !reflect.DeepEqual(mp.Spec, x.mp.Spec) { + t.Fatalf("%s\nexpected:\n\t%s\n\ngot:\n\t%s", x.comment, pretty(x.mp), pretty(mp)) + } + } +} diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go new file mode 100644 index 0000000..62c9e23 --- /dev/null +++ b/daemon/volumes_windows.go @@ -0,0 +1,48 @@ +// +build windows + +package daemon + 
+import (
+	"sort"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/volume"
+)
+
+// setupMounts configures the mount points for a container by appending each
+// of the configured mounts on the container to the OCI mount structure
+// which will ultimately be passed into the oci runtime during container creation.
+// It also ensures the mounts are lexicographically sorted.
+
+// BUGBUG TODO Windows containerd. This would be much better if it returned
+// an array of runtime spec mounts, not container mounts. Then no need to
+// do multiple transitions.
+
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mnts []container.Mount
+	for _, mount := range c.MountPoints { // type is volume.MountPoint
+		if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil {
+			return nil, err
+		}
+		s, err := mount.Setup(c.MountLabel, idtools.IDPair{0, 0}, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		mnts = append(mnts, container.Mount{
+			Source:      s,
+			Destination: mount.Destination,
+			Writable:    mount.RW,
+		})
+	}
+
+	sort.Sort(mounts(mnts))
+	return mnts, nil
+}
+
+// setBindModeIfNull is platform specific processing which is a no-op on
+// Windows.
+func setBindModeIfNull(bind *volume.MountPoint) {
+	return
+}
diff --git a/daemon/wait.go b/daemon/wait.go
new file mode 100644
index 0000000..76c16b9
--- /dev/null
+++ b/daemon/wait.go
@@ -0,0 +1,22 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"golang.org/x/net/context"
+)
+
+// ContainerWait waits until the given container is in a certain state
+// indicated by the given condition. If the container is not found, a nil
+// channel and non-nil error is returned immediately. If the container is
+// found, a status result will be sent on the returned channel once the wait
+// condition is met or if an error occurs waiting for the container (such as a
+// context timeout or cancellation). On a successful wait, the exit code of the
+// container is returned in the status with a nil Err() value.
+func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error) {
+	cntr, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return cntr.Wait(ctx, condition), nil
+}
diff --git a/daemon/workdir.go b/daemon/workdir.go
new file mode 100644
index 0000000..6360f24
--- /dev/null
+++ b/daemon/workdir.go
@@ -0,0 +1,20 @@
+package daemon
+
+// ContainerCreateWorkdir creates the working directory. This solves the
+// issue arising from https://github.com/docker/docker/issues/27545,
+// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix
+// was too expensive in terms of performance on Windows. Instead,
+// https://github.com/docker/docker/pull/28514 introduces this new functionality
+// where the builder calls into the backend here to create the working directory.
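+// The container's filesystem is mounted only for the duration of this call
+// and is unmounted again before returning.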
+func (daemon *Daemon) ContainerCreateWorkdir(cID string) error {
+	container, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	err = daemon.Mount(container)
+	if err != nil {
+		return err
+	}
+	defer daemon.Unmount(container)
+	return container.SetupWorkingDirectory(daemon.idMappings.RootPair())
+}
diff --git a/distribution/config.go b/distribution/config.go
new file mode 100644
index 0000000..1765e65
--- /dev/null
+++ b/distribution/config.go
@@ -0,0 +1,264 @@
+package distribution
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"runtime"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/system"
+	refstore "github.com/docker/docker/reference"
+	"github.com/docker/docker/registry"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+	"golang.org/x/net/context"
+)
+
+// Config stores configuration for communicating
+// with a registry.
+type Config struct {
+	// MetaHeaders stores HTTP headers with metadata about the image
+	MetaHeaders map[string][]string
+	// AuthConfig holds authentication credentials for authenticating with
+	// the registry.
+	AuthConfig *types.AuthConfig
+	// ProgressOutput is the interface for showing the status of the pull
+	// operation.
+	ProgressOutput progress.Output
+	// RegistryService is the registry service to use for TLS configuration
+	// and endpoint lookup.
+	RegistryService registry.Service
+	// ImageEventLogger notifies about events for a given image
+	ImageEventLogger func(id, name, action string)
+	// MetadataStore is the storage backend for distribution-specific
+	// metadata.
+	MetadataStore metadata.Store
+	// ImageStore manages images.
+	ImageStore ImageConfigStore
+	// ReferenceStore manages tags. This value is optional; when excluded,
+	// content will not be tagged.
+	ReferenceStore refstore.Store
+	// RequireSchema2 ensures that only schema2 manifests are used.
+	RequireSchema2 bool
+}
+
+// ImagePullConfig stores pull configuration.
+type ImagePullConfig struct {
+	Config
+
+	// DownloadManager manages concurrent pulls.
+	DownloadManager RootFSDownloadManager
+	// Schema2Types lists the schema2 configuration types allowed
+	// by the pull operation.
+	Schema2Types []string
+	// Platform is the requested platform of the image being pulled; it is
+	// used to validate the image when the host platform supports multiple
+	// image operating systems.
+	Platform string
+}
+
+// ImagePushConfig stores push configuration.
+type ImagePushConfig struct {
+	Config
+
+	// ConfigMediaType is the configuration media type for
+	// schema2 manifests.
+	ConfigMediaType string
+	// LayerStore manages layers.
+	LayerStore PushLayerProvider
+	// TrustKey is the private key for legacy signatures. This is typically
+	// an ephemeral key, since these signatures are no longer verified.
+	TrustKey libtrust.PrivateKey
+	// UploadManager dispatches uploads.
+	UploadManager *xfer.LayerUploadManager
+}
+
+// ImageConfigStore handles storing and getting image configurations
+// by digest. It also allows getting an image configuration's rootfs from the
+// configuration.
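+//
+// Illustrative sketch (editor's addition, not part of this patch), where
+// "store" is any ImageConfigStore and configJSON a raw image configuration:
+//
+//	dgst, err := store.Put(configJSON)
+//	if err == nil {
+//		raw, _ := store.Get(dgst) // raw holds the same bytes as configJSON
+//	}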
+type ImageConfigStore interface {
+	Put([]byte) (digest.Digest, error)
+	Get(digest.Digest) ([]byte, error)
+	GetTarSeekStream(digest.Digest) (ioutils.ReadSeekCloser, error)
+	RootFSAndPlatformFromConfig([]byte) (*image.RootFS, layer.Platform, error)
+}
+
+// PushLayerProvider provides layers to be pushed by ChainID.
+type PushLayerProvider interface {
+	Get(layer.ChainID) (PushLayer, error)
+}
+
+// PushLayer is a pushable layer with metadata about the layer
+// and access to the content of the layer.
+type PushLayer interface {
+	ChainID() layer.ChainID
+	DiffID() layer.DiffID
+	Parent() PushLayer
+	Open() (io.ReadCloser, error)
+	Size() (int64, error)
+	MediaType() string
+	Release()
+}
+
+// RootFSDownloadManager handles downloading of the rootfs
+type RootFSDownloadManager interface {
+	// Download downloads the layers into the given initial rootfs and
+	// returns the final rootfs. It takes a progress output to track
+	// download progress, and returns a function to release download
+	// resources.
+	Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
+}
+
+type imageConfigStore struct {
+	image.Store
+	deltaStore image.Store
+}
+
+// NewImageConfigStoreFromStore returns an ImageConfigStore backed
+// by an image.Store for container images.
+func NewImageConfigStoreFromStore(is, deltaImageStore image.Store) ImageConfigStore {
+	return &imageConfigStore{
+		Store:      is,
+		deltaStore: deltaImageStore,
+	}
+}
+
+func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) {
+	id, err := s.Store.Create(c)
+	return digest.Digest(id), err
+}
+
+func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) {
+	img, err := s.Store.Get(image.IDFromDigest(d))
+	if err != nil {
+		return nil, err
+	}
+	return img.RawJSON(), nil
+}
+
+func (s *imageConfigStore) GetTarSeekStream(d digest.Digest) (ioutils.ReadSeekCloser, error) {
+	stream, err := s.Store.GetTarSeekStream(image.IDFromDigest(d))
+	if err != nil && s.deltaStore != nil {
+		return s.deltaStore.GetTarSeekStream(image.IDFromDigest(d))
+	}
+	return stream, err
+}
+
+func (s *imageConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) {
+	var unmarshalledConfig image.Image
+	if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
+		return nil, "", err
+	}
+
+	// Fail immediately on Windows when downloading a non-Windows image,
+	// and vice versa. Exception: on Windows, if Linux Containers are enabled.
+	if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() {
+		return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
+	} else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" {
+		return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
+	}
+
+	platform := ""
+	if runtime.GOOS == "windows" {
+		platform = unmarshalledConfig.OS
+	}
+	return unmarshalledConfig.RootFS, layer.Platform(platform), nil
+}
+
+type storeLayerProvider struct {
+	ls layer.Store
+}
+
+// NewLayerProviderFromStore returns a layer provider backed by
+// an instance of LayerStore. Only getting layers as uncompressed
+// tars is supported.
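+//
+// Illustrative sketch (editor's addition, not part of this patch), where
+// "provider" is any PushLayerProvider:
+//
+//	pl, err := provider.Get(chainID)
+//	if err == nil {
+//		defer pl.Release()
+//		rc, _ := pl.Open() // uncompressed tar stream of the layer
+//		defer rc.Close()
+//	}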
+func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider {
+	return &storeLayerProvider{
+		ls: ls,
+	}
+}
+
+func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) {
+	if lid == "" {
+		return &storeLayer{
+			Layer: layer.EmptyLayer,
+		}, nil
+	}
+	l, err := p.ls.Get(lid)
+	if err != nil {
+		return nil, err
+	}
+
+	sl := storeLayer{
+		Layer: l,
+		ls:    p.ls,
+	}
+	if d, ok := l.(distribution.Describable); ok {
+		return &describableStoreLayer{
+			storeLayer:  sl,
+			describable: d,
+		}, nil
+	}
+
+	return &sl, nil
+}
+
+type storeLayer struct {
+	layer.Layer
+	ls layer.Store
+}
+
+func (l *storeLayer) Parent() PushLayer {
+	p := l.Layer.Parent()
+	if p == nil {
+		return nil
+	}
+	sl := storeLayer{
+		Layer: p,
+		ls:    l.ls,
+	}
+	if d, ok := p.(distribution.Describable); ok {
+		return &describableStoreLayer{
+			storeLayer:  sl,
+			describable: d,
+		}
+	}
+
+	return &sl
+}
+
+func (l *storeLayer) Open() (io.ReadCloser, error) {
+	return l.Layer.TarStream()
+}
+
+func (l *storeLayer) Size() (int64, error) {
+	return l.Layer.DiffSize()
+}
+
+func (l *storeLayer) MediaType() string {
+	// layer store always returns uncompressed tars
+	return schema2.MediaTypeUncompressedLayer
+}
+
+func (l *storeLayer) Release() {
+	if l.ls != nil {
+		layer.ReleaseAndLog(l.ls, l.Layer)
+	}
+}
+
+type describableStoreLayer struct {
+	storeLayer
+	describable distribution.Describable
+}
+
+func (l *describableStoreLayer) Descriptor() distribution.Descriptor {
+	return l.describable.Descriptor()
+}
diff --git a/distribution/errors.go b/distribution/errors.go
new file mode 100644
index 0000000..7f97c1d
--- /dev/null
+++ b/distribution/errors.go
@@ -0,0 +1,159 @@
+package distribution
+
+import (
+	"net/url"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/pkg/errors"
+)
+
+// ErrNoSupport is an error type indicating that an operation is not
+// supported. It encapsulates a more specific error.
+type ErrNoSupport struct{ Err error }
+
+func (e ErrNoSupport) Error() string {
+	if e.Err == nil {
+		return "not supported"
+	}
+	return e.Err.Error()
+}
+
+// fallbackError wraps an error that can possibly allow fallback to a different
+// endpoint.
+type fallbackError struct {
+	// err is the error being wrapped.
+	err error
+	// confirmedV2 is set to true if it was confirmed that the registry
+	// supports the v2 protocol. This is used to limit fallbacks to the v1
+	// protocol.
+	confirmedV2 bool
+	// transportOK is set to true if we managed to speak HTTP with the
+	// registry. This confirms that we're using appropriate TLS settings
+	// (or lack of TLS).
+	transportOK bool
+}
+
+// Error renders the fallbackError as a string.
+func (f fallbackError) Error() string {
+	return f.Cause().Error()
+}
+
+func (f fallbackError) Cause() error {
+	return f.err
+}
+
+// shouldV2Fallback returns true if this error is a reason to fall back to v1.
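+//
+// Illustrative sketch (editor's addition, not part of this patch):
+//
+//	if e, ok := err.(errcode.Error); ok && shouldV2Fallback(e) {
+//		// retry the operation against a v1 endpoint
+//	}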
+func shouldV2Fallback(err errcode.Error) bool {
+	switch err.Code {
+	case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
+		return true
+	}
+	return false
+}
+
+// TranslatePullError is used to convert an error from a registry pull
+// operation to an error representing the entire pull operation. Any error
+// information which is not used by the returned error gets output to
+// log at info level.
+func TranslatePullError(err error, ref reference.Named) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			for _, extra := range v[1:] {
+				logrus.Infof("Ignoring extra error returned from registry: %v", extra)
+			}
+			return TranslatePullError(v[0], ref)
+		}
+	case errcode.Error:
+		var newErr error
+		switch v.Code {
+		case errcode.ErrorCodeDenied:
+			// ErrorCodeDenied is used when access to the repository was denied
+			newErr = errors.Errorf("pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(ref))
+		case v2.ErrorCodeManifestUnknown:
+			newErr = errors.Errorf("manifest for %s not found", reference.FamiliarString(ref))
+		case v2.ErrorCodeNameUnknown:
+			newErr = errors.Errorf("repository %s not found", reference.FamiliarName(ref))
+		}
+		if newErr != nil {
+			logrus.Infof("Translating %q to %q", err, newErr)
+			return newErr
+		}
+	case xfer.DoNotRetry:
+		return TranslatePullError(v.Err, ref)
+	}
+
+	return err
+}
+
+// continueOnError returns true if we should fall back to the next endpoint
+// as a result of this error.
+func continueOnError(err error) bool {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) == 0 {
+			return true
+		}
+		return continueOnError(v[0])
+	case ErrNoSupport:
+		return continueOnError(v.Err)
+	case errcode.Error:
+		return shouldV2Fallback(v)
+	case *client.UnexpectedHTTPResponseError:
+		return true
+	case ImageConfigPullError:
+		return false
+	case error:
+		return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error()))
+	}
+	// let's be nice and fall back if the error is a completely
+	// unexpected one.
+	// If new errors have to be handled in some way, please
+	// add them to the switch above.
+	return true
+}
+
+// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the
+// operation after this error.
+func retryOnError(err error) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			return retryOnError(v[0])
+		}
+	case errcode.Error:
+		switch v.Code {
+		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown:
+			return xfer.DoNotRetry{Err: err}
+		}
+	case *url.Error:
+		switch v.Err {
+		case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken:
+			return xfer.DoNotRetry{Err: v.Err}
+		}
+		return retryOnError(v.Err)
+	case *client.UnexpectedHTTPResponseError:
+		return xfer.DoNotRetry{Err: err}
+	case error:
+		if err == distribution.ErrBlobUnknown {
+			return xfer.DoNotRetry{Err: err}
+		}
+		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
+			return xfer.DoNotRetry{Err: err}
+		}
+	}
+	// let's be nice and fall back if the error is a completely
+	// unexpected one.
+	// If new errors have to be handled in some way, please
+	// add them to the switch above.
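+	//
+	// Illustrative usage (editor's sketch, not part of this patch), where
+	// attemptDownload is a hypothetical helper performing one transfer:
+	//
+	//	if err := attemptDownload(); err != nil {
+	//		return retryOnError(err) // xfer.DoNotRetry halts further attempts
+	//	}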
+ return err +} diff --git a/distribution/fixtures/validate_manifest/bad_manifest b/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 0000000..a1f02a6 --- /dev/null +++ b/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", 
+ "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/distribution/fixtures/validate_manifest/extra_data_manifest b/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 0000000..beec19a --- /dev/null +++ b/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/distribution/fixtures/validate_manifest/good_manifest b/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 0000000..b107de3 --- /dev/null +++ b/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/distribution/metadata/metadata.go b/distribution/metadata/metadata.go new file mode 100644 index 0000000..3dae795 --- /dev/null +++ b/distribution/metadata/metadata.go @@ -0,0 +1,77 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string + platform string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath, platform string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + platform: platform, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. 
+func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) {
+	store.RLock()
+	defer store.RUnlock()
+
+	return ioutil.ReadFile(store.path(namespace, key))
+}
+
+// Set writes data indexed by namespace and key. The data is written to a file
+// named after the key, stored in the namespace's directory.
+func (store *FSMetadataStore) Set(namespace, key string, value []byte) error {
+	store.Lock()
+	defer store.Unlock()
+
+	path := store.path(namespace, key)
+	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(path, value, 0644)
+}
+
+// Delete removes data indexed by namespace and key. The data file named after
+// the key in the namespace's directory is deleted.
+func (store *FSMetadataStore) Delete(namespace, key string) error {
+	store.Lock()
+	defer store.Unlock()
+
+	path := store.path(namespace, key)
+	return os.Remove(path)
+}
diff --git a/distribution/metadata/v1_id_service.go b/distribution/metadata/v1_id_service.go
new file mode 100644
index 0000000..f262d4d
--- /dev/null
+++ b/distribution/metadata/v1_id_service.go
@@ -0,0 +1,51 @@
+package metadata
+
+import (
+	"github.com/docker/docker/image/v1"
+	"github.com/docker/docker/layer"
+	"github.com/pkg/errors"
+)
+
+// V1IDService maps v1 IDs to layers on disk.
+type V1IDService struct {
+	store Store
+}
+
+// NewV1IDService creates a new V1 ID mapping service.
+func NewV1IDService(store Store) *V1IDService {
+	return &V1IDService{
+		store: store,
+	}
+}
+
+// namespace returns the namespace used by this service.
+func (idserv *V1IDService) namespace() string {
+	return "v1id"
+}
+
+// Get finds a layer by its V1 ID.
+func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
+	if idserv.store == nil {
+		return "", errors.New("no v1IDService storage")
+	}
+	if err := v1.ValidateID(v1ID); err != nil {
+		return layer.DiffID(""), err
+	}
+
+	idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID)
+	if err != nil {
+		return layer.DiffID(""), err
+	}
+	return layer.DiffID(idBytes), nil
+}
+
+// Set associates a layer DiffID with a V1 ID.
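+//
+// Illustrative sketch (editor's addition, not part of this patch):
+//
+//	_ = idserv.Set(v1ID, "registry-1.docker.io", diffID)
+//	got, err := idserv.Get(v1ID, "registry-1.docker.io") // got == diffID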
+func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/distribution/metadata/v1_id_service_test.go b/distribution/metadata/v1_id_service_test.go new file mode 100644 index 0000000..385901e --- /dev/null +++ b/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,84 @@ +package metadata + +import ( + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.DiffID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/distribution/metadata/v2_metadata_service.go b/distribution/metadata/v2_metadata_service.go new file mode 100644 index 0000000..c31c721 --- /dev/null +++ b/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. 
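+//
+// Illustrative sketch (editor's addition, not part of this patch): record
+// that a layer diff ID was seen as a digest in a repository, then resolve
+// the digest back to the diff ID:
+//
+//	_ = svc.Add(diffID, V2Metadata{Digest: dgst, SourceRepository: "docker.io/library/busybox"})
+//	got, err := svc.GetDiffID(dgst) // got == diffID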
+type V2MetadataService interface {
+	GetMetadata(diffID layer.DiffID) ([]V2Metadata, error)
+	GetDiffID(dgst digest.Digest) (layer.DiffID, error)
+	Add(diffID layer.DiffID, metadata V2Metadata) error
+	TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error
+	Remove(metadata V2Metadata) error
+}
+
+// v2MetadataService implements V2MetadataService
+type v2MetadataService struct {
+	store Store
+}
+
+var _ V2MetadataService = &v2MetadataService{}
+
+// V2Metadata contains the digest and source repository information for a layer.
+type V2Metadata struct {
+	Digest           digest.Digest
+	SourceRepository string
+	// HMAC is a hash of the above attributes, keyed with a digest of the most
+	// recent auth config. It is used to find metadata entries pulled or pushed
+	// with the same credentials without actually exposing those credentials.
+	HMAC string
+}
+
+// CheckV2MetadataHMAC returns true if the given "meta" is tagged with an hmac
+// computed from the given "key".
+func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool {
+	if len(meta.HMAC) == 0 || len(key) == 0 {
+		return len(meta.HMAC) == 0 && len(key) == 0
+	}
+	mac := hmac.New(sha256.New, key)
+	mac.Write([]byte(meta.Digest))
+	mac.Write([]byte(meta.SourceRepository))
+	expectedMac := mac.Sum(nil)
+
+	storedMac, err := hex.DecodeString(meta.HMAC)
+	if err != nil {
+		return false
+	}
+
+	return hmac.Equal(storedMac, expectedMac)
+}
+
+// ComputeV2MetadataHMAC returns an hmac for the given "meta", hashed by the
+// given key.
+func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string {
+	if len(key) == 0 || meta == nil {
+		return ""
+	}
+	mac := hmac.New(sha256.New, key)
+	mac.Write([]byte(meta.Digest))
+	mac.Write([]byte(meta.SourceRepository))
+	return hex.EncodeToString(mac.Sum(nil))
+}
+
+// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata
+// entries.
+func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) {
+	if authConfig == nil {
+		return nil, nil
+	}
+	key := authConfigKeyInput{
+		Username:      authConfig.Username,
+		Password:      authConfig.Password,
+		Auth:          authConfig.Auth,
+		IdentityToken: authConfig.IdentityToken,
+		RegistryToken: authConfig.RegistryToken,
+	}
+	buf, err := json.Marshal(&key)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(digest.FromBytes([]byte(buf))), nil
+}
+
+// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for
+// hmac key creation.
+type authConfigKeyInput struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	Auth     string `json:"auth,omitempty"`
+
+	IdentityToken string `json:"identitytoken,omitempty"`
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// maxMetadata is the number of metadata entries to keep per layer DiffID.
+const maxMetadata = 50
+
+// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
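+//
+// Illustrative sketch (editor's addition, not part of this patch): tag an
+// entry with a key derived from the current credentials, then verify it
+// later with CheckV2MetadataHMAC:
+//
+//	hmacKey, _ := ComputeV2MetadataHMACKey(authConfig)
+//	_ = svc.TagAndAdd(diffID, hmacKey, V2Metadata{Digest: dgst, SourceRepository: repo})
+//	trusted := CheckV2MetadataHMAC(&meta, hmacKey) // for some stored entry meta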
+func NewV2MetadataService(store Store) V2MetadataService {
+	return &v2MetadataService{
+		store: store,
+	}
+}
+
+func (serv *v2MetadataService) diffIDNamespace() string {
+	return "v2metadata-by-diffid"
+}
+
+func (serv *v2MetadataService) digestNamespace() string {
+	return "diffid-by-digest"
+}
+
+func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string {
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+}
+
+func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
+	return string(dgst.Algorithm()) + "/" + dgst.Hex()
+}
+
+// GetMetadata finds the metadata associated with a layer DiffID.
+func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
+	if serv.store == nil {
+		return nil, errors.New("no metadata storage")
+	}
+	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	if err != nil {
+		return nil, err
+	}
+
+	var metadata []V2Metadata
+	if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
+		return nil, err
+	}
+
+	return metadata, nil
+}
+
+// GetDiffID finds a layer DiffID from a digest.
+func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
+	if serv.store == nil {
+		return layer.DiffID(""), errors.New("no metadata storage")
+	}
+	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
+	if err != nil {
+		return layer.DiffID(""), err
+	}
+
+	return layer.DiffID(diffIDBytes), nil
+}
+
+// Add associates metadata with a layer DiffID. If too many metadata entries are
+// present, the oldest one is dropped.
+func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
+	if serv.store == nil {
+		// Support a service which has no backend storage, in this case
+		// an add becomes a no-op.
+		// TODO: implement in memory storage
+		return nil
+	}
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	newMetadata = append(newMetadata, metadata)
+
+	if len(newMetadata) > maxMetadata {
+		newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
+}
+
+// TagAndAdd amends the given "meta" with an hmac hashed by the given "hmacKey" and associates it with a layer
+// DiffID. If too many metadata entries are present, the oldest one is dropped.
+func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error {
+	meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta)
+	return serv.Add(diffID, meta)
+}
+
+// Remove disassociates a metadata entry from a layer DiffID.
+func (serv *v2MetadataService) Remove(metadata V2Metadata) error {
+	if serv.store == nil {
+		// Support a service which has no backend storage, in this case
+		// a remove becomes a no-op.
+ // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/distribution/metadata/v2_metadata_service_test.go b/distribution/metadata/v2_metadata_service_test.go new file mode 100644 index 0000000..b5d59b2 --- /dev/null +++ b/distribution/metadata/v2_metadata_service_test.go @@ -0,0 +1,116 @@ +package metadata + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "reflect" + "runtime" + "testing" + + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +func TestV2MetadataService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + V2MetadataService := NewV2MetadataService(metadataStore) + + tooManyBlobSums := make([]V2Metadata, 100) + for i := range tooManyBlobSums { + randDigest := randomDigest() + tooManyBlobSums[i] = V2Metadata{Digest: randDigest} + } + + testVectors := []struct { + diffID layer.DiffID + metadata []V2Metadata + }{ + { + diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + }, + }, + { + diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + metadata: tooManyBlobSums, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.metadata { + err := V2MetadataService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + metadata, err := V2MetadataService.GetMetadata(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedMetadataEntries := len(vec.metadata) + if expectedMetadataEntries > 50 { + expectedMetadataEntries = 50 + } + if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetMetadata on a nonexistent entry + _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Test 
GetDiffID on a nonexistent entry
+	_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Overwrite one of the entries and read it back
+	err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
+	if err != nil {
+		t.Fatalf("error calling Add: %v", err)
+	}
+	diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
+	if err != nil {
+		t.Fatalf("error calling GetDiffID: %v", err)
+	}
+	if diffID != testVectors[1].diffID {
+		t.Fatal("GetDiffID returned incorrect diffID")
+	}
+}
+
+func randomDigest() digest.Digest {
+	b := [32]byte{}
+	for i := 0; i < len(b); i++ {
+		b[i] = byte(rand.Intn(256))
+	}
+	d := hex.EncodeToString(b[:])
+	return digest.Digest("sha256:" + d)
+}
diff --git a/distribution/pull.go b/distribution/pull.go
new file mode 100644
index 0000000..c5bdbd6
--- /dev/null
+++ b/distribution/pull.go
@@ -0,0 +1,198 @@
+package distribution
+
+import (
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/pkg/progress"
+	refstore "github.com/docker/docker/reference"
+	"github.com/docker/docker/registry"
+	"github.com/opencontainers/go-digest"
+	"golang.org/x/net/context"
+)
+
+// Puller is an interface that abstracts pulling for different API versions.
+type Puller interface {
+	// Pull tries to pull the image referenced by `ref`. It returns an error
+	// if the pull fails; errors that permit retrying the pull on the next
+	// configured endpoint are wrapped in fallbackError.
+	Pull(ctx context.Context, ref reference.Named) error
+}
+
+// newPuller returns a Puller interface that will pull from either a v1 or v2
+// registry. The endpoint argument contains a Version field that determines
+// whether a v1 or v2 puller will be created. The other parameters are passed
+// through to the underlying puller implementation for use during the actual
+// pull operation.
+func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) {
+	switch endpoint.Version {
+	case registry.APIVersion2:
+		return &v2Puller{
+			V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
+			endpoint:          endpoint,
+			config:            imagePullConfig,
+			repoInfo:          repoInfo,
+		}, nil
+	case registry.APIVersion1:
+		return &v1Puller{
+			v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
+			endpoint:    endpoint,
+			config:      imagePullConfig,
+			repoInfo:    repoInfo,
+		}, nil
+	}
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
+}
+
+// Pull initiates a pull operation. ref is the image reference to pull: it
+// names the repository and may either carry a specific tag or digest, or be
+// left bare to pull the default tag.
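+//
+// Illustrative sketch (editor's addition, not part of this patch), assuming
+// cfg is a fully populated *ImagePullConfig:
+//
+//	ref, _ := reference.ParseNormalizedNamed("library/busybox:latest")
+//	if err := Pull(ctx, ref, cfg); err != nil {
+//		// err has already been translated by TranslatePullError
+//	}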
+func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error {
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
+	if err != nil {
+		return err
+	}
+
+	// make sure the name is not `scratch`
+	if err := ValidateRepoName(repoInfo.Name); err != nil {
+		return err
+	}
+
+	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
+	if err != nil {
+		return err
+	}
+
+	var (
+		lastErr error
+
+		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type ErrNoSupport.
+		// By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr.
+		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
+		// any subsequent ErrNoSupport errors in lastErr.
+		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
+		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the relevant errors
+		// are the ones from the v2 endpoints, not v1.
+		discardNoSupportErrors bool
+
+		// confirmedV2 is set to true if a pull attempt managed to
+		// confirm that it was talking to a v2 registry. This will
+		// prevent fallback to the v1 protocol.
+		confirmedV2 bool
+
+		// confirmedTLSRegistries is a map indicating which registries
+		// are known to be using TLS. There should never be a plaintext
+		// retry for any of these.
+		confirmedTLSRegistries = make(map[string]struct{})
+	)
+	for _, endpoint := range endpoints {
+		if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
+			continue
+		}
+
+		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
+			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
+			continue
+		}
+
+		if endpoint.URL.Scheme != "https" {
+			if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
+				logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
+				continue
+			}
+		}
+
+		logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version)
+
+		puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		if err := puller.Pull(ctx, ref); err != nil {
+			// Was this pull cancelled? If so, don't try to fall
+			// back.
+			fallback := false
+			select {
+			case <-ctx.Done():
+			default:
+				if fallbackErr, ok := err.(fallbackError); ok {
+					fallback = true
+					confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
+					if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
+						confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
+					}
+					err = fallbackErr.err
+				}
+			}
+			if fallback {
+				if _, ok := err.(ErrNoSupport); !ok {
+					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
+					discardNoSupportErrors = true
+					// append subsequent errors
+					lastErr = err
+				} else if !discardNoSupportErrors {
+					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
+					// were also ErrNoSupport errors.
+ // append subsequent errors + lastErr = err + } + logrus.Infof("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. +func ValidateRepoName(name reference.Named) error { + if reference.FamiliarName(name) == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != refstore.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/distribution/pull_v1.go b/distribution/pull_v1.go new file mode 100644 index 0000000..8d12a26 --- /dev/null +++ b/distribution/pull_v1.go @@ -0,0 +1,376 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // 
TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo.Name) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name)) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), 
fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. 
+ for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + 
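// Rewind to the start of the temp file before handing it off, so the downloaded layer is read from the beginning. +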
ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) DeltaBase() io.ReadSeeker { + return nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} + +func (ld *v1LayerDescriptor) Size() int64 { + return ld.layerSize +} diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go new file mode 100644 index 0000000..23fa221 --- /dev/null +++ b/distribution/pull_v2.go @@ -0,0 +1,819 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +const maxDownloadAttempts = 5 + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. 
+ confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + diffID layer.DiffID + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor + ctx context.Context + layerDownload io.ReadCloser + downloadAttempts uint8 + downloadOffset int64 + deltaBase io.ReadSeeker +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return ld.diffID, nil + } + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) reset() error { + if ld.layerDownload != nil { + ld.layerDownload.Close() + ld.layerDownload = nil + } + + layer, err := ld.open(ld.ctx) + if err != nil { + return err + } + + if _, err := layer.Seek(ld.downloadOffset, os.SEEK_SET); err != nil { + return err + } + + ld.layerDownload = ioutils.TeeReadCloser(ioutils.NewCancelReadCloser(ld.ctx, layer), ld.verifier) + + return nil +} + +func (ld *v2LayerDescriptor) Read(p []byte) (int, error) { + if ld.downloadAttempts <= 0 { + return 0, fmt.Errorf("no request retries left") + } + + if ld.layerDownload == nil { + if err := ld.reset(); err != nil { + ld.downloadAttempts -= 1 + return 0, err + } + } + + n, err := ld.layerDownload.Read(p) + ld.downloadOffset += int64(n) + if err == io.EOF { + if !ld.verifier.Verified() { + return n, fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + } + } else if err != nil { + logrus.Warnf("failed to download layer: \"%v\", retrying to read again", err) + ld.downloadAttempts -= 1 + ld.layerDownload = nil + err = nil + } + + return n, err +} + +func (ld *v2LayerDescriptor) DeltaBase() io.ReadSeeker { + return ld.deltaBase +} + +func (ld *v2LayerDescriptor) Close() { + if ld.layerDownload != nil { + ld.layerDownload.Close() + } +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + ld.ctx = ctx + ld.layerDownload = nil + ld.downloadAttempts = maxDownloadAttempts + ld.verifier = ld.digest.Verifier() + + progress.Update(progressOutput, ld.ID(), "Ready to download") + + return ioutils.NewReadCloserWrapper(ld, func() error { return nil }), ld.src.Size, nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) +} + +func (ld *v2LayerDescriptor) Size() int64 { + return ld.src.Size +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + 
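// Fetch the manifest by tag; if this fails, allowV1Fallback decides whether falling back to the v1 protocol is permitted. +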
manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("Encountered remote %q(%s) when fetching", m.Manifest.Config.MediaType, configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != refstore.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + // The v1 manifest itself doesn't directly contain a platform. However, + // the history does, but unfortunately that's a string, so search through + // all the history until hopefully we find one which indicates the os. + platform := runtime.GOOS + if system.LCOWSupported() { + type config struct { + Os string `json:"os,omitempty"` + } + for _, v := range verifiedManifest.History { + var c config + if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil { + if c.Os != "" { + platform = c.Os + break + } + } + } + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.Platform(platform), descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + // Pull the image config + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + return "", "", ImageConfigPullError{Err: err} + } + + var deltaBase io.ReadSeeker + + // check for delta config + img, err := image.NewFromJSON(configJSON) + if err != nil { + return "", "", err + } + + if img.Config != nil { + if base, ok := img.Config.Labels["io.resin.delta.base"]; ok { + digest, err := digest.Parse(base) + if err != nil { + return "", "", err + } + + stream, err := p.config.ImageStore.GetTarSeekStream(digest) + if err != nil { + return "", "", err + } + defer stream.Close() + + deltaBase = stream + } + + if config, ok := img.Config.Labels["io.resin.delta.config"]; ok { + digest := digest.FromString(config) + + if _, err := p.config.ImageStore.Get(digest); err == nil { + // If the image already exists locally, no need to pull + // anything. 
+ return digest, manifestDigest, nil + } + + configJSON = []byte(config) + } + } + + configRootFS, platform, err := p.config.ImageStore.RootFSAndPlatformFromConfig(configJSON) + if err == nil && configRootFS == nil { + return "", "", errRootFSInvalid + } + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + deltaBase: deltaBase, + } + + descriptors = append(descriptors, layerDescriptor) + } + + layerErrChan := make(chan error, 1) + downloadsDone := make(chan struct{}) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + var ( + downloadedRootFS *image.RootFS // rootFS from registered layers + release func() // release resources from rootFS download + ) + + if len(descriptors) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side loaded + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } + + if p.config.DownloadManager != nil { + go func() { + var ( + err error + rootFS image.RootFS + ) + downloadRootFS := *image.NewRootFS() + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, platform, descriptors, p.config.ProgressOutput) + if err != nil { + // Intentionally do not cancel the config download here + // as the error from config download (if there is one) + // is more interesting than the layer download error + layerErrChan <- err + return + } + + downloadedRootFS = &rootFS + close(downloadsDone) + }() + } else { + // We have nothing to download + close(downloadsDone) + } + + select { + case <-downloadsDone: + case err = <-layerErrChan: + return "", "", err + } + + if release != nil { + defer release() + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a os/arch match", ref, len(mfstList.Manifests)) + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. 
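+ // Only the first entry whose os/arch matches the local platform is used; any remaining entries are ignored.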
+ if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String()) + break + } + } + + if manifestDigest == "" { + errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH) + logrus.Debugf(errMsg) + return "", "", errors.New(errMsg) + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + id, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + id, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier := dgst.Verifier() + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. 
+func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} diff --git a/distribution/pull_v2_test.go b/distribution/pull_v2_test.go new file mode 100644 index 0000000..df93c1e --- /dev/null +++ b/distribution/pull_v2_test.go @@ -0,0 +1,183 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. +func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. 
+func TestFixManifestLayersBaseLayerParent(t *testing.T) { + // TODO Windows: Fix this unit test + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly 
follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + expectedDigest, err := 
reference.ParseNormalizedNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/distribution/pull_v2_unix.go b/distribution/pull_v2_unix.go new file mode 100644 index 0000000..45a7a0c --- /dev/null +++ b/distribution/pull_v2_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go new file mode 100644 index 0000000..543ecc1 --- /dev/null +++ b/distribution/pull_v2_windows.go @@ -0,0 +1,57 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := 
ld.repo.Blobs(ctx)
+	rsc, err := blobs.Open(ctx, ld.digest)
+
+	if len(ld.src.URLs) == 0 {
+		return rsc, err
+	}
+
+	// We're done if the registry has this blob.
+	if err == nil {
+		// Seek does an HTTP GET. If it succeeds, the blob really is accessible.
+		if _, err = rsc.Seek(0, os.SEEK_SET); err == nil {
+			return rsc, nil
+		}
+		rsc.Close()
+	}
+
+	// Find the first URL that results in a 200 result code.
+	for _, url := range ld.src.URLs {
+		logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url)
+		rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil)
+
+		// Seek does an HTTP GET. If it succeeds, the blob really is accessible.
+		_, err = rsc.Seek(0, os.SEEK_SET)
+		if err == nil {
+			break
+		}
+		logrus.Debugf("Download for %v failed: %v", ld.digest, err)
+		rsc.Close()
+		rsc = nil
+	}
+	return rsc, err
+}
diff --git a/distribution/push.go b/distribution/push.go
new file mode 100644
index 0000000..395e4d1
--- /dev/null
+++ b/distribution/push.go
@@ -0,0 +1,186 @@
+package distribution
+
+import (
+	"bufio"
+	"compress/gzip"
+	"fmt"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/registry"
+	"golang.org/x/net/context"
+)
+
+// Pusher is an interface that abstracts pushing for different API versions.
+type Pusher interface {
+	// Push tries to push the image configured at the creation of Pusher.
+	// Push returns an error if any. A fallbackError return value signals the
+	// caller that the push may be retried against the next configured endpoint.
+	//
+	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
+	Push(ctx context.Context) error
+}
+
+const compressionBufSize = 32768
+
+// NewPusher creates a new Pusher interface that will push to either a v1 or v2
+// registry. The endpoint argument contains a Version field that determines
+// whether a v1 or v2 pusher will be created. The other parameters are passed
+// through to the underlying pusher implementation for use during the actual
+// push operation.
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) {
+	switch endpoint.Version {
+	case registry.APIVersion2:
+		return &v2Pusher{
+			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
+			ref:               ref,
+			endpoint:          endpoint,
+			repoInfo:          repoInfo,
+			config:            imagePushConfig,
+		}, nil
+	case registry.APIVersion1:
+		return &v1Pusher{
+			v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore),
+			ref:         ref,
+			endpoint:    endpoint,
+			repoInfo:    repoInfo,
+			config:      imagePushConfig,
+		}, nil
+	}
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
+}
+
+// Push initiates a push operation on ref.
+// ref is the specific variant of the image to be pushed.
+// If no tag is provided, all tags will be pushed.
+func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error {
+	// FIXME: Allow interrupting the current push when a new push of the same image is started.
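+	// The overall flow below: resolve the repository name, look up the
+	// configured push endpoints, then try each endpoint in order. A
+	// fallbackError moves on to the next endpoint; any other error (or a
+	// cancelled context) aborts the push.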
+ + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.Name.Name()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Infof("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. 
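+//
+// A minimal usage sketch (layerReader and blobWriter are illustrative names,
+// not part of this file):
+//
+//	compressed, done := compress(layerReader)
+//	_, err := blobWriter.ReadFrom(compressed) // drives the compression goroutine
+//	compressed.Close()
+//	<-done // goroutine finished; now safe to release layerReader's resources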
+func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/distribution/push_v1.go b/distribution/push_v1.go new file mode 100644 index 0000000..5587b56 --- /dev/null +++ b/distribution/push_v1.go @@ -0,0 +1,463 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. 
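+// Its v1 ID is derived from the image ID (the hex portion of the image
+// digest, see newV1TopImage below), so repeated pushes of the same image
+// produce the same v1 ID.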
+type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := imageID.Digest().Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage { + v1ID := digest.Digest(l.ChainID()).Hex() + + var config string + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + } +} + +// Retrieve the all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + var dgst digest.Digest + dgst, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + imgID = image.IDFromDigest(dgst) + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[digest.Digest]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. + continue + } + + imgID := image.IDFromDigest(association.ID) + tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) + + if _, present := imagesSeen[association.ID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) 
+ } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { + ics, ok := p.config.ImageStore.(*imageConfigStore) + if !ok { + return nil, fmt.Errorf("only image store images supported for v1 push") + } + img, err := ics.Store.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + topLayer := layer.Layer(layer.EmptyLayer) + + pl, err := p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, pl) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + + // V1 push is deprecated, only support existing layerstore layers + lsl, ok := pl.(*storeLayer) + if !ok { + return nil, fmt.Errorf("only layer store layers supported for v1 push") + } + l := lsl.Layer + + if l.DiffID() == layer.EmptyLayer.DiffID() { + topLayer = l + l = l.Parent() + } + + dependencyImages, parent := generateDependencyImages(l, dependenciesSeen) + + topImage, err := newV1TopImage(imgID, img, topLayer, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) { + if l == nil { + return nil, nil + } + + imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage + } + } + + dependencyImage := newV1DependencyImage(l, parent) + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. 
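+//
+// It runs as one of several workers draining the shared "images" channel;
+// see pushImageToEndpoint below for how the workers are started and joined.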
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
+	defer wg.Done()
+	for image := range images {
+		v1ID := image.V1ID()
+		truncID := stringid.TruncateID(image.Layer().DiffID().String())
+		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
+			imagesToPush <- v1ID
+			progress.Update(p.config.ProgressOutput, truncID, "Waiting")
+		} else {
+			progress.Update(p.config.ProgressOutput, truncID, "Already exists")
+		}
+	}
+}
+
+func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
+	workerCount := len(imageList)
+	// start a maximum of 5 workers to check if images exist on the specified endpoint.
+	if workerCount > 5 {
+		workerCount = 5
+	}
+	var (
+		wg           = &sync.WaitGroup{}
+		imageData    = make(chan v1Image, workerCount*2)
+		imagesToPush = make(chan string, workerCount*2)
+		pushes       = make(chan map[string]struct{}, 1)
+	)
+	for i := 0; i < workerCount; i++ {
+		wg.Add(1)
+		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
+	}
+	// start a goroutine that consumes the images to push
+	go func() {
+		shouldPush := make(map[string]struct{})
+		for id := range imagesToPush {
+			shouldPush[id] = struct{}{}
+		}
+		pushes <- shouldPush
+	}()
+	for _, v1Image := range imageList {
+		imageData <- v1Image
+	}
+	// close the channel to notify the workers that there will be no more images to check.
+	close(imageData)
+	wg.Wait()
+	close(imagesToPush)
+	// wait for all the images that require pushes to be collected into a consumable map.
+	shouldPush := <-pushes
+	// Finish by pushing any images and tags to the endpoint. The order in which
+	// the images are pushed matters, which is why we still iterate over the
+	// ordered list of image IDs.
+	for _, img := range imageList {
+		v1ID := img.V1ID()
+		if _, push := shouldPush[v1ID]; push {
+			if _, err := p.pushImage(ctx, img, endpoint); err != nil {
+				// FIXME: Continue on error?
+				return err
+			}
+		}
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+			for _, tag := range tags[topImage.imageID] {
+				progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag)
+				if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// pushRepository pushes layers that do not already exist on the registry.
+func (p *v1Pusher) pushRepository(ctx context.Context) error {
+	imgList, tags, referencedLayers, err := p.getImageList()
+	defer func() {
+		for _, l := range referencedLayers {
+			l.Release()
+		}
+	}()
+	if err != nil {
+		return err
+	}
+
+	imageIndex := createImageIndex(imgList, tags)
+	for _, data := range imageIndex {
+		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+	}
+
+	// Register all the images in a repository with the registry.
+	// If an image is not in this list it will not be associated with the repository.
+	repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil)
+	if err != nil {
+		return err
+	}
+	// push the repository to each of the endpoints, skipping images that already exist there.
+ for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/distribution/push_v2.go b/distribution/push_v2.go new file mode 100644 index 0000000..ffc7d68 --- /dev/null +++ b/distribution/push_v2.go @@ -0,0 +1,691 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. 
+ // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref)) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Pull all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name)) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { + logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref)) + + imgConfig, err := p.config.ImageStore.Get(id) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) + } + + rootfs, _, err := p.config.ImageStore.RootFSAndPlatformFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) + } + + l, err := p.config.LayerStore.Get(rootfs.ChainID()) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer l.Release() + + hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) + if err != nil { + return fmt.Errorf("failed to compute hmac key of auth config: %v", err) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + hmacKey: hmacKey, + repoInfo: p.repoInfo.Name, + ref: p.ref, + endpoint: p.endpoint, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows. 
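+	// The walk starts at the top layer and descends via l.Parent(), so
+	// descriptors end up ordered top-to-base; manifestFromBuilder later
+	// iterates over them backwards to append references base-first.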
+ for range rootfs.DiffIDs { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. 
+ for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + endpoint registry.APIEndpoint + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + // Skip foreign layers unless this registry allows nondistributable artifacts. + if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. 
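+	//
+	// What follows, in order: try cross-repository blob mounts from the
+	// candidate source repositories, then spend any remaining existence
+	// checks on other known digests, and finally fall back to a full blob
+	// upload via uploadUsingSession.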
+ bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + for _, mountCandidate := range candidates { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + createOpts := []distribution.BlobCreateOption{} + + if len(mountCandidate.SourceRepository) > 0 { + namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository) + if err != nil { + logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err) + pd.v2MetadataService.Remove(mountCandidate) + continue + } + + // Candidates are always under same domain, create remote reference + // with only path to set mount from with + remoteRef, err := reference.WithName(reference.Path(namedRef)) + if err != nil { + logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err) + continue + } + + canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest) + if err != nil { + logrus.Errorf("failed to make canonical reference: %v", err) + continue + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // send the layer + lu, err := bs.Create(ctx, createOpts...) + switch err := err.(type) { + case nil: + // noop + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: err.Descriptor.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + default: + logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + } + + if len(mountCandidate.SourceRepository) > 0 && + (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || + len(mountCandidate.HMAC) == 0) { + cause := "blob mount failure" + if err != nil { + cause = fmt.Sprintf("an error: %v", err.Error()) + } + logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + pd.v2MetadataService.Remove(mountCandidate) + } + + if lu != nil { + // cancel previous upload + cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) + layerUpload = lu + } + } + + if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + // upload the blob + desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, 
layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.Digester() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). 
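+//
+// Stat errors other than distribution.ErrBlobUnknown are logged and skipped,
+// so a failure on one digest does not abort probing the remaining candidates;
+// such errors are not returned to the caller.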
+func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.Name() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from +// source repositories of target registry, maximum number of layer existence checks performed on the target +// repository and whether the check shall be done also with digests mapped to different repositories. The +// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost +// of upload does not outweigh a latency. 
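+//
+// With the current constants: blobs over middleLayerMaximumSize (10MB) get 4
+// mount attempts and 3 existence checks against digests from any repository;
+// blobs over smallLayerMaximumSize (100KB), or of unknown size, get 3 and 1
+// within the target repository; smaller blobs get a single attempt of each.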
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
+	size, err := layer.Size()
+	switch {
+	// big blob
+	case size > middleLayerMaximumSize:
+		// 1st attempt to mount the blob a few times
+		// 2nd make a few existence checks with digests associated with any repository
+		// then fall back to upload
+		return 4, 3, true
+
+	// middle sized blobs; if we could not get the size, assume we are dealing with a middle sized blob
+	case size > smallLayerMaximumSize, err != nil:
+		// 1st attempt to mount blobs of average size a few times
+		// 2nd try at most 1 existence check if there's an existing mapping to the target repository
+		// then fall back to upload
+		return 3, 1, false
+
+	// small blobs, do a minimum number of checks
+	default:
+		return 1, 1, false
+	}
+}
+
+// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
+// array is sorted from youngest to oldest. Only entries whose SourceRepository lives in the same registry
+// as repoInfo are kept, and the target repository itself is excluded.
+func getRepositoryMountCandidates(
+	repoInfo reference.Named,
+	hmacKey []byte,
+	max int,
+	v2Metadata []metadata.V2Metadata,
+) []metadata.V2Metadata {
+	candidates := []metadata.V2Metadata{}
+	for _, meta := range v2Metadata {
+		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
+		if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
+			continue
+		}
+		// target repository is not a viable candidate
+		if meta.SourceRepository == repoInfo.Name() {
+			continue
+		}
+		candidates = append(candidates, meta)
+	}
+
+	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
+	if max >= 0 && len(candidates) > max {
+		// select the youngest metadata
+		candidates = candidates[:max]
+	}
+
+	return candidates
+}
+
+// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
+// candidate "a" is preferred over "b":
+//
+// 1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository and
+// "b" was not
+// 2. 
if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.Name()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". +func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/distribution/push_v2_test.go b/distribution/push_v2_test.go new file mode 100644 index 0000000..e8e2b1a --- /dev/null +++ b/distribution/push_v2_test.go @@ -0,0 +1,583 @@ +package distribution + +import ( + "net/http" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" +) + +func TestGetRepositoryMountCandidates(t *testing.T) { + for _, tc := range []struct { + name string + hmacKey string + targetRepo string + maxCandidates int + metadata []metadata.V2Metadata + candidates []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item not matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, + candidates: []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, + }, + { + name: 
"allow missing SourceRepository", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + candidates: []metadata.V2Metadata{}, + }, + { + name: "handle docker.io", + targetRepo: "user/app", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, + }, + candidates: []metadata.V2Metadata{ + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, + }, + }, + { + name: "sort more items", + hmacKey: "abcd", + targetRepo: "127.0.0.1/foo/bar", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + taggedMetadata("hash", "1", "docker.io/library/hello-world"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + taggedMetadata("abcd", "3", "docker.io/library/busybox"), + taggedMetadata("hash", "4", "docker.io/library/busybox"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + // then by longest matching prefix + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + // sort the rest of the matching items in reversed order + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + }, + }, + { + name: "limit max candidates", + hmacKey: "abcd", + targetRepo: "user/app", + maxCandidates: 3, + metadata: []metadata.V2Metadata{ + taggedMetadata("abcd", "1", "docker.io/user/app1"), + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("hash", "3", "docker.io/user/app"), + taggedMetadata("abcd", "4", "127.0.0.1/user/app"), + taggedMetadata("hash", "5", "docker.io/user/foo"), + taggedMetadata("hash", "6", "docker.io/app/bar"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("abcd", "1", "docker.io/user/app1"), + // then by longest matching prefix + // "docker.io/usr/app" is excluded since candidates must + // be from a different repository + taggedMetadata("hash", "5", "docker.io/user/foo"), + }, + }, + } { + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata) + if len(candidates) != len(tc.candidates) { + t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates)) + } + for i := 0; i < len(candidates) && i < len(tc.candidates); i++ { + if !reflect.DeepEqual(candidates[i], tc.candidates[i]) { + t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i]) + } + } + for i := len(candidates); i < len(tc.candidates); i++ { + t.Errorf("[%s] missing expected candidate at position %d 
(%#+v)", tc.name, i, tc.candidates[i]) + } + for i := len(tc.candidates); i < len(candidates); i++ { + t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i]) + } + } +} + +func TestLayerAlreadyExists(t *testing.T) { + for _, tc := range []struct { + name string + metadata []metadata.V2Metadata + targetRepo string + hmacKey string + maxExistenceChecks int + checkOtherRepositories bool + remoteBlobs map[digest.Digest]distribution.Descriptor + remoteErrors map[digest.Digest]error + expectedDescriptor distribution.Descriptor + expectedExists bool + expectedError error + expectedRequests []string + expectedAdditions []metadata.V2Metadata + expectedRemovals []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxExistenceChecks: 3, + checkOtherRepositories: true, + }, + { + name: "single not existent metadata", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + expectedRequests: []string{"pear"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "access denied", + targetRepo: "busybox", + maxExistenceChecks: 1, + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + remoteErrors: map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied}, + expectedError: nil, + expectedRequests: []string{"apple"}, + }, + { + name: "not matching reposies", + targetRepo: "busybox", + maxExistenceChecks: 3, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + }, + { + name: "check other repositories", + targetRepo: "busybox", + maxExistenceChecks: 10, + checkOtherRepositories: true, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + expectedRequests: []string{"plum", "apple", "pear", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + { + name: "find existing blob", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + }, + { + name: "find existing blob with different hmac", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: 
digest.Digest("apple"), HMAC: "dummyhmac"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "overwrite media types", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + hmacKey: "key", + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")}, + }, + { + name: "find existing blob among many", + targetRepo: "127.0.0.1/myapp", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"), + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + taggedMetadata("", "plum", "127.0.0.1/myapp"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "plum", "pear"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + {Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"}, + }, + }, + { + name: "reach maximum existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedExists: false, + expectedRequests: []string{"banana", "plum", "apple"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + }, + }, + { + name: "zero allowed existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 0, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + }, + { + name: "stat single 
digest just once", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + taggedMetadata("key1", "pear", "docker.io/library/busybox"), + taggedMetadata("key2", "apple", "docker.io/library/busybox"), + taggedMetadata("key3", "apple", "docker.io/library/busybox"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "pear"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")}, + }, + { + name: "don't stop on first error", + targetRepo: "user/app", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("key", "banana", "docker.io/user/app"), + taggedMetadata("key", "orange", "docker.io/user/app"), + taggedMetadata("key", "plum", "docker.io/user/app"), + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrAccessDenied}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}}, + expectedError: nil, + expectedRequests: []string{"plum", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "plum", "docker.io/user/app"), + taggedMetadata("key", "banana", "docker.io/user/app"), + }, + }, + { + name: "remove outdated metadata", + targetRepo: "docker.io/user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrBlobUnknown}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}}, + expectedExists: false, + expectedRequests: []string{"orange"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}}, + }, + { + name: "missing SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + maxExistenceChecks: 3, + expectedExists: false, + expectedRequests: []string{"2", "3", "1"}, + }, + + { + name: "with and without SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("3")}, + }, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}}, + maxExistenceChecks: 3, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"2", "3", "1"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + }, + }, + } { + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + repo := &mockRepo{ + t: t, + errors: 
tc.remoteErrors, + blobs: tc.remoteBlobs, + requests: []string{}, + } + ctx := context.Background() + ms := &mockV2MetadataService{} + pd := &v2PushDescriptor{ + hmacKey: []byte(tc.hmacKey), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)}, + checkedDigests: make(map[digest.Digest]struct{}), + } + + desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata) + + if !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor) + } + if exists != tc.expectedExists { + t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists) + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) + } + + if len(repo.requests) != len(tc.expectedRequests) { + t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests)) + } + for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ { + if repo.requests[i] != tc.expectedRequests[i] { + t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i]) + } + } + for i := len(repo.requests); i < len(tc.expectedRequests); i++ { + t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i]) + } + for i := len(tc.expectedRequests); i < len(repo.requests); i++ { + t.Errorf("[%s] got unexpected request at position %d (%q)", tc.name, i, repo.requests[i]) + } + + if len(ms.added) != len(tc.expectedAdditions) { + t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions)) + } + for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ { + if ms.added[i] != tc.expectedAdditions[i] { + t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i]) + } + } + for i := len(ms.added); i < len(tc.expectedAdditions); i++ { + t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i]) + } + for i := len(tc.expectedAdditions); i < len(ms.added); i++ { + t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i]) + } + + if len(ms.removed) != len(tc.expectedRemovals) { + t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals)) + } + for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ { + if ms.removed[i] != tc.expectedRemovals[i] { + t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i]) + } + } + for i := len(ms.removed); i < len(tc.expectedRemovals); i++ { + t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i]) + } + for i := len(tc.expectedRemovals); i < len(ms.removed); i++ { + t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i]) + } + } +} + +func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { + meta := metadata.V2Metadata{ + Digest: digest.Digest(dgst), + SourceRepository: sourceRepo, + } + + meta.HMAC = 
metadata.ComputeV2MetadataHMAC([]byte(key), &meta) + return meta +} + +type mockRepo struct { + t *testing.T + errors map[digest.Digest]error + blobs map[digest.Digest]distribution.Descriptor + requests []string +} + +var _ distribution.Repository = &mockRepo{} + +func (m *mockRepo) Named() reference.Named { + m.t.Fatal("Named() not implemented") + return nil +} +func (m *mockRepo) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + m.t.Fatal("Manifests() not implemented") + return nil, nil +} +func (m *mockRepo) Tags(ctx context.Context) distribution.TagService { + m.t.Fatal("Tags() not implemented") + return nil +} +func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore { + return &mockBlobStore{ + repo: m, + } +} + +type mockBlobStore struct { + repo *mockRepo +} + +var _ distribution.BlobStore = &mockBlobStore{} + +func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + m.repo.requests = append(m.repo.requests, dgst.String()) + if err, exists := m.repo.errors[dgst]; exists { + return distribution.Descriptor{}, err + } + if desc, exists := m.repo.blobs[dgst]; exists { + return desc, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} +func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + m.repo.t.Fatal("Get() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + m.repo.t.Fatal("Open() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + m.repo.t.Fatal("Put() not implemented") + return distribution.Descriptor{}, nil +} + +func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Create() not implemented") + return nil, nil +} +func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Resume() not implemented") + return nil, nil +} +func (m *mockBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + m.repo.t.Fatal("Delete() not implemented") + return nil +} +func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + m.repo.t.Fatal("ServeBlob() not implemented") + return nil +} + +type mockV2MetadataService struct { + added []metadata.V2Metadata + removed []metadata.V2Metadata +} + +var _ metadata.V2MetadataService = &mockV2MetadataService{} + +func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return nil, nil +} +func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + return "", nil +} +func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error { + m.added = append(m.added, metadata) + return nil +} +func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error { + meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta) + m.Add(diffID, meta) + return nil +} +func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error { + m.removed = append(m.removed, metadata) + return nil +} + +type progressSink struct { + t *testing.T +} + +func (s *progressSink) WriteProgress(p progress.Progress) error { + 
s.t.Logf("progress update: %#+v", p) + return nil +} diff --git a/distribution/registry.go b/distribution/registry.go new file mode 100644 index 0000000..bce270a --- /dev/null +++ b/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + +func init() { + // initialize media type classes with all know types for + // plugin + mediaTypeClasses = map[string]string{} + for _, t := range ImageTypes { + mediaTypeClasses[t] = "image" + } + for _, t := range PluginTypes { + mediaTypeClasses[t] = "plugin" + } +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.Name.Name() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = reference.Path(repoInfo.Name) + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) 
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := reference.WithName(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/distribution/registry_unit_test.go b/distribution/registry_unit_test.go new file mode 100644 index 0000000..910061f --- /dev/null +++ b/distribution/registry_unit_test.go @@ -0,0 +1,172 @@ +package distribution + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +const secretRegistryToken = "mysecrettoken" + +type tokenPassThruHandler struct { + reached bool + gotToken bool + shouldSend401 func(url string) bool +} + +func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.reached = true + if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { + logrus.Debug("Detected registry token in auth header") + h.gotToken = true + } + if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + } +} + +func testTokenPassThru(t *testing.T, ts *httptest.Server) { + tmp, err := testDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + uri, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("could not parse url from test server: %v", err) + } + + endpoint := registry.APIEndpoint{ + Mirror: false, + URL: uri, + Version: 2, + Official: false, + TrimHostname: 
false, + TLSConfig: nil, + } + n, _ := reference.ParseNormalizedNamed("testremotename") + repoInfo := &registry.RepositoryInfo{ + Name: n, + Index: &registrytypes.IndexInfo{ + Name: "testrepo", + Mirrors: nil, + Secure: false, + Official: false, + }, + Official: false, + } + imagePullConfig := &ImagePullConfig{ + Config: Config{ + MetaHeaders: http.Header{}, + AuthConfig: &types.AuthConfig{ + RegistryToken: secretRegistryToken, + }, + }, + Schema2Types: ImageTypes, + } + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + t.Fatal(err) + } + p := puller.(*v2Puller) + ctx := context.Background() + p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + t.Fatal(err) + } + + logrus.Debug("About to pull") + // We expect it to fail, since we haven't mocked the full registry exchange in our handler above + tag, _ := reference.WithTag(n, "tag_goes_here") + _ = p.pullV2Repository(ctx, tag) +} + +func TestTokenPassThru(t *testing.T) { + handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }} + ts := httptest.NewServer(handler) + defer ts.Close() + + testTokenPassThru(t, ts) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if !handler.gotToken { + t.Fatal("Failed to receive registry token") + } +} + +func TestTokenPassThruDifferentHost(t *testing.T) { + handler := new(tokenPassThruHandler) + ts := httptest.NewServer(handler) + defer ts.Close() + + tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v2/" { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + return + } + http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently) + })) + defer tsredirect.Close() + + testTokenPassThru(t, tsredirect) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if handler.gotToken { + t.Fatal("Redirect should not forward Authorization header to another host") + } +} + +// testDirectory creates a new temporary directory and returns its path. +// The contents of the directory at path `templateDir` are copied into the +// new directory. +func testDirectory(templateDir string) (dir string, err error) { + testID := stringid.GenerateNonCryptoID()[:4] + prefix := fmt.Sprintf("docker-test%s-%s-", testID, getCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.NewDefaultArchiver().CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// getCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func getCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. 
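+ // The +1 skips getCallerName's own stack frame, so depth is counted + // relative to this function's caller.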
+ pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} diff --git a/distribution/utils/progress.go b/distribution/utils/progress.go new file mode 100644 index 0000000..cc3632a --- /dev/null +++ b/distribution/utils/progress.go @@ -0,0 +1,44 @@ +package utils + +import ( + "io" + "net" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// WriteDistributionProgress is a helper for writing progress from chan to JSON +// stream with an optional cancel function. +func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} diff --git a/distribution/xfer/download.go b/distribution/xfer/download.go new file mode 100644 index 0000000..e630670 --- /dev/null +++ b/distribution/xfer/download.go @@ -0,0 +1,504 @@ +package xfer + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/resin-os/librsync-go" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStores map[string]layer.Store + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { + manager := LayerDownloadManager{ + layerStores: layerStores, + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. 
+func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + Size() int64 + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // DeltaBase returns the delta base stream, if any. + DeltaBase() io.ReadSeeker + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to be present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + // Assume that the platform is the host OS if blank + if platform == "" { + platform = layer.Platform(runtime.GOOS) + } + + totalProgress := progress.NewProgressSink(progressOutput, 0, "Total", "") + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStores[string(platform)].Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + // Register this repository as a source of this layer. + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(diffID) + } + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once. 
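+ // If so, register on top of the in-flight download instead of starting + // a second download of the same blob. The transfer key is still unique + // because it accumulates the keys of all layers up to this point.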
+ var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, platform) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. + progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + totalProgress.Size += descriptor.Size() + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, platform, totalProgress) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, platform, totalProgress) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, platform layer.Platform, totalProgress io.Writer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStores[string(platform)], + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? 
+ select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(ldm.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(io.TeeReader(reader, totalProgress)) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + layerData := inflatedLayerData + + deltaBase := descriptor.DeltaBase() + + if deltaBase != nil { + pR, pW := io.Pipe() + go func() { + tr := tar.NewReader(inflatedLayerData) + + _, err := tr.Next() + if err != nil { + // Propagate the real failure to the pipe reader; closing + // with the bare io.EOF would make the stream look like a + // clean (but truncated) archive. + d.err = fmt.Errorf("invalid delta tar archive: %v", err) + pW.CloseWithError(d.err) + return + } + + err = librsync.Patch(deltaBase, tr, pW) + if err != nil { + pW.CloseWithError(err) + return + } + + pW.Close() + }() + + layerData = pR + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerData, parentLayer, platform, src) + } else { + d.layer, err = d.layerStore.Register(layerData, parentLayer, platform) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). 
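+ // Hold the reference to the registered layer until every watcher has + // released the transfer; only then is it safe to hand the reference + // back to the layer store.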
+ go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, platform layer.Platform) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStores[string(platform)], + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, platform, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer, platform) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). 
+ go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/distribution/xfer/download_test.go b/distribution/xfer/download_test.go new file mode 100644 index 0000000..0d1fa19 --- /dev/null +++ b/distribution/xfer/download_test.go @@ -0,0 +1,382 @@ +package xfer + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +const maxDownloadConcurrency = 3 + +type mockLayer struct { + layerData bytes.Buffer + diffID layer.DiffID + chainID layer.ChainID + parent layer.Layer + platform layer.Platform +} + +func (ml *mockLayer) TarStream() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil +} + +func (ml *mockLayer) TarSeekStream() (ioutils.ReadSeekCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) ChainID() layer.ChainID { + return ml.chainID +} + +func (ml *mockLayer) DiffID() layer.DiffID { + return ml.diffID +} + +func (ml *mockLayer) Parent() layer.Layer { + return ml.parent +} + +func (ml *mockLayer) Size() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) Platform() layer.Platform { + return ml.platform +} + +func (ml *mockLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +type mockLayerStore struct { + layers map[layer.ChainID]*mockLayer +} + +func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) 
+} + +func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { + layers := map[layer.ChainID]layer.Layer{} + + for k, v := range ls.layers { + layers[k] = v + } + + return layers +} + +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, platform layer.Platform) (layer.Layer, error) { + return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) +} + +func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { + var ( + parent layer.Layer + err error + ) + + if parentID != "" { + parent, err = ls.Get(parentID) + if err != nil { + return nil, err + } + } + + l := &mockLayer{parent: parent} + _, err = l.layerData.ReadFrom(reader) + if err != nil { + return nil, err + } + l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) + l.chainID = createChainIDFromParent(parentID, l.diffID) + + ls.layers[l.chainID] = l + return l, nil +} + +func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { + l, ok := ls.layers[chainID] + if !ok { + return nil, layer.ErrLayerDoesNotExist + } + return l, nil +} + +func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { + return []layer.Metadata{}, nil +} +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { + return nil, errors.New("not implemented") +} +func (ls *mockLayerStore) GetMountID(string) (string, error) { + return "", errors.New("not implemented") +} + +func (ls *mockLayerStore) Cleanup() error { + return nil +} + +func (ls *mockLayerStore) DriverStatus() [][2]string { + return [][2]string{} +} + +func (ls *mockLayerStore) DriverName() string { + return "mock" +} + +type mockDownloadDescriptor struct { + currentDownloads *int32 + id string + diffID layer.DiffID + registeredDiffID layer.DiffID + expectedDiffID layer.DiffID + simulateRetries int + size int64 +} + +func (d *mockDownloadDescriptor) DeltaBase() io.ReadSeeker { + // TODO implement a test for DeltaBase + return nil +} + +func (d *mockDownloadDescriptor) Size() int64 { + return d.size +} + +// Key returns the key used to deduplicate downloads. +func (d *mockDownloadDescriptor) Key() string { + return d.id +} + +// ID returns the ID for display purposes. +func (d *mockDownloadDescriptor) ID() string { + return d.id +} + +// DiffID should return the DiffID for this layer, or an error +// if it is unknown (for example, if it has not been downloaded +// before). +func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { + if d.diffID != "" { + return d.diffID, nil + } + return "", errors.New("no diffID available") +} + +func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { + d.registeredDiffID = diffID +} + +func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { + // The mock implementation returns the ID repeated 5 times as a tar + // stream instead of actual tar data. The data is ignored except for + // computing IDs. + return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) +} + +// Download is called to perform the download. 
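+ // The mock sleeps through ten progress ticks, enforces maxDownloadConcurrency + // via the shared counter, and can fail a configurable number of times + // (simulateRetries) to exercise the retry path.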
+func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + if d.currentDownloads != nil { + defer atomic.AddInt32(d.currentDownloads, -1) + + if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { + return nil, 0, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming download. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) + } + } + + if d.simulateRetries != 0 { + d.simulateRetries-- + return nil, 0, errors.New("simulating retry") + } + + return d.mockTarStream(), 0, nil +} + +func (d *mockDownloadDescriptor) Close() { +} + +func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { + return []DownloadDescriptor{ + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id1", + expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id3", + expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id4", + expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), + simulateRetries: 1, + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id5", + expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), + }, + } +} + +func TestSuccessfulDownload(t *testing.T) { + // TODO Windows: Fix this unit test + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]progress.Progress) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p + } + close(progressDone) + }() + + var currentDownloads int32 + descriptors := downloadDescriptors(&currentDownloads) + + firstDescriptor := descriptors[0].(*mockDownloadDescriptor) + + // Pre-register the first layer to simulate an already-existing layer + l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + firstDescriptor.diffID = l.DiffID() + + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("download error: %v", err) + } + + releaseFunc() + + close(progressChan) + <-progressDone + + if len(rootFS.DiffIDs) != len(descriptors) { + 
t.Fatal("got wrong number of diffIDs in rootfs") + } + + for i, d := range descriptors { + descriptor := d.(*mockDownloadDescriptor) + + if descriptor.diffID != "" { + if receivedProgress[d.ID()].Action != "Already exists" { + t.Fatalf("did not get 'Already exists' message for %v", d.ID()) + } + } else if receivedProgress[d.ID()].Action != "Pull complete" { + t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) + } + + if rootFS.DiffIDs[i] != descriptor.expectedDiffID { + t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) + } + + if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { + t.Fatal("diffID mismatch between rootFS and Registered callback") + } + } +} + +func TestCancelledDownload(t *testing.T) { + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := downloadDescriptors(nil) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected download to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/distribution/xfer/transfer.go b/distribution/xfer/transfer.go new file mode 100644 index 0000000..b86c503 --- /dev/null +++ b/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. +type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. 
lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called. + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it to deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch, indicating that the watcher no longer wants +// to be notified about the progress of the transfer. 
All calls to Watch must + // be paired with later calls to Release so that the lifecycle of the transfer + // is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) + // SetConcurrency sets the concurrency limit so that it can be adjusted + // on daemon reload. + SetConcurrency(concurrency int) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. 
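+// A concurrencyLimit of 0 means the number of concurrent transfers is +// unlimited.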
+func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// SetConcurrency sets the concurrency limit. +func (tm *transferManager) SetConcurrency(concurrency int) { + tm.mu.Lock() + tm.concurrencyLimit = concurrency + tm.mu.Unlock() +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed from the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition. + runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } + } + + start := make(chan struct{}) + inactive := make(chan struct{}) + + if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { + close(start) + tm.activeTransfers++ + } else { + tm.waitingTransfers = append(tm.waitingTransfers, start) + } + + masterProgressChan := make(chan progress.Progress) + xfer := xferFunc(masterProgressChan, start, inactive) + watcher := xfer.Watch(progressOutput) + go xfer.Broadcast(masterProgressChan) + tm.transfers[key] = xfer + + // When the transfer is finished, remove from the map. + go func() { + for { + select { + case <-inactive: + tm.mu.Lock() + tm.inactivate(start) + tm.mu.Unlock() + inactive = nil + case <-xfer.Done(): + tm.mu.Lock() + if inactive != nil { + tm.inactivate(start) + } + delete(tm.transfers, key) + tm.mu.Unlock() + xfer.Close() + return + } + } + }() + + return xfer, watcher +} + +func (tm *transferManager) inactivate(start chan struct{}) { + // If the transfer was started, remove it from the activeTransfers + // count. 
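+ // A transfer that never started simply stays queued; the default case + // below then leaves the counters untouched.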
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/distribution/xfer/transfer_test.go b/distribution/xfer/transfer_test.go new file mode 100644 index 0000000..6c50ce3 --- /dev/null +++ b/distribution/xfer/transfer_test.go @@ -0,0 +1,410 @@ +package xfer + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/pkg/progress" +) + +func TestTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + select { + case <-start: + default: + t.Fatalf("transfer function not started even though concurrency limit not reached") + } + + xfer := NewTransfer() + go func() { + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + val, present := receivedProgress[p.ID] + if present && p.Current <= val { + t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) + } + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start a few transfers + ids := []string{"id1", "id2", "id3"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestConcurrencyLimit(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := 
range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestInactiveJobs(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + testDone := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(inactive) + <-testDone + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + close(testDone) + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestWatchRelease(t *testing.T) { + ready := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type watcherInfo struct { + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(w watcherInfo) { + first := true + for range w.progressChan { + if first { + close(w.receivedFirstProgress) + } + first = false + } + close(w.progressDone) + } + + // Start a transfer + watchers := make([]watcherInfo, 5) + var xfer Transfer + watchers[0].progressChan = make(chan progress.Progress) + watchers[0].progressDone = make(chan struct{}) + watchers[0].receivedFirstProgress = make(chan struct{}) + xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) + go progressConsumer(watchers[0]) + + // Give it multiple watchers + for i := 1; i != len(watchers); i++ { + watchers[i].progressChan = make(chan progress.Progress) + watchers[i].progressDone = make(chan struct{}) + watchers[i].receivedFirstProgress = make(chan struct{}) + watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) + go progressConsumer(watchers[i]) + } + + // Now that the watchers are set up, allow the transfer 
goroutine to + // proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, w := range watchers { + <-w.receivedFirstProgress + } + + // Release one watcher every 5ms + for _, w := range watchers { + xfer.Release(w.watcher) + <-time.After(5 * time.Millisecond) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() + + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-xfer.Done() + + for _, w := range watchers { + close(w.progressChan) + <-w.progressDone + } +} + +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() +} + +func TestDuplicateTransfer(t *testing.T) { + ready := make(chan struct{}) + + var xferFuncCalls int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + atomic.AddInt32(&xferFuncCalls, 1) + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type transferInfo struct { + xfer Transfer + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(t transferInfo) { + first := true + for range t.progressChan { + if first { + close(t.receivedFirstProgress) + } + first = false + } + close(t.progressDone) + } + + // Try to start multiple transfers with the same ID + transfers := make([]transferInfo, 5) + for i := range transfers { + t := &transfers[i] + t.progressChan = make(chan progress.Progress) + t.progressDone = make(chan struct{}) + t.receivedFirstProgress = make(chan struct{}) + t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) + go progressConsumer(*t) + } + + // Allow the transfer goroutine to proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, t := range transfers { + <-t.receivedFirstProgress + } + + // Confirm that the transfer function was called exactly once. 
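+	// Each DoFunc invocation ran synchronously inside tm.Transfer on this
+	// goroutine, so a plain (non-atomic) read of the counter is safe here.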
+ if xferFuncCalls != 1 { + t.Fatal("transfer function wasn't called exactly once") + } + + // Release one watcher every 5ms + for _, t := range transfers { + t.xfer.Release(t.watcher) + <-time.After(5 * time.Millisecond) + } + + for _, t := range transfers { + // Now that all watchers have been released, Released() should + // return a closed channel. + <-t.xfer.Released() + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-t.xfer.Done() + } + + for _, t := range transfers { + close(t.progressChan) + <-t.progressDone + } +} diff --git a/distribution/xfer/upload.go b/distribution/xfer/upload.go new file mode 100644 index 0000000..58422e5 --- /dev/null +++ b/distribution/xfer/upload.go @@ -0,0 +1,174 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { + manager := LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. 
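+// Upload returns ctx.Err() if the context is cancelled before every layer has
+// been pushed, and otherwise the first error reported by a layer upload.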
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
+	var (
+		uploads          []*uploadTransfer
+		dedupDescriptors = make(map[string]*uploadTransfer)
+	)
+
+	for _, descriptor := range layers {
+		progress.Update(progressOutput, descriptor.ID(), "Preparing")
+
+		key := descriptor.Key()
+		if _, present := dedupDescriptors[key]; present {
+			continue
+		}
+
+		xferFunc := lum.makeUploadFunc(descriptor)
+		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
+		defer upload.Release(watcher)
+		uploads = append(uploads, upload.(*uploadTransfer))
+		dedupDescriptors[key] = upload.(*uploadTransfer)
+	}
+
+	for _, upload := range uploads {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-upload.Transfer.Done():
+			if upload.err != nil {
+				return upload.err
+			}
+		}
+	}
+	for _, l := range layers {
+		l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
+	}
+
+	return nil
+}
+
+func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
+	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
+		u := &uploadTransfer{
+			Transfer: NewTransfer(),
+		}
+
+		go func() {
+			defer func() {
+				close(progressChan)
+			}()
+
+			progressOutput := progress.ChanOutput(progressChan)
+
+			select {
+			case <-start:
+			default:
+				progress.Update(progressOutput, descriptor.ID(), "Waiting")
+				<-start
+			}
+
+			retries := 0
+			for {
+				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
+				if err == nil {
+					u.remoteDescriptor = remoteDescriptor
+					break
+				}
+
+				// If an error was returned because the context
+				// was cancelled, we shouldn't retry.
+				select {
+				case <-u.Transfer.Context().Done():
+					u.err = err
+					return
+				default:
+				}
+
+				retries++
+				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
+					logrus.Errorf("Upload failed: %v", err)
+					u.err = err
+					return
+				}
+
+				logrus.Errorf("Upload failed, retrying: %v", err)
+				delay := retries * 5
+				ticker := time.NewTicker(lum.waitDuration)
+
+			selectLoop:
+				for {
+					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
+					select {
+					case <-ticker.C:
+						delay--
+						if delay == 0 {
+							ticker.Stop()
+							break selectLoop
+						}
+					case <-u.Transfer.Context().Done():
+						ticker.Stop()
+						u.err = errors.New("upload cancelled during retry delay")
+						return
+					}
+				}
+			}
+		}()
+
+		return u
+	}
+}
diff --git a/distribution/xfer/upload_test.go b/distribution/xfer/upload_test.go
new file mode 100644
index 0000000..066019f
--- /dev/null
+++ b/distribution/xfer/upload_test.go
@@ -0,0 +1,134 @@
+package xfer
+
+import (
+	"errors"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/progress"
+	"golang.org/x/net/context"
+)
+
+const maxUploadConcurrency = 3
+
+type mockUploadDescriptor struct {
+	currentUploads  *int32
+	diffID          layer.DiffID
+	simulateRetries int
+}
+
+// Key returns the key used to deduplicate uploads.
+func (u *mockUploadDescriptor) Key() string {
+	return u.diffID.String()
+}
+
+// ID returns the ID for display purposes.
+func (u *mockUploadDescriptor) ID() string {
+	return u.diffID.String()
+}
+
+// DiffID should return the DiffID for this layer.
+func (u *mockUploadDescriptor) DiffID() layer.DiffID {
+	return u.diffID
+}
+
+// SetRemoteDescriptor is not used in the mock.
+func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { +} + +// Upload is called to perform the upload. +func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if u.currentUploads != nil { + defer atomic.AddInt32(u.currentUploads, -1) + + if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { + return distribution.Descriptor{}, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming upload. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return distribution.Descriptor{}, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) + } + } + + if u.simulateRetries != 0 { + u.simulateRetries-- + return distribution.Descriptor{}, errors.New("simulating retry") + } + + return distribution.Descriptor{}, nil +} + +func uploadDescriptors(currentUploads *int32) []UploadDescriptor { + return []UploadDescriptor{ + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, + } +} + +func TestSuccessfulUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + var currentUploads int32 + descriptors := uploadDescriptors(¤tUploads) + + err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("upload error: %v", err) + } + + close(progressChan) + <-progressDone +} + +func TestCancelledUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := uploadDescriptors(nil) + err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected upload to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/dockerversion/useragent.go b/dockerversion/useragent.go new file mode 100644 index 0000000..53632cb --- /dev/null +++ b/dockerversion/useragent.go @@ -0,0 +1,76 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/pkg/parsers/kernel" + 
"github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. +// In accordance with RFC 7231 (5.5.3) is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...) + upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(UAStringKey) + if ki != nil { + upstreamUA = ctx.Value(UAStringKey).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapeableRune := range charsToEscape { + if currRune == escapeableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/dockerversion/version_lib.go b/dockerversion/version_lib.go new file mode 100644 index 0000000..33f77d3 --- /dev/null +++ b/dockerversion/version_lib.go @@ -0,0 +1,16 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. 
+const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/docs/api/v1.18.md b/docs/api/v1.18.md new file mode 100644 index 0000000..3c82371 --- /dev/null +++ b/docs/api/v1.18.md @@ -0,0 +1,2159 @@ +--- +title: "Engine API v1.18" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.18/ +- /reference/api/docker_remote_api_v1.18/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.18/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=`(`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.18/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+           "Hostname": "",
+           "Domainname": "",
+           "User": "",
+           "AttachStdin": false,
+           "AttachStdout": true,
+           "AttachStderr": true,
+           "Tty": false,
+           "OpenStdin": false,
+           "StdinOnce": false,
+           "Env": [
+                   "FOO=bar",
+                   "BAZ=quux"
+           ],
+           "Cmd": [
+                   "date"
+           ],
+           "Entrypoint": null,
+           "Image": "ubuntu",
+           "Labels": {
+                   "com.example.vendor": "Acme",
+                   "com.example.license": "GPL",
+                   "com.example.version": "1.0"
+           },
+           "Volumes": {
+             "/volumes/data": {}
+           },
+           "WorkingDir": "",
+           "NetworkDisabled": false,
+           "MacAddress": "12:34:56:78:9a:bc",
+           "ExposedPorts": {
+                   "22/tcp": {}
+           },
+           "HostConfig": {
+             "Binds": ["/tmp:/tmp"],
+             "Links": ["redis3:redis"],
+             "LxcConf": {"lxc.utsname":"docker"},
+             "Memory": 0,
+             "MemorySwap": 0,
+             "CpuShares": 512,
+             "CpusetCpus": "0,1",
+             "PidMode": "",
+             "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+             "PublishAllPorts": false,
+             "Privileged": false,
+             "ReadonlyRootfs": false,
+             "Dns": ["8.8.8.8"],
+             "DnsSearch": [""],
+             "ExtraHosts": null,
+             "VolumesFrom": ["parent", "other:ro"],
+             "CapAdd": ["NET_ADMIN"],
+             "CapDrop": ["MKNOD"],
+             "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+             "NetworkMode": "bridge",
+             "Devices": [],
+             "Ulimits": [{}],
+             "LogConfig": { "Type": "json-file", "Config": {} },
+             "SecurityOpt": [],
+             "CgroupParent": ""
+          }
+      }
+
+**Example response**:
+
+      HTTP/1.1 201 Created
+      Content-Type: application/json
+
+      {
+           "Id":"e90e34656806",
+           "Warnings":[]
+      }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+    - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
+       + `host-src:container-dest` to bind-mount a host path into the
+         container. Both `host-src`, and `container-dest` must be an
+         _absolute_ path.
+       + `host-src:container-dest:ro` to make the bind-mount read-only
+         inside the container. Both `host-src`, and `container-dest` must be
+         an _absolute_ path.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **LxcConf** - LXC specific configurations. These configurations only
+      work when using the `lxc` execution driver.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **CpuShares** - An integer value containing the container's CPU Shares
+      (i.e., the relative weight vs. other containers).
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace;
+      `"host"`: use the host's PID namespace inside the container
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<HostPort>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsSearch** - A list of DNS search domains
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, `none`, and `container:<name|id>`
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt**: A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+ Available types: `json-file`, `syslog`, `journald`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 
0,
+        "Restarting": false,
+        "Running": true,
+        "StartedAt": "2015-01-06T15:47:32.072697474Z"
+    },
+    "Volumes": {},
+    "VolumesRW": {}
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### List processes running inside a container
+
+`GET /containers/(id or name)/top`
+
+List processes running inside the container `id`. On Unix systems this
+is done by running the `ps` command. This endpoint is not
+supported on Windows.
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container ``id``
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+   GET /v1.18/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1
+
+**Example response**:
+
+   HTTP/1.1 101 UPGRADED
+   Content-Type: application/vnd.docker.raw-stream
+   Connection: Upgrade
+   Upgrade: tcp
+
+   {% raw %}
+   {{ STREAM }}
+   {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
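+
+When the container was created without a TTY, the body of this response is not
+plain text but the multiplexed `application/vnd.docker.raw-stream` framing
+described under **Stream details** in the *Attach to a container* section
+below. As a minimal client-side sketch in Go, assuming only the 8-byte header
+layout documented there (the helper name `demux` is illustrative, not part of
+any Docker library):
+
+    package main
+
+    import (
+        "encoding/binary"
+        "fmt"
+        "io"
+        "os"
+    )
+
+    // demux splits the raw stream into stdout and stderr using the
+    // 8-byte header [STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4].
+    func demux(r io.Reader, stdout, stderr io.Writer) error {
+        header := make([]byte, 8)
+        for {
+            if _, err := io.ReadFull(r, header); err != nil {
+                if err == io.EOF {
+                    return nil // clean end of stream
+                }
+                return err
+            }
+            var dst io.Writer
+            switch header[0] {
+            case 0, 1: // stdin (written on stdout) and stdout frames
+                dst = stdout
+            case 2: // stderr frames
+                dst = stderr
+            default:
+                return fmt.Errorf("unknown stream type %d", header[0])
+            }
+            // The frame size is a big-endian uint32 in the last four bytes.
+            size := int64(binary.BigEndian.Uint32(header[4:8]))
+            if _, err := io.CopyN(dst, r, size); err != nil {
+                return err
+            }
+        }
+    }
+
+    func main() {
+        // In practice r would be the hijacked HTTP connection; os.Stdin
+        // stands in here so the sketch is runnable.
+        if err := demux(os.Stdin, os.Stdout, os.Stderr); err != nil {
+            fmt.Fprintln(os.Stderr, err)
+        }
+    }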
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "Path": "/dev",
+        "Kind": 0
+      },
+      {
+        "Path": "/dev/kmsg",
+        "Kind": 1
+      },
+      {
+        "Path": "/test",
+        "Kind": 1
+      }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.18/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
+
+**Example request**:
+
+    GET /v1.18/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {
+         "read" : "2015-01-08T22:57:31.547920715Z",
+         "network" : {
+            "rx_dropped" : 0,
+            "rx_bytes" : 648,
+            "rx_errors" : 0,
+            "tx_packets" : 8,
+            "tx_dropped" : 0,
+            "rx_packets" : 8,
+            "tx_errors" : 0,
+            "tx_bytes" : 648
+         },
+         "memory_stats" : {
+            "stats" : {
+               "total_pgmajfault" : 0,
+               "cache" : 0,
+               "mapped_file" : 0,
+               "total_inactive_file" : 0,
+               "pgpgout" : 414,
+               "rss" : 6537216,
+               "total_mapped_file" : 0,
+               "writeback" : 0,
+               "unevictable" : 0,
+               "pgpgin" : 477,
+               "total_unevictable" : 0,
+               "pgmajfault" : 0,
+               "total_rss" : 6537216,
+               "total_rss_huge" : 6291456,
+               "total_writeback" : 0,
+               "total_inactive_anon" : 0,
+               "rss_huge" : 6291456,
+               "hierarchical_memory_limit" : 67108864,
+               "total_pgfault" : 964,
+               "total_active_file" : 0,
+               "active_anon" : 6537216,
+               "total_active_anon" : 6537216,
+               "total_pgpgout" : 414,
+               "total_cache" : 0,
+               "inactive_anon" : 0,
+               "active_file" : 0,
+               "pgfault" : 964,
+               "inactive_file" : 0,
+               "total_pgpgin" : 477
+            },
+            "max_usage" : 6651904,
+            "usage" : 6537216,
+            "failcnt" : 0,
+            "limit" : 67108864
+         },
+         "blkio_stats" : {},
+         "cpu_stats" : {
+            "cpu_usage" : {
+               "percpu_usage" : [
+                  16970827,
+                  1839451,
+                  7107380,
+                  10571290
+               ],
+               "usage_in_usermode" : 10000000,
+               "total_usage" : 36488948,
+               "usage_in_kernelmode" : 20000000
+            },
+            "system_cpu_usage" : 20091722000000000,
+            "throttling_data" : {}
+         }
+      }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize?h=<height>&w=<width>`
+
+Resize the TTY for container with `id`. You must restart the container for the resize to take effect.
+ +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.18/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
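+
+For example, to send `SIGINT` instead of the default `SIGKILL`:
+
+    POST /v1.18/containers/e90e34656806/kill?signal=SIGINT HTTP/1.1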
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.18/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict, name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.18/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.18/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.18/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header indicates which stream the payload belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame encoded in the
+last four bytes (`uint32`).
+
+It is encoded on the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+ 1. Read eight bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4.
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.18/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.18/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.18/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. 
+ +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.18/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /v1.18/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.18/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.18/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. 
+- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.18/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.18/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
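+
+The AuthConfig object carries the same fields as the `/auth` request body
+shown in section 2.3. As an illustrative sketch of producing the header value
+in Go (not part of this API; the Docker client of this era used URL-safe
+base64 for this header, so treat the exact encoding as an assumption to verify
+against your client library):
+
+    package main
+
+    import (
+        "encoding/base64"
+        "encoding/json"
+        "fmt"
+    )
+
+    // authConfig mirrors the JSON fields shown in the /auth example.
+    type authConfig struct {
+        Username      string `json:"username"`
+        Password      string `json:"password"`
+        Email         string `json:"email"`
+        ServerAddress string `json:"serveraddress"`
+    }
+
+    func main() {
+        buf, err := json.Marshal(authConfig{
+            Username:      "hannibal",
+            Password:      "xxxx",
+            Email:         "hannibal@a-team.com",
+            ServerAddress: "https://index.docker.io/v1/",
+        })
+        if err != nil {
+            panic(err)
+        }
+        // The header value is the base64-encoded JSON object.
+        fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
+    }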
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.18/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.18/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.18/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_automated": false, + "description": "" + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.18/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.18/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.18/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.18/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** 
– source container
+- **repo** – repository
+- **tag** – tag
+- **comment** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Monitor Docker's events
+
+`GET /events`
+
+Get container events from docker, in real time via streaming.
+
+Docker containers report the following events:
+
+    create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause
+
+Docker images report the following events:
+
+    untag, delete
+
+**Example request**:
+
+    GET /v1.18/events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+    {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+    {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966}
+    {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970}
+
+**Query parameters**:
+
+- **since** – Timestamp. Show all events created since timestamp and then stream
+- **until** – Timestamp. Show events created until given timestamp and stop streaming
+- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters:
+  - `container=<string>`; -- container to filter
+  - `event=<string>`; -- event to filter
+  - `image=<string>`; -- image to filter
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Get a tarball containing all images in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository specified
+by `name`.
+
+If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image
+(and its parents) are returned. If `name` is an image ID, similarly only that
+image (and its parents) are returned, but with the exclusion of the
+'repositories' file in the tarball, as there were no image names referenced.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /v1.18/images/ubuntu/get
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Get a tarball containing all images
+
+`GET /images/get`
+
+Get a tarball containing all images and metadata for one or more repositories.
+
+For each value of the `names` parameter: if it is a specific name and tag (e.g.
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is
+an image ID, similarly only that image (and its parents) are returned and there
+would be no names referenced in the 'repositories' file for this image ID.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    GET /v1.18/images/get?names=myname%2Fmyapp%3Alatest&names=busybox
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into a Docker repository.
+See the [image tarball format](#image-tarball-format) for more details.
+ +**Example request** + + POST /v1.18/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. + + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+
+**Example request**:
+
+    GET /v1.18/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+        "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+        "Running" : false,
+        "ExitCode" : 2,
+        "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+                "-c",
+                "exit 2"
+            ]
+        },
+        "OpenStdin" : false,
+        "OpenStderr" : false,
+        "OpenStdout" : false,
+        "Container" : {
+            "State" : {
+                "Running" : true,
+                "Paused" : false,
+                "Restarting" : false,
+                "OOMKilled" : false,
+                "Pid" : 3650,
+                "ExitCode" : 0,
+                "Error" : "",
+                "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+                "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+                "Hostname" : "8f177a186b97",
+                "Domainname" : "",
+                "User" : "",
+                "AttachStdin" : false,
+                "AttachStdout" : false,
+                "AttachStderr" : false,
+                "PortSpecs": null,
+                "ExposedPorts" : null,
+                "Tty" : false,
+                "OpenStdin" : false,
+                "StdinOnce" : false,
+                "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+                "Cmd" : [
+                    "date"
+                ],
+                "Image" : "ubuntu",
+                "Volumes" : null,
+                "WorkingDir" : "",
+                "Entrypoint" : null,
+                "NetworkDisabled" : false,
+                "MacAddress" : "",
+                "OnBuild" : null,
+                "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+                "IPAddress" : "172.17.0.2",
+                "IPPrefixLen" : 16,
+                "MacAddress" : "02:42:ac:11:00:02",
+                "Gateway" : "172.17.42.1",
+                "Bridge" : "docker0",
+                "PortMapping" : null,
+                "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Volumes" : {},
+            "VolumesRW" : {}
+        }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to websocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+This might change in the future.
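+
+As an illustrative sketch (not part of the API itself), the handshake can be
+exercised directly from Python's standard library against the default Unix
+socket; the container ID below is a placeholder:
+
+```python
+import socket
+
+CONTAINER = "e90e34656806"  # placeholder container ID
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+sock.connect("/var/run/docker.sock")
+
+# Ask the daemon to hijack the connection for a bidirectional attach.
+request = (
+    "POST /v1.18/containers/{}/attach?stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1\r\n"
+    "Host: localhost\r\n"
+    "Upgrade: tcp\r\n"
+    "Connection: Upgrade\r\n"
+    "\r\n"
+).format(CONTAINER)
+sock.sendall(request.encode("ascii"))
+
+# The daemon replies with "HTTP/1.1 101 UPGRADED" and the raw
+# stdin/stdout/stderr stream then continues on this same socket.
+print(sock.recv(4096).decode("ascii", "replace"))
+```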
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, give values to
+`--api-cors-header` when running Docker in daemon mode. Setting it to `*`
+(asterisk) allows all origins; the default (blank) leaves CORS disabled.
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/api/v1.19.md b/docs/api/v1.19.md
new file mode 100644
index 0000000..bd5f09f
--- /dev/null
+++ b/docs/api/v1.19.md
@@ -0,0 +1,2241 @@
+---
+title: "Engine API v1.19"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.19/
+- /reference/api/docker_remote_api_v1.19/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+## 2. Endpoints
+
+### 2.1 Containers
+
+#### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+    GET /v1.19/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "8dfafdbc3a40",
+            "Names":["/boring_feynman"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 1",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "9cd87474be90",
+            "Names":["/coolName"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 222222",
+            "Created": 1367854155,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        },
+        {
+            "Id": "3176a2479c92",
+            "Names":["/sleepy_dog"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 3333333333333333",
+            "Created": 1367854154,
+            "Status": "Exit 0",
+            "Ports":[],
+            "Labels": {},
+            "SizeRw":12288,
+            "SizeRootFs":0
+        },
+        {
+            "Id": "4cb07b47f9fb",
+            "Names":["/running_cat"],
+            "Image": "ubuntu:latest",
+            "Command": "echo 444444444444444444444444444444444",
+            "Created": 1367854152,
+            "Status": "Exit 0",
+            "Ports": [],
+            "Labels": {},
+            "SizeRw": 12288,
+            "SizeRootFs": 0
+        }
+    ]
+
+**Query parameters**:
+
+- **all** – 1/True/true or 0/False/false, Show all containers.
+  Only running containers are shown by default (i.e., this defaults to false)
+- **limit** – Show `limit` last created
+  containers, include non-running ones.
+- **since** – Show only containers created since Id, include
+  non-running ones.
+- **before** – Show only containers created before Id, include
+  non-running ones.
+- **size** – 1/True/true or 0/False/false, Show the containers
+  sizes
+- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>`;
+  - `status=`(`restarting`|`running`|`paused`|`exited`)
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
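+
+For illustration, here is a minimal sketch of calling this endpoint over the
+Unix socket with Python's standard library. Note that `filters` must be
+JSON-encoded before it is URL-encoded; the helper class and the filter values
+used here are examples, not part of the API:
+
+```python
+import http.client
+import json
+import socket
+import urllib.parse
+
+class UnixHTTPConnection(http.client.HTTPConnection):
+    """HTTPConnection variant that talks to the daemon's Unix socket."""
+    def __init__(self, path="/var/run/docker.sock"):
+        super().__init__("localhost")
+        self.unix_path = path
+
+    def connect(self):
+        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.sock.connect(self.unix_path)
+
+# filters is a JSON-encoded map[string][]string, passed as a query parameter.
+filters = json.dumps({"status": ["exited"], "label": ["com.example.vendor=Acme"]})
+query = urllib.parse.urlencode({"all": 1, "filters": filters})
+
+conn = UnixHTTPConnection()
+conn.request("GET", "/v1.19/containers/json?" + query)
+for c in json.loads(conn.getresponse().read()):
+    print(c["Id"][:12], c["Image"], c["Status"])
+```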
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.19/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Hostname": "",
+        "Domainname": "",
+        "User": "",
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "FOO=bar",
+            "BAZ=quux"
+        ],
+        "Cmd": [
+            "date"
+        ],
+        "Entrypoint": null,
+        "Image": "ubuntu",
+        "Labels": {
+            "com.example.vendor": "Acme",
+            "com.example.license": "GPL",
+            "com.example.version": "1.0"
+        },
+        "Volumes": {
+            "/volumes/data": {}
+        },
+        "WorkingDir": "",
+        "NetworkDisabled": false,
+        "MacAddress": "12:34:56:78:9a:bc",
+        "ExposedPorts": {
+            "22/tcp": {}
+        },
+        "HostConfig": {
+            "Binds": ["/tmp:/tmp"],
+            "Links": ["redis3:redis"],
+            "LxcConf": {"lxc.utsname":"docker"},
+            "Memory": 0,
+            "MemorySwap": 0,
+            "CpuShares": 512,
+            "CpuPeriod": 100000,
+            "CpuQuota": 50000,
+            "CpusetCpus": "0,1",
+            "CpusetMems": "0,1",
+            "BlkioWeight": 300,
+            "OomKillDisable": false,
+            "PidMode": "",
+            "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+            "PublishAllPorts": false,
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "Dns": ["8.8.8.8"],
+            "DnsSearch": [""],
+            "ExtraHosts": null,
+            "VolumesFrom": ["parent", "other:ro"],
+            "CapAdd": ["NET_ADMIN"],
+            "CapDrop": ["MKNOD"],
+            "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+            "NetworkMode": "bridge",
+            "Devices": [],
+            "Ulimits": [{}],
+            "LogConfig": { "Type": "json-file", "Config": {} },
+            "SecurityOpt": [],
+            "CgroupParent": ""
+        }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id":"e90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+  - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace
+    `"host"`: use the host's PID namespace inside the container
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsSearch** - A list of DNS search domains
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    values are: `bridge`, `host`, `none`, and `container:<name|id>`
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+    Available types: `json-file`, `syslog`, `journald`, `none`.
+    `syslog` available options are: `address`.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
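+
+As a usage sketch (assumptions: Python 3, the default Unix socket, and a
+locally available `ubuntu` image), creating and then starting a container
+looks like this; the `UnixHTTPConnection` helper is the same illustrative
+class as in the list-containers sketch above:
+
+```python
+import http.client
+import json
+import socket
+
+class UnixHTTPConnection(http.client.HTTPConnection):
+    """HTTPConnection variant that talks to the daemon's Unix socket."""
+    def __init__(self, path="/var/run/docker.sock"):
+        super().__init__("localhost")
+        self.unix_path = path
+
+    def connect(self):
+        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.sock.connect(self.unix_path)
+
+config = {
+    "Image": "ubuntu",
+    "Cmd": ["date"],
+    "HostConfig": {"RestartPolicy": {"Name": "on-failure", "MaximumRetryCount": 3}},
+}
+
+conn = UnixHTTPConnection()
+conn.request("POST", "/v1.19/containers/create?name=api_example",
+             body=json.dumps(config),
+             headers={"Content-Type": "application/json"})
+created = json.loads(conn.getresponse().read())
+print("created:", created["Id"])
+
+# Starting the container returns 204 No Content on success.
+conn = UnixHTTPConnection()
+conn.request("POST", "/v1.19/containers/{}/start".format(created["Id"]))
+print("start status:", conn.getresponse().status)
+```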
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": ""
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.2",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "OomKillDisable": false,
+            "NetworkMode": "bridge",
+            "PidMode": "",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "PublishAllPorts": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 2,
+                "Name": "on-failure"
+            },
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "SecurityOpt": null,
+            "VolumesFrom": null,
+            "Ulimits": [{}]
+        },
+        "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname",
+        "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts",
"LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. 
Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Path": "/dev",
+            "Kind": 0
+        },
+        {
+            "Path": "/dev/kmsg",
+            "Kind": 1
+        },
+        {
+            "Path": "/test",
+            "Kind": 1
+        }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.19/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
+
+**Example request**:
+
+    GET /v1.19/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "read" : "2015-01-08T22:57:31.547920715Z",
+        "network" : {
+            "rx_dropped" : 0,
+            "rx_bytes" : 648,
+            "rx_errors" : 0,
+            "tx_packets" : 8,
+            "tx_dropped" : 0,
+            "rx_packets" : 8,
+            "tx_errors" : 0,
+            "tx_bytes" : 648
+        },
+        "memory_stats" : {
+            "stats" : {
+                "total_pgmajfault" : 0,
+                "cache" : 0,
+                "mapped_file" : 0,
+                "total_inactive_file" : 0,
+                "pgpgout" : 414,
+                "rss" : 6537216,
+                "total_mapped_file" : 0,
+                "writeback" : 0,
+                "unevictable" : 0,
+                "pgpgin" : 477,
+                "total_unevictable" : 0,
+                "pgmajfault" : 0,
+                "total_rss" : 6537216,
+                "total_rss_huge" : 6291456,
+                "total_writeback" : 0,
+                "total_inactive_anon" : 0,
+                "rss_huge" : 6291456,
+                "hierarchical_memory_limit" : 67108864,
+                "total_pgfault" : 964,
+                "total_active_file" : 0,
+                "active_anon" : 6537216,
+                "total_active_anon" : 6537216,
+                "total_pgpgout" : 414,
+                "total_cache" : 0,
+                "inactive_anon" : 0,
+                "active_file" : 0,
+                "pgfault" : 964,
+                "inactive_file" : 0,
+                "total_pgpgin" : 477
+            },
+            "max_usage" : 6651904,
+            "usage" : 6537216,
+            "failcnt" : 0,
+            "limit" : 67108864
+        },
+        "blkio_stats" : {},
+        "cpu_stats" : {
+            "cpu_usage" : {
+                "percpu_usage" : [
+                    8646879,
+                    24472255,
+                    36438778,
+                    30657443
+                ],
+                "usage_in_usermode" : 50000000,
+                "total_usage" : 100215355,
+                "usage_in_kernelmode" : 30000000
+            },
+            "system_cpu_usage" : 739306590000000,
+            "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+        },
+        "precpu_stats" : {
+            "cpu_usage" : {
+                "percpu_usage" : [
+                    8646879,
+                    24350896,
+                    36438778,
+                    30657443
+                ],
+                "usage_in_usermode" : 50000000,
+                "total_usage" : 100093996,
+                "usage_in_kernelmode" : 30000000
+            },
+            "system_cpu_usage" : 9492140000000,
+            "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+        }
+    }
+
+The `precpu_stats` field contains the CPU statistics from the previous read;
+it is used for calculating the CPU usage percentage and is not an exact copy
+of the `cpu_stats` field.
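+
+For example, the per-container CPU usage percentage that the `docker stats`
+CLI reports can be derived from one decoded response object roughly as
+follows (an illustrative sketch, not part of the API):
+
+```python
+def cpu_percent(stats):
+    """CPU usage since the previous sample, given one decoded stats object."""
+    cpu = stats["cpu_stats"]["cpu_usage"]
+    pre = stats["precpu_stats"]["cpu_usage"]
+    cpu_delta = cpu["total_usage"] - pre["total_usage"]
+    system_delta = (stats["cpu_stats"]["system_cpu_usage"]
+                    - stats["precpu_stats"]["system_cpu_usage"])
+    if cpu_delta <= 0 or system_delta <= 0:
+        return 0.0
+    # Scale by the number of CPUs so that a fully busy 4-CPU container
+    # reports 400%, matching the CLI's convention.
+    return float(cpu_delta) / system_delta * len(cpu["percpu_usage"]) * 100.0
+```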
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false. If `false`, pull the stats once
+  and then disconnect; if `true`, keep streaming. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize?h=<height>&w=<width>`
+
+Resize the TTY for container with  `id`. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.19/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already started
+- **404** – no such container
+- **500** – server error
+
+#### Stop a container
+
+`POST /containers/(id or name)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already stopped
+- **404** – no such container
+- **500** – server error
+
+#### Restart a container
+
+`POST /containers/(id or name)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Kill a container
+
+`POST /containers/(id or name)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **signal** - Signal to send to the container: integer or string like `SIGINT`.
+  When not set, `SIGKILL` is assumed and the call waits for the container to exit.
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.19/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies which stream the payload belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame encoded in the
+last four bytes (`uint32`).
+
+It is encoded on the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. Read the extracted size and output it on the correct output.
+    5. Goto 1.
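+
+A sketch of those steps in Python (illustrative only; `stream` is any
+file-like object wrapping the hijacked socket, e.g. `sock.makefile("rb")`,
+and for brevity `read()` is assumed to return complete frames):
+
+```python
+import struct
+import sys
+
+def demux(stream, out=sys.stdout.buffer, err=sys.stderr.buffer):
+    """Split a multiplexed attach stream into stdout and stderr."""
+    while True:
+        header = stream.read(8)                    # 1. read eight bytes
+        if len(header) < 8:                        # stream closed
+            return
+        # 2./3. first byte is STREAM_TYPE, last four are the big-endian size.
+        stream_type, size = struct.unpack(">BxxxI", header)
+        payload = stream.read(size)                # 4. read the frame
+        (err if stream_type == 2 else out).write(payload)  # 5. loop
+```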
+#### Attach to a container (websocket)
+
+`GET /containers/(id or name)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /v1.19/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Wait a container
+
+`POST /containers/(id or name)/wait`
+
+Block until container `id` stops, then return the exit code
+
+**Example request**:
+
+    POST /v1.19/containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Remove a container
+
+`DELETE /containers/(id or name)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.19/containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated with the container. Default `false`.
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+  Default `false`.
+- **link** - 1/True/true or 0/False/false, Remove the specified
+  link associated with the container. Default `false`.
+ +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.19/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.19/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.19/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.19/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
+
+When using this endpoint to pull an image from the registry, the
+`X-Registry-Auth` header can be used to include
+a base64-encoded AuthConfig object.
+
+**Query parameters**:
+
+- **fromImage** – Name of the image to pull.
+- **fromSrc** – Source to import. The value may be a URL from which the image
+  can be retrieved or `-` to read the image from the request body.
+- **repo** – Repository name.
+- **tag** – Tag. If empty when pulling an image, this causes all tags
+  for the given image to be pulled.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object
+
+**Status codes**:
+
+- **200** – no error
+- **404** - repository does not exist or no read access
+- **500** – server error
+
+
+
+#### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+**Example request**:
+
+    GET /v1.19/images/ubuntu/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Created": "2013-03-23T22:24:18.818426-07:00",
+        "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+        "ContainerConfig": {
+            "Hostname": "",
+            "User": "",
+            "AttachStdin": false,
+            "AttachStdout": false,
+            "AttachStderr": false,
+            "Tty": true,
+            "OpenStdin": true,
+            "StdinOnce": false,
+            "Env": null,
+            "Cmd": ["/bin/bash"],
+            "Dns": null,
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "Volumes": null,
+            "VolumesFrom": "",
+            "WorkingDir": ""
+        },
+        "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+        "Parent": "27cf784147099545",
+        "Size": 6824592
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+**Example request**:
+
+    GET /v1.19/images/ubuntu/history HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710",
+            "Created": 1398108230,
+            "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /",
+            "Tags": [
+                "ubuntu:lucid",
+                "ubuntu:10.04"
+            ],
+            "Size": 182964289,
+            "Comment": ""
+        },
+        {
+            "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+            "Created": 1398108222,
+            "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image to the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` to the registry
+
+**Example request**:
+
+    POST /v1.19/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+To push an image to a private registry, the image must already be tagged
+into a repository that references that registry's `hostname` and `port`. That
+repository name is then used in the URL. This mirrors the command line's flow.
+
+**Example request**:
+
+    POST /v1.19/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.19/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.19/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com). This API
+returns both `is_trusted` and `is_automated` images. Currently, they
+are considered identical. In the future, the `is_trusted` property will
+be deprecated and replaced by the `is_automated` property.
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+**Example request**:
+
+    GET /v1.19/images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+        {
+            "star_count": 12,
+            "is_official": false,
+            "name": "wma55/u1210sshd",
+            "is_trusted": false,
+            "is_automated": false,
+            "description": ""
+        },
+        {
+            "star_count": 10,
+            "is_official": false,
+            "name": "jdswinbank/sshd",
+            "is_trusted": false,
+            "is_automated": false,
+            "description": ""
+        },
+        {
+            "star_count": 18,
+            "is_official": false,
+            "name": "vgauthier/sshd",
+            "is_trusted": false,
+            "is_automated": false,
+            "description": ""
+        }
+    ...
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.19/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.19/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.19/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.19" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.19/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created 
+ Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.19/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
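+
+As a rough illustration (not part of the original examples), the following
+minimal Go sketch fetches such a tarball over the daemon's default Unix
+socket and writes it to disk. The image name and output path are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+)
+
+// dockerClient returns an http.Client that dials the daemon's default
+// Unix socket; the "docker" host in URLs below is a placeholder.
+func dockerClient() *http.Client {
+	return &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+}
+
+func main() {
+	resp, err := dockerClient().Get("http://docker/v1.19/images/ubuntu/get")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		panic(fmt.Errorf("unexpected status: %s", resp.Status))
+	}
+	out, err := os.Create("ubuntu.tar") // tarball in the format described above
+	if err != nil {
+		panic(err)
+	}
+	defer out.Close()
+	if _, err := io.Copy(out, resp.Body); err != nil {
+		panic(err)
+	}
+}
+```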
+ +**Example request** + + GET /v1.19/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.19/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
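+
+Purely as a sketch (not from the original reference): starting the exec
+instance created above and dumping its output. With `Detach=false` the daemon
+hijacks the connection; a plain HTTP client can still read the output stream,
+though it cannot forward `stdin`. The exec ID is the hypothetical one from the
+create response above:
+
+```go
+package main
+
+import (
+	"context"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+)
+
+// dockerClient dials the daemon's default Unix socket; the "docker"
+// host in URLs below is a placeholder.
+func dockerClient() *http.Client {
+	return &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+}
+
+func main() {
+	execID := "f90e34656806" // ID returned by the exec-create call above
+	resp, err := dockerClient().Post(
+		"http://docker/v1.19/exec/"+execID+"/start",
+		"application/json",
+		strings.NewReader(`{"Detach": false, "Tty": false}`))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	// With Tty=false the body is the multiplexed raw stream described
+	// under "Attach to a container"; here it is simply copied as-is.
+	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
+		panic(err)
+	}
+}
+```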
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+
+**Stream details**:
+
+Similar to the stream behavior of the `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is the number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.19/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.19/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+      "Running" : false,
+      "ExitCode" : 2,
+      "ProcessConfig" : {
+        "privileged" : false,
+        "user" : "",
+        "tty" : false,
+        "entrypoint" : "sh",
+        "arguments" : [
+          "-c",
+          "exit 2"
+        ]
+      },
+      "OpenStdin" : false,
+      "OpenStderr" : false,
+      "OpenStdout" : false,
+      "Container" : {
+        "State" : {
+          "Running" : true,
+          "Paused" : false,
+          "Restarting" : false,
+          "OOMKilled" : false,
+          "Pid" : 3650,
+          "ExitCode" : 0,
+          "Error" : "",
+          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+          "FinishedAt" : "0001-01-01T00:00:00Z"
+        },
+        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+        "Created" : "2014-11-17T22:26:03.626304998Z",
+        "Path" : "date",
+        "Args" : [],
+        "Config" : {
+          "Hostname" : "8f177a186b97",
+          "Domainname" : "",
+          "User" : "",
+          "AttachStdin" : false,
+          "AttachStdout" : false,
+          "AttachStderr" : false,
+          "PortSpecs": null,
+          "ExposedPorts" : null,
+          "Tty" : false,
+          "OpenStdin" : false,
+          "StdinOnce" : false,
+          "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+          "Cmd" : [
+            "date"
+          ],
+          "Image" : "ubuntu",
+          "Volumes" : null,
+          "WorkingDir" : "",
+          "Entrypoint" : null,
+          "NetworkDisabled" : false,
+          "MacAddress" : "",
+          "OnBuild" : null,
+          "SecurityOpt" : null
+        },
+        "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+        "NetworkSettings" : {
+          "IPAddress" : "172.17.0.2",
+          "IPPrefixLen" : 16,
+          "MacAddress" : "02:42:ac:11:00:02",
+          "Gateway" : "172.17.42.1",
+          "Bridge" : "docker0",
+          "PortMapping" : null,
+          "Ports" : {}
+        },
+        "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+        "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+        "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+        "Name" : "/test",
+        "Driver" : "aufs",
+        "ExecDriver" : "native-0.2",
+        "MountLabel" : "",
+        "ProcessLabel" : "",
+        "AppArmorProfile" : "",
+        "RestartCount" : 0,
+        "Volumes" : {},
+        "VolumesRW" : {}
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** – server
error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it.
+    - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+    - Attach to the container, using `logs=1` (to have `stdout` and
+      `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+### 3.3 CORS Requests
+
+To enable cross-origin requests to the Engine API, pass the desired origin to
+`--api-cors-header` when running Docker in daemon mode. Setting `*` (asterisk)
+allows all origins; the default (blank) leaves CORS disabled.
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/api/v1.20.md b/docs/api/v1.20.md
new file mode 100644
index 0000000..a7fc999
--- /dev/null
+++ b/docs/api/v1.20.md
@@ -0,0 +1,2394 @@
+---
+title: "Engine API v1.20"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.20/
+- /reference/api/docker_remote_api_v1.20/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+
+## 2.
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.20/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=(created|restarting|running|paused|exited)`
+  - `label=key` or `label="key=value"` of a container label
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.20/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Hostname": "",
+      "Domainname": "",
+      "User": "",
+      "AttachStdin": false,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Tty": false,
+      "OpenStdin": false,
+      "StdinOnce": false,
+      "Env": [
+        "FOO=bar",
+        "BAZ=quux"
+      ],
+      "Cmd": [
+        "date"
+      ],
+      "Entrypoint": null,
+      "Image": "ubuntu",
+      "Labels": {
+        "com.example.vendor": "Acme",
+        "com.example.license": "GPL",
+        "com.example.version": "1.0"
+      },
+      "Volumes": {
+        "/volumes/data": {}
+      },
+      "WorkingDir": "",
+      "NetworkDisabled": false,
+      "MacAddress": "12:34:56:78:9a:bc",
+      "ExposedPorts": {
+        "22/tcp": {}
+      },
+      "HostConfig": {
+        "Binds": ["/tmp:/tmp"],
+        "Links": ["redis3:redis"],
+        "LxcConf": {"lxc.utsname":"docker"},
+        "Memory": 0,
+        "MemorySwap": 0,
+        "CpuShares": 512,
+        "CpuPeriod": 100000,
+        "CpuQuota": 50000,
+        "CpusetCpus": "0,1",
+        "CpusetMems": "0,1",
+        "BlkioWeight": 300,
+        "MemorySwappiness": 60,
+        "OomKillDisable": false,
+        "PidMode": "",
+        "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+        "PublishAllPorts": false,
+        "Privileged": false,
+        "ReadonlyRootfs": false,
+        "Dns": ["8.8.8.8"],
+        "DnsSearch": [""],
+        "ExtraHosts": null,
+        "VolumesFrom": ["parent", "other:ro"],
+        "CapAdd": ["NET_ADMIN"],
+        "CapDrop": ["MKNOD"],
+        "GroupAdd": ["newgroup"],
+        "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+        "NetworkMode": "bridge",
+        "Devices": [],
+        "Ulimits": [{}],
+        "LogConfig": { "Type": "json-file", "Config": {} },
+        "SecurityOpt": [],
+        "CgroupParent": ""
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Id":"e90e34656806",
+      "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **HostConfig**
+  - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src`, and `container-dest` must be an
+      _absolute_ path.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src`, and `container-dest` must be
+      an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace
+    `"host"`: use the host's PID namespace inside the container
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsSearch** - A list of DNS search domains.
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart (this field is optional).
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    values are: `bridge`, `host`, `none`, and `container:<name|id>`
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.20/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "AppArmorProfile": "",
+      "Args": [
+        "-c",
+        "exit 9"
+      ],
+      "Config": {
+        "AttachStderr": true,
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "Cmd": [
+          "/bin/sh",
+          "-c",
+          "exit 9"
+        ],
+        "Domainname": "",
+        "Entrypoint": null,
+        "Env": [
+          "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+        ],
+        "ExposedPorts": null,
+        "Hostname": "ba033ac44011",
+        "Image": "ubuntu",
+        "Labels": {
+          "com.example.vendor": "Acme",
+          "com.example.license": "GPL",
+          "com.example.version": "1.0"
+        },
+        "MacAddress": "",
+        "NetworkDisabled": false,
+        "OnBuild": null,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Tty": false,
+        "User": "",
+        "Volumes": null,
+        "WorkingDir": ""
+      },
+      "Created": "2015-01-06T15:47:31.485331387Z",
+      "Driver": "devicemapper",
+      "ExecDriver": "native-0.2",
+      "ExecIDs": null,
+      "HostConfig": {
+        "Binds": null,
+        "BlkioWeight": 0,
+        "CapAdd": null,
+        "CapDrop": null,
+        "ContainerIDFile": "",
+        "CpusetCpus": "",
+        "CpusetMems": "",
+        "CpuShares": 0,
+        "CpuPeriod": 100000,
+        "Devices": [],
+        "Dns": null,
+        "DnsSearch": null,
+        "ExtraHosts": null,
+        "IpcMode": "",
+        "Links": null,
+        "LxcConf": [],
+        "Memory": 0,
+        "MemorySwap": 0,
+        "OomKillDisable": false,
+        "NetworkMode": "bridge",
+        "PidMode": "",
+        "PortBindings": {},
+        "Privileged": false,
+        "ReadonlyRootfs": false,
+        "PublishAllPorts": false,
+        "RestartPolicy": {
+          "MaximumRetryCount": 2,
+          "Name": "on-failure"
+        },
+        "LogConfig": {
+          "Config": null,
+          "Type": "json-file"
+        },
+        "SecurityOpt": null,
+        "VolumesFrom": null,
+        "Ulimits": [{}]
+      },
+      "HostnamePath":
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. 
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.20/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "Path": "/dev",
+        "Kind": 0
+      },
+      {
+        "Path": "/dev/kmsg",
+        "Kind": 1
+      },
+      {
+        "Path": "/test",
+        "Kind": 1
+      }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.20/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
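+
+As a hedged sketch of consuming this stream (not from the original docs):
+decode the concatenated JSON documents one at a time with `json.Decoder`. The
+container name `redis1` matches the example request below, and only a couple
+of fields are picked out:
+
+```go
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// dockerClient dials the daemon's default Unix socket; the "docker"
+// host in URLs below is a placeholder.
+func dockerClient() *http.Client {
+	return &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+}
+
+func main() {
+	resp, err := dockerClient().Get("http://docker/v1.20/containers/redis1/stats")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	// The body carries one JSON document per sample, back to back.
+	dec := json.NewDecoder(resp.Body)
+	for i := 0; i < 5; i++ { // read a few samples, then disconnect
+		var s struct {
+			Read        string `json:"read"`
+			MemoryStats struct {
+				Usage uint64 `json:"usage"`
+				Limit uint64 `json:"limit"`
+			} `json:"memory_stats"`
+		}
+		if err := dec.Decode(&s); err != nil {
+			panic(err)
+		}
+		fmt.Printf("%s: memory %d / %d bytes\n", s.Read, s.MemoryStats.Usage, s.MemoryStats.Limit)
+	}
+}
+```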
+
+**Example request**:
+
+    GET /v1.20/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "read" : "2015-01-08T22:57:31.547920715Z",
+      "network" : {
+        "rx_dropped" : 0,
+        "rx_bytes" : 648,
+        "rx_errors" : 0,
+        "tx_packets" : 8,
+        "tx_dropped" : 0,
+        "rx_packets" : 8,
+        "tx_errors" : 0,
+        "tx_bytes" : 648
+      },
+      "memory_stats" : {
+        "stats" : {
+          "total_pgmajfault" : 0,
+          "cache" : 0,
+          "mapped_file" : 0,
+          "total_inactive_file" : 0,
+          "pgpgout" : 414,
+          "rss" : 6537216,
+          "total_mapped_file" : 0,
+          "writeback" : 0,
+          "unevictable" : 0,
+          "pgpgin" : 477,
+          "total_unevictable" : 0,
+          "pgmajfault" : 0,
+          "total_rss" : 6537216,
+          "total_rss_huge" : 6291456,
+          "total_writeback" : 0,
+          "total_inactive_anon" : 0,
+          "rss_huge" : 6291456,
+          "hierarchical_memory_limit" : 67108864,
+          "total_pgfault" : 964,
+          "total_active_file" : 0,
+          "active_anon" : 6537216,
+          "total_active_anon" : 6537216,
+          "total_pgpgout" : 414,
+          "total_cache" : 0,
+          "inactive_anon" : 0,
+          "active_file" : 0,
+          "pgfault" : 964,
+          "inactive_file" : 0,
+          "total_pgpgin" : 477
+        },
+        "max_usage" : 6651904,
+        "usage" : 6537216,
+        "failcnt" : 0,
+        "limit" : 67108864
+      },
+      "blkio_stats" : {},
+      "cpu_stats" : {
+        "cpu_usage" : {
+          "percpu_usage" : [
+            8646879,
+            24472255,
+            36438778,
+            30657443
+          ],
+          "usage_in_usermode" : 50000000,
+          "total_usage" : 100215355,
+          "usage_in_kernelmode" : 30000000
+        },
+        "system_cpu_usage" : 739306590000000,
+        "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+      },
+      "precpu_stats" : {
+        "cpu_usage" : {
+          "percpu_usage" : [
+            8646879,
+            24350896,
+            36438778,
+            30657443
+          ],
+          "usage_in_usermode" : 50000000,
+          "total_usage" : 100093996,
+          "usage_in_kernelmode" : 30000000
+        },
+        "system_cpu_usage" : 9492140000000,
+        "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+      }
+    }
+
+The `precpu_stats` field contains the CPU statistics from the previous read,
+which are used to calculate the CPU usage percentage. It is not an exact copy
+of the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize?h=<height>&w=<width>`
+
+Resize the TTY for container with `id`. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.20/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
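+
+As an illustrative sketch (container ID taken from the example below): POST
+with an empty body and branch on the documented status codes.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// dockerClient dials the daemon's default Unix socket; the "docker"
+// host in URLs below is a placeholder.
+func dockerClient() *http.Client {
+	return &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+}
+
+func main() {
+	id := "e90e34656806" // container ID from the create example
+	resp, err := dockerClient().Post(
+		"http://docker/v1.20/containers/"+id+"/start", "application/json", nil)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	switch resp.StatusCode {
+	case http.StatusNoContent: // 204
+		fmt.Println("started")
+	case http.StatusNotModified: // 304
+		fmt.Println("already started")
+	default:
+		fmt.Println("unexpected status:", resp.Status)
+	}
+}
+```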
+ +**Example request**: + + POST /v1.20/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.20/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.20/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. 
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.20/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
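+
+The multiplexed format described under **Stream details** above lends itself
+to a small demultiplexer. A minimal Go sketch (not from the original
+reference) that routes frames from a captured attach stream to
+`stdout`/`stderr`:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"io"
+	"os"
+)
+
+// demux reads frames in the format described above: an 8-byte header
+// (1 stream-type byte, 3 zero bytes, 4-byte big-endian size) followed
+// by the payload, and routes each payload to stdout or stderr.
+func demux(r io.Reader) error {
+	var hdr [8]byte
+	for {
+		if _, err := io.ReadFull(r, hdr[:]); err != nil {
+			if err == io.EOF {
+				return nil // clean end of stream
+			}
+			return err
+		}
+		var w io.Writer = os.Stdout
+		if hdr[0] == 2 { // STREAM_TYPE 2 is stderr
+			w = os.Stderr
+		}
+		size := binary.BigEndian.Uint32(hdr[4:])
+		if _, err := io.CopyN(w, r, int64(size)); err != nil {
+			return err
+		}
+	}
+}
+
+func main() {
+	// For example, pipe a captured raw stream in: ./demux < stream.bin
+	if err := demux(os.Stdin); err != nil {
+		panic(err)
+	}
+}
+```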
+ +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.20/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.20/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.20/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. 
The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.20/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.20/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.20/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.20/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
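+
+As a non-authoritative sketch: POST a pre-built context tarball (the path
+`context.tar` is a placeholder) and print the streamed JSON progress
+messages. The `t` parameter names the resulting image:
+
+```go
+package main
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+)
+
+// dockerClient dials the daemon's default Unix socket; the "docker"
+// host in URLs below is a placeholder.
+func dockerClient() *http.Client {
+	return &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+}
+
+func main() {
+	ctxTar, err := os.Open("context.tar") // tar of the build context, Dockerfile at its root
+	if err != nil {
+		panic(err)
+	}
+	defer ctxTar.Close()
+	resp, err := dockerClient().Post(
+		"http://docker/v1.20/build?t=myrepo%2Fmyapp%3Alatest",
+		"application/x-tar", ctxTar)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	// The daemon streams JSON progress messages; print them as they arrive.
+	sc := bufio.NewScanner(resp.Body)
+	for sc.Scan() {
+		fmt.Println(sc.Text())
+	}
+	if err := sc.Err(); err != nil {
+		panic(err)
+	}
+}
+```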
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.20/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
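+
+A minimal pull sketch under the same assumptions as the earlier examples; the
+request mirrors the example above, and the progress stream is drained so the
+pull runs to completion:
+
+```go
+package main
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// dockerClient dials the daemon's default Unix socket; the "docker"
+// host in URLs below is a placeholder.
+func dockerClient() *http.Client {
+	return &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+}
+
+func main() {
+	resp, err := dockerClient().Post(
+		"http://docker/v1.20/images/create?fromImage=busybox&tag=latest",
+		"text/plain", nil)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	// Drain the JSON progress messages; the pull proceeds while we read.
+	sc := bufio.NewScanner(resp.Body)
+	for sc.Scan() {
+		fmt.Println(sc.Text())
+	}
+}
+```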
+ +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.20/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... 
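+
+The `X-Registry-Auth` header used by this endpoint carries a base64-encoded
+AuthConfig object. A sketch of building that value (field names follow the
+`/auth` example earlier; whether the standard or URL-safe base64 alphabet is
+expected can vary by daemon version, so treat this as an assumption to
+verify):
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// Placeholder credentials, shaped like the /auth example above.
+	auth := map[string]string{
+		"username":      "hannibal",
+		"password":      "xxxx",
+		"email":         "hannibal@a-team.com",
+		"serveraddress": "https://index.docker.io/v1/",
+	}
+	buf, err := json.Marshal(auth)
+	if err != nil {
+		panic(err)
+	}
+	// Send this value as the X-Registry-Auth request header.
+	fmt.Println(base64.StdEncoding.EncodeToString(buf))
+}
+```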
+ +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.20/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.20/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.20/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.20/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.20/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.20/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.20/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.20/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + 
"ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.20/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.20/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.20/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
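+
+As a hedged end-to-end sketch of the create/start sequence (assuming a daemon
+at `localhost:2375` and the running container from the examples above):
+
+```python
+import json
+import http.client
+
+conn = http.client.HTTPConnection("localhost", 2375)
+
+# 1. Create the exec instance in the running container.
+create_body = json.dumps({"AttachStdout": True, "AttachStderr": True,
+                          "Cmd": ["date"]})
+conn.request("POST", "/v1.20/containers/e90e34656806/exec",
+             body=create_body,
+             headers={"Content-Type": "application/json"})
+exec_id = json.loads(conn.getresponse().read())["Id"]
+
+# 2. Start it and read the raw stream; with Tty false the output is
+#    multiplexed into frames (see the attach endpoint for the format).
+start_body = json.dumps({"Detach": False, "Tty": False})
+conn.request("POST", "/v1.20/exec/%s/start" % exec_id, body=start_body,
+             headers={"Content-Type": "application/json"})
+raw = conn.getresponse().read()  # 8-byte frame headers + payload
+conn.close()
+```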
+ +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.20/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. 
Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint to potential proxies that the connection will be hijacked, the Docker
+client sends connection upgrade headers, as in WebSocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, pass a value for
+`--api-cors-header` when running Docker in daemon mode. Setting `*` (asterisk)
+allows all origins; the default (blank) leaves CORS disabled:
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/api/v1.21.md b/docs/api/v1.21.md
new file mode 100644
index 0000000..360575b
--- /dev/null
+++ b/docs/api/v1.21.md
@@ -0,0 +1,2981 @@
+---
+title: "Engine API v1.21"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.21/
+- /reference/api/docker_remote_api_v1.21/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+ - When the client API version is newer than the daemon's, these calls return an HTTP
+   `400 Bad Request` error message.
+
+## 2. 
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.21/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.21/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. 
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object, in the form:
+  `"ExposedPorts": { "<port>/<tcp|udp>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      _absolute_ paths.
+    + `volume-name:container-dest` to bind-mount a volume managed by a
+      volume driver into the container. `container-dest` must be an
+      _absolute_ path.
+    + `volume-name:container-dest:ro` to mount the volume read-only
+      inside the container. `container-dest` must be an _absolute_ path.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **LxcConf** - LXC-specific configurations. These configurations only
+    work when using the `lxc` execution driver.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (e.g., `0-3`, `0,1`). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight), accepts a weight value between 10 and 1000.
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace;
+    `"host"`: use the host's PID namespace inside the container.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`.
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsOptions** - A list of DNS options.
+  - **DnsSearch** - A list of DNS search domains.
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`.
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart, `"unless-stopped"` to restart always except when
+    the user has manually stopped the container, or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+    as the name of a custom network to which this container should connect.
+  - **Devices** - A list of devices to add to the container, specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt** - A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`.
+    The default is the `json-file` logging driver.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+  - **VolumeDriver** - Driver that this container uses to mount volumes.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
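+
+A minimal sketch of calling this endpoint, assuming a daemon at
+`localhost:2375`; the image, command, and container name are illustrative:
+
+```python
+import json
+import http.client
+
+# Create a container from a small JSON config and print the result.
+conn = http.client.HTTPConnection("localhost", 2375)
+config = {
+    "Image": "ubuntu",
+    "Cmd": ["date"],
+    "HostConfig": {"Binds": ["/tmp:/tmp"]},
+}
+conn.request("POST", "/v1.21/containers/create?name=example",
+             body=json.dumps(config),
+             headers={"Content-Type": "application/json"})
+resp = conn.getresponse()
+print(resp.status, resp.read())  # expect 201 and {"Id": ..., "Warnings": ...}
+conn.close()
+```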
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "" + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + 
"ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Example request, with size information**: + + GET /v1.21/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. 
+- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.21/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + 
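+
+For illustration, one way a client might turn a single sample into a CPU usage
+percentage (see the note on `precpu_stats` below); field names follow the
+example response above:
+
+```python
+# Hedged sketch: derive a CPU percentage from the delta between
+# cpu_stats and precpu_stats in one stats sample.
+def cpu_percent(sample):
+    cpu = sample["cpu_stats"]
+    pre = sample["precpu_stats"]
+    cpu_delta = (cpu["cpu_usage"]["total_usage"]
+                 - pre["cpu_usage"]["total_usage"])
+    system_delta = cpu["system_cpu_usage"] - pre["system_cpu_usage"]
+    if cpu_delta <= 0 or system_delta <= 0:
+        return 0.0
+    ncpus = len(cpu["cpu_usage"]["percpu_usage"])
+    return cpu_delta / system_delta * ncpus * 100.0
+```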
+The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.21/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.21/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.21/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.21/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.21/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.21/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. 
To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.21/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.21/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.21/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.21/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.21/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. 
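+
+For illustration, producing this header value in Python (the credentials are
+the placeholders from the example above):
+
+```python
+import json
+import base64
+
+# Encode the registry auth map as base64-url-safe JSON, as the
+# X-Registry-Config header expects.
+registry_config = {
+    "https://index.docker.io/v1/": {
+        "username": "mobydock",
+        "password": "conta1n3rize14",
+    }
+}
+header_value = base64.urlsafe_b64encode(
+    json.dumps(registry_config).encode("utf-8")).decode("ascii")
+```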
+ +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.21/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.21/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + 
"AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.21/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.21/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.21/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.21/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.21/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.21/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.21/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.21/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.21/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.21/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": 
"value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.21/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} + {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} + {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} + {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. 
+`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.21/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. 
+
+**Example request**:
+
+    POST /v1.21/exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+     "Detach": false,
+     "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**JSON parameters**:
+
+- **Detach** - Detach from the `exec` command.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **409** - container is paused
+
+**Stream details**:
+
+Similar to the stream behavior of the `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.21/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.21/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+      "Running" : false,
+      "ExitCode" : 2,
+      "ProcessConfig" : {
+        "privileged" : false,
+        "user" : "",
+        "tty" : false,
+        "entrypoint" : "sh",
+        "arguments" : [
+          "-c",
+          "exit 2"
+        ]
+      },
+      "OpenStdin" : false,
+      "OpenStderr" : false,
+      "OpenStdout" : false,
+      "Container" : {
+        "State" : {
+          "Status" : "running",
+          "Running" : true,
+          "Paused" : false,
+          "Restarting" : false,
+          "OOMKilled" : false,
+          "Pid" : 3650,
+          "ExitCode" : 0,
+          "Error" : "",
+          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+          "FinishedAt" : "0001-01-01T00:00:00Z"
+        },
+        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+        "Created" : "2014-11-17T22:26:03.626304998Z",
+        "Path" : "date",
+        "Args" : [],
+        "Config" : {
+          "Hostname" : "8f177a186b97",
+          "Domainname" : "",
+          "User" : "",
+          "AttachStdin" : false,
+          "AttachStdout" : false,
+          "AttachStderr" : false,
+          "ExposedPorts" : null,
+          "Tty" : false,
+          "OpenStdin" : false,
+          "StdinOnce" : false,
+          "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+          "Cmd" : [
+            "date"
+          ],
+          "Image" : "ubuntu",
+          "Volumes" : null,
+          "WorkingDir" : "",
+          "Entrypoint" : null,
+          "NetworkDisabled" : false,
+          "MacAddress" : "",
+          "OnBuild" : null,
+          "SecurityOpt" : null
+        },
+        "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+        "NetworkSettings" : {
+            "Bridge": "",
+            "SandboxID": "",
+            "HairpinMode": false,
+            "LinkLocalIPv6Address": "",
+            "LinkLocalIPv6PrefixLen": 0,
+            "Ports": null,
+            "SandboxKey": "",
+            "SecondaryIPAddresses": null,
+            "SecondaryIPv6Addresses": null,
+            "EndpointID": "",
+            "Gateway": "",
+            "GlobalIPv6Address": "",
+            "GlobalIPv6PrefixLen": 0,
+            "IPAddress": "",
+            "IPPrefixLen": 0,
+            "IPv6Gateway": "",
+            "MacAddress": "",
+            "Networks": {
+                "bridge": {
+                    "EndpointID": "",
+                    "Gateway": "",
+                    "IPAddress": "",
+                    "IPPrefixLen": 0,
+                    "IPv6Gateway": "",
+                    "GlobalIPv6Address": "",
+                    "GlobalIPv6PrefixLen": 0,
+                    "MacAddress": ""
+ } + } + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.21/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.21/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
+ +**Example request**: + + DELETE /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.21/networks HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. 
Available filters: `name=[network-names]`, `id=[network-ids]`
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Inspect network
+
+`GET /networks/(id or name)`
+
+Return low-level information on the network `id`
+
+**Example request**:
+
+    GET /v1.21/networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+{
+  "Name": "bridge",
+  "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+  "Scope": "local",
+  "Driver": "bridge",
+  "IPAM": {
+    "Driver": "default",
+    "Config": [
+      {
+        "Subnet": "172.17.0.0/16"
+      }
+    ]
+  },
+  "Containers": {
+    "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
+      "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
+      "MacAddress": "02:42:ac:11:00:02",
+      "IPv4Address": "172.17.0.2/16",
+      "IPv6Address": ""
+    }
+  },
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  }
+}
+```
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network not found
+- **500** - server error
+
+#### Create a network
+
+`POST /networks/create`
+
+Create a network
+
+**Example request**:
+
+```
+POST /v1.21/networks/create HTTP/1.1
+Content-Type: application/json
+
+{
+  "Name":"isolated_nw",
+  "CheckDuplicate":true,
+  "Driver":"bridge",
+  "IPAM":{
+    "Driver": "default",
+    "Config":[
+      {
+        "Subnet":"172.20.0.0/16",
+        "IPRange":"172.20.10.0/24",
+        "Gateway":"172.20.10.11"
+      }
+    ]
+  }
+}
+```
+
+**Example response**:
+
+```
+HTTP/1.1 201 Created
+Content-Type: application/json
+
+{
+  "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
+  "Warning": ""
+}
+```
+
+**Status codes**:
+
+- **201** - no error
+- **404** - plugin not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new network's name. This is a mandatory field.
+- **CheckDuplicate** - Requests the daemon to check for networks with the same name. Defaults to `false`.
+  Since a network is primarily keyed on a random ID rather than on its name, and a
+  network name is strictly a user-friendly alias for a network that is uniquely
+  identified by its ID, there is no guaranteed way to check for duplicates across
+  a cluster of docker hosts. `CheckDuplicate` provides a best-effort check for
+  networks with the same name, but it is not guaranteed to catch all name collisions.
+- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver
+- **IPAM** - Optional custom IP scheme for the network
+  - **Driver** - Name of the IPAM driver to use.
Defaults to `default` driver
+  - **Config** - List of IPAM configuration options, specified as a map:
+    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+- **Options** - Network specific options to be used by the drivers
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4"
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container is not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4"
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be disconnected from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to websocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 3.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, give values to
+`--api-cors-header` when running Docker in daemon mode.
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/docs/api/v1.22.md b/docs/api/v1.22.md new file mode 100644 index 0000000..9e3153c --- /dev/null +++ b/docs/api/v1.22.md @@ -0,0 +1,3319 @@ +--- +title: "Engine API v1.22" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.22/ +- /reference/api/docker_remote_api_v1.22/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.22/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + } + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + } + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + 
"NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + } + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + } + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>`;
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`)
+  - `label=key` or `label="key=value"` of a container label
+  - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.22/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+       "Hostname": "",
+       "Domainname": "",
+       "User": "",
+       "AttachStdin": false,
+       "AttachStdout": true,
+       "AttachStderr": true,
+       "Tty": false,
+       "OpenStdin": false,
+       "StdinOnce": false,
+       "Env": [
+               "FOO=bar",
+               "BAZ=quux"
+       ],
+       "Cmd": [
+               "date"
+       ],
+       "Entrypoint": null,
+       "Image": "ubuntu",
+       "Labels": {
+               "com.example.vendor": "Acme",
+               "com.example.license": "GPL",
+               "com.example.version": "1.0"
+       },
+       "Volumes": {
+         "/volumes/data": {}
+       },
+       "WorkingDir": "",
+       "NetworkDisabled": false,
+       "MacAddress": "12:34:56:78:9a:bc",
+       "ExposedPorts": {
+               "22/tcp": {}
+       },
+       "StopSignal": "SIGTERM",
+       "HostConfig": {
+         "Binds": ["/tmp:/tmp"],
+         "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" },
+         "Links": ["redis3:redis"],
+         "Memory": 0,
+         "MemorySwap": 0,
+         "MemoryReservation": 0,
+         "KernelMemory": 0,
+         "CpuShares": 512,
+         "CpuPeriod": 100000,
+         "CpuQuota": 50000,
+         "CpusetCpus": "0,1",
+         "CpusetMems": "0,1",
+         "BlkioWeight": 300,
+         "BlkioWeightDevice": [{}],
+         "BlkioDeviceReadBps": [{}],
+         "BlkioDeviceReadIOps": [{}],
+         "BlkioDeviceWriteBps": [{}],
+         "BlkioDeviceWriteIOps": [{}],
+         "MemorySwappiness": 60,
+         "OomKillDisable": false,
+         "OomScoreAdj": 500,
+         "PidMode": "",
+         "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+         "PublishAllPorts": false,
+         "Privileged": false,
+         "ReadonlyRootfs": false,
+         "Dns": ["8.8.8.8"],
+         "DnsOptions": [""],
+         "DnsSearch": [""],
+         "ExtraHosts": null,
+         "VolumesFrom": ["parent", "other:ro"],
+         "CapAdd": ["NET_ADMIN"],
+         "CapDrop": ["MKNOD"],
+         "GroupAdd": ["newgroup"],
+         "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+         "NetworkMode": "bridge",
+         "Devices": [],
+         "Ulimits": [{}],
+         "LogConfig": { "Type": "json-file", "Config": {} },
+         "SecurityOpt": [],
+         "CgroupParent": "",
+         "VolumeDriver": "",
+         "ShmSize": 67108864
+      },
+      "NetworkingConfig": {
+          "EndpointsConfig": {
+              "isolated_nw" : {
+                  "IPAMConfig": {
+                      "IPv4Address":"172.20.30.33",
+                      "IPv6Address":"2001:db8:abcd::3033"
+                  },
+                  "Links":["container_1", "container_2"],
+                  "Aliases":["server_x", "server_y"]
+              }
+          }
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+         "Id":"e90e34656806",
+         "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<protocol>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+    - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+       + `host-src:container-dest` to bind-mount a host path into the
+         container. Both `host-src` and `container-dest` must be an
+         _absolute_ path.
+       + `host-src:container-dest:ro` to make the bind-mount read-only
+         inside the container. Both `host-src` and `container-dest` must be
+         an _absolute_ path.
+       + `volume-name:container-dest` to bind-mount a volume managed by a
+         volume driver into the container. `container-dest` must be an
+         _absolute_ path.
+       + `volume-name:container-dest:ro` to mount the volume read-only
+         inside the container. `container-dest` must be an _absolute_ path.
+    - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+      mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+    - **Links** - A list of links for the container. Each link entry should be
+      in the form of `container_name:alias`.
+    - **Memory** - Memory limit in bytes.
+    - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+      You must use this with `memory` and make the swap value larger than `memory`.
+    - **MemoryReservation** - Memory soft limit in bytes.
+    - **KernelMemory** - Kernel memory limit in bytes.
+    - **CpuShares** - An integer value containing the container's CPU Shares
+      (i.e., the relative weight vs other containers).
+    - **CpuPeriod** - The length of a CPU period in microseconds.
+    - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+    - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+    - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+
+    - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+    - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+    - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+      `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+    - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+    - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+    - **PidMode** - Set the PID (Process) Namespace mode for the container;
+      `"container:<name|id>"`: joins another container's PID namespace
+      `"host"`: use the host's PID namespace inside the container
+    - **PortBindings** - A map of exposed container ports and the host port they
+      should map to. A JSON object in the form
+      `{ "<port>/<protocol>": [{ "HostPort": "<HostPort>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+    - **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+    - **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+    - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+    - **Dns** - A list of DNS servers for the container to use.
+    - **DnsOptions** - A list of DNS options
+    - **DnsSearch** - A list of DNS search domains
+    - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    - **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+    - **CapAdd** - A list of kernel capabilities to add to the container.
+    - **CapDrop** - A list of kernel capabilities to drop from the container.
+    - **GroupAdd** - A list of additional groups that the container process will run as
+    - **RestartPolicy** – The behavior to apply when the container exits. The
+      value is an object with a `Name` property of either `"always"` to
+      always restart, `"unless-stopped"` to restart always except when
+      the user has manually stopped the container, or `"on-failure"` to restart only when the container
+      exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+      controls the number of times to retry before giving up.
+      The default is not to restart. (optional)
+      An ever-increasing delay (double the previous delay, starting at 100 ms)
+      is added before each restart to prevent flooding the server.
+    - **NetworkMode** - Sets the networking mode for the container.
Supported
+      standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+      as a custom network's name to which this container should connect.
+    - **Devices** - A list of devices to add to the container specified as a JSON object in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    - **Ulimits** - A list of ulimits to set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+    - **SecurityOpt**: A list of string values to customize labels for MLS
+      systems, such as SELinux.
+    - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"} }`.
+      Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`.
+    - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+    - **VolumeDriver** - Driver that this container uses to mount volumes.
+    - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted, the system uses 64 MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": {
+                "/volumes/data": {}
+            },
+            "WorkingDir": "",
+            "StopSignal": "SIGTERM"
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "BlkioWeightDevice": [{}],
+            "BlkioDeviceReadBps": [{}],
+            "BlkioDeviceWriteBps": [{}],
+            "BlkioDeviceReadIOps": [{}],
+            "BlkioDeviceWriteIOps": [{}],
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsOptions": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "MemoryReservation": 0,
+            "KernelMemory": 0,
+            "OomKillDisable": false,
+            "OomScoreAdj": 500,
+            "NetworkMode": "bridge",
+            "PidMode": "",
+            "PortBindings": {},
+            "Privileged": false,
+            "ReadonlyRootfs": 
false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.22/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container ``id``
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.22/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default `all`.
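+For instance, a one-shot (non-streaming) request for the most recent log lines
+of a container might look like this sketch (the container ID and values are
+illustrative):
+
+    GET /v1.22/containers/4fa6e0f0c678/logs?stdout=1&stderr=1&tail=100 HTTP/1.1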
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.22/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, which is used for calculating the cpu 
usage percentage. It is not an exact copy of the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false. When `false`, pull stats once and
+  then disconnect. Default `true` (stats are streamed continuously).
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize`
+
+Resize the TTY for container with `id`. The unit is the number of characters. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.22/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as a JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already started
+- **404** – no such container
+- **500** – server error
+
+#### Stop a container
+
+`POST /containers/(id or name)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **304** – container already stopped
+- **404** – no such container
+- **500** – server error
+
+#### Restart a container
+
+`POST /containers/(id or name)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **t** – number of seconds to wait before killing the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Kill a container
+
+`POST /containers/(id or name)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **signal** - Signal to send to the container: integer or string like `SIGINT`.
+  When not set, `SIGKILL` is assumed and the call waits for the container to exit.
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Update a container
+
+`POST /containers/(id or name)/update`
+
+Update resource configs of one or more containers.
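+
+As a sketch, the same update can be issued from Go, assuming the default Unix socket, the hypothetical container ID from the examples, and a body restricted to the fields being changed; the raw HTTP exchange follows:
+
+```go
+package main
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "net"
+    "net/http"
+)
+
+func main() {
+    // HTTP client wired to the daemon's default Unix socket.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    // Send only the resource fields that should change.
+    body := []byte(`{"CpuShares": 512, "Memory": 314572800}`)
+    resp, err := client.Post(
+        "http://unix/v1.22/containers/e90e34656806/update",
+        "application/json", bytes.NewReader(body))
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println(resp.Status) // a 200 response carries a JSON "Warnings" array
+}
+```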
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/update HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "BlkioWeight": 300,
+      "CpuShares": 512,
+      "CpuPeriod": 100000,
+      "CpuQuota": 50000,
+      "CpusetCpus": "0,1",
+      "CpusetMems": "0",
+      "Memory": 314572800,
+      "MemorySwap": 514288000,
+      "MemoryReservation": 209715200,
+      "KernelMemory": 52428800
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Warnings": []
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Rename a container
+
+`POST /containers/(id or name)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **name** – new name for the container
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **409** - conflict, name already assigned
+- **500** – server error
+
+#### Pause a container
+
+`POST /containers/(id or name)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Unpause a container
+
+`POST /containers/(id or name)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Attach to a container
+
+`POST /containers/(id or name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.22/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** - container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`](#create-a-container),
+the stream is the raw data from the process PTY and the client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header identifies which stream the payload belongs to (`stdout` or
It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.22/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.22/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.22/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. 
+ +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.22/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. 
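+
+Driving this endpoint from code amounts to building a tar stream and `PUT`ting it with `Content-Type: application/x-tar`. Below is a minimal Go sketch, assuming the default Unix socket plus a hypothetical container ID and target path; the raw HTTP exchange follows:
+
+```go
+package main
+
+import (
+    "archive/tar"
+    "bytes"
+    "context"
+    "fmt"
+    "net"
+    "net/http"
+)
+
+func main() {
+    // Build a one-file tar archive in memory.
+    var buf bytes.Buffer
+    tw := tar.NewWriter(&buf)
+    data := []byte("hello from the API\n")
+    if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(data))}); err != nil {
+        panic(err)
+    }
+    if _, err := tw.Write(data); err != nil {
+        panic(err)
+    }
+    if err := tw.Close(); err != nil {
+        panic(err)
+    }
+
+    // HTTP client wired to the daemon's default Unix socket.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    req, err := http.NewRequest(http.MethodPut,
+        "http://unix/v1.22/containers/8cce319429b2/archive?path=/vol1", &buf)
+    if err != nil {
+        panic(err)
+    }
+    req.Header.Set("Content-Type", "application/x-tar")
+
+    resp, err := client.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println(resp.Status) // 200 OK on successful extraction
+}
+```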
+ +**Example request**: + + PUT /v1.22/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.22/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.22/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.22/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. 
[Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.22/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.22/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.22/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i 
iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.22/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.22/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.22/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.22/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
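+
+A minimal Go sketch of calling this endpoint, mainly to show how the snake_case response fields decode (assumptions: default Unix socket); the raw HTTP exchange follows:
+
+```go
+package main
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net"
+    "net/http"
+)
+
+// searchResult mirrors the snake_case keys in the response below.
+type searchResult struct {
+    Description string `json:"description"`
+    IsOfficial  bool   `json:"is_official"`
+    IsAutomated bool   `json:"is_automated"`
+    Name        string `json:"name"`
+    StarCount   int    `json:"star_count"`
+}
+
+func main() {
+    // HTTP client wired to the daemon's default Unix socket.
+    client := &http.Client{Transport: &http.Transport{
+        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+        },
+    }}
+
+    resp, err := client.Get("http://unix/v1.22/images/search?term=sshd")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var results []searchResult
+    if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+        panic(err)
+    }
+    for _, r := range results {
+        fmt.Printf("%s (stars: %d)\n", r.Name, r.StarCount)
+    }
+}
+```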
+ +**Example request**: + + GET /v1.22/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.22/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.22/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.22/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.10.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.22", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker 
server + +**Example request**: + + GET /v1.22/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.22/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.10.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 
1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. 
If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.22/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.22/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.22/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.22/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.22/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.22/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. 
The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.22/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.22/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed way to check for duplicates. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id or name)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id or name)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id or name)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for pre-defined networks +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/docs/api/v1.23.md b/docs/api/v1.23.md new file mode 100644 index 0000000..8f7b60c --- /dev/null +++ b/docs/api/v1.23.md @@ -0,0 +1,3436 @@ +--- +title: "Engine API v1.23" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.23/ +- /reference/api/docker_remote_api_v1.23/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.23/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": 
"d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>` -- containers with exit code of `<int>`
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`)
+  - `label=key` or `label="key=value"` of a container label
+  - `isolation=`(`default`|`process`|`hyperv`)   (Windows daemon only)
+  - `ancestor`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+  - `before`=(`<container id>` or `<container name>`)
+  - `since`=(`<container id>` or `<container name>`)
+  - `volume`=(`<volume name>` or `<mount point destination>`)
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.23/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+       "Hostname": "",
+       "Domainname": "",
+       "User": "",
+       "AttachStdin": false,
+       "AttachStdout": true,
+       "AttachStderr": true,
+       "Tty": false,
+       "OpenStdin": false,
+       "StdinOnce": false,
+       "Env": [
+               "FOO=bar",
+               "BAZ=quux"
+       ],
+       "Cmd": [
+               "date"
+       ],
+       "Entrypoint": "",
+       "Image": "ubuntu",
+       "Labels": {
+               "com.example.vendor": "Acme",
+               "com.example.license": "GPL",
+               "com.example.version": "1.0"
+       },
+       "Volumes": {
+         "/volumes/data": {}
+       },
+       "WorkingDir": "",
+       "NetworkDisabled": false,
+       "MacAddress": "12:34:56:78:9a:bc",
+       "ExposedPorts": {
+               "22/tcp": {}
+       },
+       "StopSignal": "SIGTERM",
+       "HostConfig": {
+         "Binds": ["/tmp:/tmp"],
+         "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" },
+         "Links": ["redis3:redis"],
+         "Memory": 0,
+         "MemorySwap": 0,
+         "MemoryReservation": 0,
+         "KernelMemory": 0,
+         "CpuShares": 512,
+         "CpuPeriod": 100000,
+         "CpuQuota": 50000,
+         "CpusetCpus": "0,1",
+         "CpusetMems": "0,1",
+         "BlkioWeight": 300,
+         "BlkioWeightDevice": [{}],
+         "BlkioDeviceReadBps": [{}],
+         "BlkioDeviceReadIOps": [{}],
+         "BlkioDeviceWriteBps": [{}],
+         "BlkioDeviceWriteIOps": [{}],
+         "MemorySwappiness": 60,
+         "OomKillDisable": false,
+         "OomScoreAdj": 500,
+         "PidMode": "",
+         "PidsLimit": -1,
+         "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+         "PublishAllPorts": false,
+         "Privileged": false,
+         "ReadonlyRootfs": false,
+         "Dns": ["8.8.8.8"],
+         "DnsOptions": [""],
+         "DnsSearch": [""],
+         "ExtraHosts": null,
+         "VolumesFrom": ["parent", "other:ro"],
+         "CapAdd": ["NET_ADMIN"],
+         "CapDrop": ["MKNOD"],
+         "GroupAdd": ["newgroup"],
+         "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+         "NetworkMode": "bridge",
+         "Devices": [],
+         "Ulimits": [{}],
+         "LogConfig": { "Type": "json-file", "Config": {} },
+         "SecurityOpt": [],
+         "CgroupParent": "",
+         "VolumeDriver": "",
+         "ShmSize": 67108864
+       },
+       "NetworkingConfig": {
+           "EndpointsConfig": {
+               "isolated_nw" : {
+                   "IPAMConfig": {
+                       "IPv4Address":"172.20.30.33",
+                       "IPv6Address":"2001:db8:abcd::3033"
+                   },
+                   "Links":["container_1", "container_2"],
+                   "Aliases":["server_x", "server_y"]
+               }
+           }
+       }
+    }
+
+**Example response**:
+
+      HTTP/1.1 201 Created
+      Content-Type: application/json
+
+      {
+           "Id":"e90e34656806",
+           "Warnings":[]
+      }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<protocol>": {} }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+    + `host-src:container-dest` to bind-mount a host path into the
+      container. Both `host-src` and `container-dest` must be an
+      _absolute_ path.
+    + `host-src:container-dest:ro` to make the bind-mount read-only
+      inside the container. Both `host-src` and `container-dest` must be
+      an _absolute_ path.
+    + `volume-name:container-dest` to bind-mount a volume managed by a
+      volume driver into the container. `container-dest` must be an
+      _absolute_ path.
+    + `volume-name:container-dest:ro` to mount the volume read-only
+      inside the container. `container-dest` must be an _absolute_ path.
+  - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+    mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuShares** - An integer value containing the container's CPU shares
+    (i.e., the relative weight vs other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+  - **BlkioWeight** - Block IO weight (relative weight), accepts a weight value between 10 and 1000.
+  - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+  - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+  - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+  - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+  - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace
+    `"host"`: use the host's PID namespace inside the container
+  - **PidsLimit** - Tune a container's pids limit. Set `-1` for unlimited.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ "<port>/<protocol>": [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsOptions** - A list of DNS options.
+  - **DnsSearch** - A list of DNS search domains.
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart, `"unless-stopped"` to restart always except when
+    the user has manually stopped the container, or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever-increasing delay (double the previous delay, starting at 100 ms)
+    is added before each restart to prevent flooding the server.
+  - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping is enabled.
+    Supported value: `host`.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+    as a custom network's name to which this container should connect.
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`.
+    The default is the `json-file` logging driver.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+  - **VolumeDriver** - Driver that this container uses to mount volumes.
+  - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **406** – impossible to attach (container not running)
+- **409** – conflict
+- **500** – server error
+
+#### Inspect a container
+
+`GET /containers/(id or name)/json`
+
+Return low-level information on the container `id`
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "AppArmorProfile": "",
+        "Args": [
+            "-c",
+            "exit 9"
+        ],
+        "Config": {
+            "AttachStderr": true,
+            "AttachStdin": false,
+            "AttachStdout": true,
+            "Cmd": [
+                "/bin/sh",
+                "-c",
+                "exit 9"
+            ],
+            "Domainname": "",
+            "Entrypoint": null,
+            "Env": [
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": null,
+            "Hostname": "ba033ac44011",
+            "Image": "ubuntu",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": null,
+            "OpenStdin": false,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": {
+                "/volumes/data": {}
+            },
+            "WorkingDir": "",
+            "StopSignal": "SIGTERM"
+        },
+        "Created": "2015-01-06T15:47:31.485331387Z",
+        "Driver": "devicemapper",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "BlkioWeight": 0,
+            "BlkioWeightDevice": [{}],
+            "BlkioDeviceReadBps": [{}],
+            "BlkioDeviceWriteBps": [{}],
+            "BlkioDeviceReadIOps": [{}],
+            "BlkioDeviceWriteIOps": [{}],
+            "CapAdd": null,
+            "CapDrop": null,
+            "ContainerIDFile": "",
+            "CpusetCpus": "",
+            "CpusetMems": "",
+            "CpuShares": 0,
+            "CpuPeriod": 100000,
+            "Devices": [],
+            "Dns": null,
+            "DnsOptions": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LxcConf": [],
+            
"Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.23/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.23/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
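+
+As a rough sketch of how these parameters combine in practice (not part of the
+original reference), the following Go program tails and follows a container's
+logs. It assumes the daemon on the default Unix socket; the container ID is a
+placeholder, and for non-TTY containers the returned stream is multiplexed as
+described under "Attach to a container" below:
+
+```
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Tail the last 10 lines, then keep following the stream.
+	url := "http://localhost/v1.23/containers/4fa6e0f0c678/logs" +
+		"?stdout=1&stderr=1&timestamps=1&tail=10&follow=1"
+	resp, err := client.Get(url)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	defer resp.Body.Close()
+
+	// Raw copy; for non-TTY containers each frame carries the 8-byte
+	// header described in the attach stream details.
+	io.Copy(os.Stdout, resp.Body)
+}
+```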
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. + +**Example request**: + + GET /v1.23/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The `precpu_stats` is the cpu statistic of last read, 
which is used for calculating the cpu usage percent. It is not the exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.23/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. 
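+
+For illustration, a minimal Go sketch of posting an update follows (not part of
+the original reference; the resource values are arbitrary, the container ID is
+a placeholder, and the daemon is assumed on the default Unix socket):
+
+```
+package main
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Arbitrary example values: a 300 MB memory limit and 512 CPU shares.
+	body := strings.NewReader(`{"Memory": 314572800, "CpuShares": 512}`)
+	resp, err := client.Post(
+		"http://localhost/v1.23/containers/e90e34656806/update",
+		"application/json", body)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // expect "200 OK" with a JSON "Warnings" array
+}
+```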
+ +**Example request**: + + POST /v1.23/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.23/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.23/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). 
+ +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.23/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.23/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.23/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. +- **link** - 1/True/true or 0/False/false, Remove the specified + link associated to the container. Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. 
+ +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.23/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. 
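+
+As a hedged sketch of the above (not part of the original reference), the
+following Go program builds a one-file tar archive in memory and uploads it;
+the container ID and the target path `/vol1` are placeholders:
+
+```
+package main
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+)
+
+func main() {
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+
+	// Build a tar archive containing a single file.
+	var buf bytes.Buffer
+	tw := tar.NewWriter(&buf)
+	content := []byte("hello\n")
+	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(content))})
+	tw.Write(content)
+	tw.Close()
+
+	// PUT the archive to be extracted under /vol1 inside the container.
+	req, err := http.NewRequest(http.MethodPut,
+		"http://localhost/v1.23/containers/8cce319429b2/archive?path=/vol1", &buf)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	req.Header.Set("Content-Type", "application/x-tar")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // "200 OK" on successful extraction
+}
+```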
+ +**Example request**: + + PUT /v1.23/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.23/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.23/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.23/build HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. 
[Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.23/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.23/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.23/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 
182964289,
+            "Comment": ""
+        },
+        {
+            "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+            "Created": 1398108222,
+            "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.23/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+To push an image to a private registry, the image must already be tagged
+into a repository that references the registry's `hostname` and `port`. This
+repository name should then be used in the URL. This mirrors the command
+line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.23/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information or a token
+    - Credential based login:
+
+        ```
+        {
+            "username": "jdoe",
+            "password": "secret",
+            "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Identity token based login:
+
+        ```
+        {
+            "identitytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.23/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.23/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+        {"Untagged": "3e2f21a89f"},
+        {"Deleted": "3e2f21a89f"},
+        {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
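+
+For programmatic callers, the request can be issued over the daemon's Unix
+socket with nothing but the Go standard library. A minimal sketch (the socket
+path and search term are illustrative; this is not the official client
+library):
+
+```
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Route every request through the local Docker socket.
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	resp, err := client.Get("http://docker/v1.23/images/search?term=" + url.QueryEscape("sshd"))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Field names mirror the response shown below.
+	var results []struct {
+		Name        string `json:"name"`
+		Description string `json:"description"`
+		IsOfficial  bool   `json:"is_official"`
+		StarCount   int    `json:"star_count"`
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+		panic(err)
+	}
+	for _, r := range results {
+		fmt.Printf("%-30s stars=%d official=%v\n", r.Name, r.StarCount, r.IsOfficial)
+	}
+}
+```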
+ +**Example request**: + + GET /v1.23/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.23/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.23/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.23/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.11.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.23", + "BuildTime": 
"2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.23/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
+ +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.23/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.11.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + 
"Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+
+**Example request**
+
+    POST /v1.23/images/load
+    Content-Type: application/x-tar
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[=                                                 ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[==                                                ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[===                                               ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[=====                                             ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"}
+    ...
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Example response**:
+
+If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress
+details are suppressed, and only a confirmation message is returned once the
+action completes.
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Query parameters**:
+
+- **quiet** – Boolean value, suppress progress details during load. Defaults
+  to `0` / `false` if omitted.
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing these files:
+
+- `VERSION`: currently `1.0` - the file format version
+- `json`: detailed layer information, similar to `docker inspect layer_id`
+- `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, the tarball should also include a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+#### Exec Create
+
+`POST /containers/(id or name)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /v1.23/containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "AttachStdin": true,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Cmd": ["sh"],
+      "DetachKeys": "ctrl-p,ctrl-q",
+      "Privileged": true,
+      "Tty": true,
+      "User": "123:456"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id": "f90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command.
+- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command.
+- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command.
+- **DetachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Privileged** - Boolean value, runs the exec process with extended privileges.
+- **User** - A string value specifying the user, and optionally, group to run
+  the exec process inside the container. Format is one of: `"user"`,
+  `"user:group"`, `"uid"`, or `"uid:gid"`.
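+
+The parameters above can be exercised with a short Go sketch (the container ID
+and command are illustrative; error handling is trimmed for brevity):
+
+```
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	body, _ := json.Marshal(map[string]interface{}{
+		"AttachStdout": true,
+		"AttachStderr": true,
+		"Cmd":          []string{"sh", "-c", "echo hello"},
+	})
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	resp, err := client.Post("http://docker/v1.23/containers/e90e34656806/exec",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// On success the daemon answers 201 with the new exec instance ID,
+	// which is then passed to `POST /exec/(id)/start` below.
+	var out struct {
+		Id       string
+		Warnings []string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+		panic(err)
+	}
+	fmt.Println("exec id:", out.Id)
+}
+```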
+ +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. + +**Example request**: + + GET /v1.23/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.23/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.23/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.23/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
+
+**Example request**:
+
+    DELETE /v1.23/volumes/tardis HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **404** - no such volume or volume driver
+- **409** - volume is in use and cannot be removed
+- **500** - server error
+
+### 2.5 Networks
+
+#### List networks
+
+`GET /networks`
+
+**Example request**:
+
+    GET /v1.23/networks?filters={"type":{"custom":true}} HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+[
+  {
+    "Name": "bridge",
+    "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+    "Scope": "local",
+    "Driver": "bridge",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": [
+        {
+          "Subnet": "172.17.0.0/16"
+        }
+      ]
+    },
+    "Containers": {
+      "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
+        "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
+        "MacAddress": "02:42:ac:11:00:02",
+        "IPv4Address": "172.17.0.2/16",
+        "IPv6Address": ""
+      }
+    },
+    "Options": {
+      "com.docker.network.bridge.default_bridge": "true",
+      "com.docker.network.bridge.enable_icc": "true",
+      "com.docker.network.bridge.enable_ip_masquerade": "true",
+      "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+      "com.docker.network.bridge.name": "docker0",
+      "com.docker.network.driver.mtu": "1500"
+    }
+  },
+  {
+    "Name": "none",
+    "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794",
+    "Scope": "local",
+    "Driver": "null",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  },
+  {
+    "Name": "host",
+    "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e",
+    "Scope": "local",
+    "Driver": "host",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  }
+]
+```
+
+**Query parameters**:
+
+- **filters** - JSON encoded network list filter. The filter value is one of:
+  - `id=<network-id>` Matches all or part of a network id.
+  - `name=<network-name>` Matches all or part of a network name.
+  - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
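+
+The `filters` value is JSON that must be URL-encoded when placed in the query
+string. A hedged Go sketch of the call above (socket path assumed to be the
+default):
+
+```
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	filters := url.QueryEscape(`{"type":{"custom":true}}`)
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	resp, err := client.Get("http://docker/v1.23/networks?filters=" + filters)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Only a few fields of each network are decoded here.
+	var nets []struct {
+		Name, Id, Driver, Scope string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&nets); err != nil {
+		panic(err)
+	}
+	for _, n := range nets {
+		fmt.Printf("%s  %s  driver=%s scope=%s\n", n.Id, n.Name, n.Driver, n.Scope)
+	}
+}
+```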
+ +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/(id or name)` + +Return low-level information on the network `id` + +**Example request**: + + GET /v1.23/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found +- **500** - server error + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.23/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false`. + Since Network is primarily keyed based on a random ID and not on the name, + and network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed way to check for duplicates. + This parameter CheckDuplicate is there to provide a best effort checking of any networks + which has the same name but it is not guaranteed to catch all name collisions. +- **Driver** - Name of the network driver plugin to use. 
Defaults to `bridge` driver
+- **Internal** - Restrict external access to the network
+- **IPAM** - Optional custom IP scheme for the network
+  - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver
+  - **Config** - List of IPAM configuration options, specified as a map:
+    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
+- **EnableIPv6** - Enable IPv6 on the network
+- **Options** - Network specific options to be used by the drivers
+- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}`
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "EndpointConfig": {
+    "IPAMConfig": {
+      "IPv4Address":"172.24.56.89",
+      "IPv6Address":"2001:db8::5689"
+    }
+  }
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container is not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **container** - container-id/name to be connected to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "Force":false
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network or container not found
+- **500** - Internal Server Error
+
+**JSON parameters**:
+
+- **Container** - container-id/name to be disconnected from a network
+- **Force** - Force the container to disconnect from a network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+## 3. Going further
+
+### 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
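+
+A minimal Go sketch of this upgrade flow (the container ID is illustrative, and
+the socket path assumes a default Linux install; a real client would also
+decode the multiplexed stream framing described for `/attach`):
+
+```
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"os"
+)
+
+func main() {
+	conn, err := net.Dial("unix", "/var/run/docker.sock")
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close()
+
+	// Ask for the attach stream and advertise the connection upgrade.
+	fmt.Fprint(conn, "POST /v1.23/containers/e90e34656806/attach?stream=1&stdout=1&stderr=1 HTTP/1.1\r\n"+
+		"Host: docker\r\n"+
+		"Upgrade: tcp\r\n"+
+		"Connection: Upgrade\r\n\r\n")
+
+	// Read the response headers (200 OK or 101 UPGRADED) up to the blank
+	// line; everything after it is the raw, hijacked byte stream.
+	br := bufio.NewReader(conn)
+	for {
+		line, err := br.ReadString('\n')
+		if err != nil {
+			panic(err)
+		}
+		fmt.Fprint(os.Stderr, line)
+		if line == "\r\n" {
+			break
+		}
+	}
+	io.Copy(os.Stdout, br)
+}
+```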
+ + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/docs/api/v1.24.md b/docs/api/v1.24.md new file mode 100644 index 0000000..98095cf --- /dev/null +++ b/docs/api/v1.24.md @@ -0,0 +1,5348 @@ +--- +title: "Engine API v1.24" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.24/ +- /reference/api/docker_remote_api_v1.24/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Errors + +The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + { + "message": "page not found" + } + +The status codes that are returned for each endpoint are specified in the endpoint documentation below. + +## 3. Endpoints + +### 3.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.24/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": 
"88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters:
+  - `exited=<int>`; -- containers with exit code of `<int>` ;
+  - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`)
+  - `label=key` or `label="key=value"` of a container label
+  - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+  - `ancestor`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+  - `before`=(`<container id>` or `<container name>`)
+  - `since`=(`<container id>` or `<container name>`)
+  - `volume`=(`<volume name>` or `<mount point destination>`)
+  - `network`=(`<network id>` or `<network name>`)
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **500** – server error
+
+#### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+    POST /v1.24/containers/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Hostname": "",
+        "Domainname": "",
+        "User": "",
+        "AttachStdin": false,
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "FOO=bar",
+            "BAZ=quux"
+        ],
+        "Cmd": [
+            "date"
+        ],
+        "Entrypoint": "",
+        "Image": "ubuntu",
+        "Labels": {
+            "com.example.vendor": "Acme",
+            "com.example.license": "GPL",
+            "com.example.version": "1.0"
+        },
+        "Volumes": {
+            "/volumes/data": {}
+        },
+        "Healthcheck": {
+            "Test": ["CMD-SHELL", "curl localhost:3000"],
+            "Interval": 1000000000,
+            "Timeout": 10000000000,
+            "Retries": 10,
+            "StartPeriod": 60000000000
+        },
+        "WorkingDir": "",
+        "NetworkDisabled": false,
+        "MacAddress": "12:34:56:78:9a:bc",
+        "ExposedPorts": {
+            "22/tcp": {}
+        },
+        "StopSignal": "SIGTERM",
+        "HostConfig": {
+            "Binds": ["/tmp:/tmp"],
+            "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" },
+            "Links": ["redis3:redis"],
+            "Memory": 0,
+            "MemorySwap": 0,
+            "MemoryReservation": 0,
+            "KernelMemory": 0,
+            "CpuPercent": 80,
+            "CpuShares": 512,
+            "CpuPeriod": 100000,
+            "CpuQuota": 50000,
+            "CpusetCpus": "0,1",
+            "CpusetMems": "0,1",
+            "IOMaximumBandwidth": 0,
+            "IOMaximumIOps": 0,
+            "BlkioWeight": 300,
+            "BlkioWeightDevice": [{}],
+            "BlkioDeviceReadBps": [{}],
+            "BlkioDeviceReadIOps": [{}],
+            "BlkioDeviceWriteBps": [{}],
+            "BlkioDeviceWriteIOps": [{}],
+            "MemorySwappiness": 60,
+            "OomKillDisable": false,
+            "OomScoreAdj": 500,
+            "PidMode": "",
+            "PidsLimit": -1,
+            "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+            "PublishAllPorts": false,
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "Dns": ["8.8.8.8"],
+            "DnsOptions": [""],
+            "DnsSearch": [""],
+            "ExtraHosts": null,
+            "VolumesFrom": ["parent", "other:ro"],
+            "CapAdd": ["NET_ADMIN"],
+            "CapDrop": ["MKNOD"],
+            "GroupAdd": ["newgroup"],
+            "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+            "NetworkMode": "bridge",
+            "Devices": [],
+            "Sysctls": { "net.ipv4.ip_forward": "1" },
+            "Ulimits": [{}],
+            "LogConfig": { "Type": "json-file", "Config": {} },
+            "SecurityOpt": [],
+            "StorageOpt": {},
+            "CgroupParent": "",
+            "VolumeDriver": "",
+            "ShmSize": 67108864
+        },
+        "NetworkingConfig": {
+            "EndpointsConfig": {
+                "isolated_nw" : {
+                    "IPAMConfig": {
+                        "IPv4Address":"172.20.30.33",
+                        "IPv6Address":"2001:db8:abcd::3033",
+                        "LinkLocalIPs":["169.254.34.68", "fe80::3468"]
+                    },
+                    "Links":["container_1", "container_2"],
+                    "Aliases":["server_x", "server_y"]
+                }
+            }
+        }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id":"e90e34656806",
+        "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **Hostname** - A string value containing the hostname to use for the
+  container. This must be a valid RFC 1123 hostname.
+- **Domainname** - A string value containing the domain name to use
+  for the container.
+- **User** - A string value specifying the user inside the container.
+- **AttachStdin** - Boolean value, attaches to `stdin`.
+- **AttachStdout** - Boolean value, attaches to `stdout`.
+- **AttachStderr** - Boolean value, attaches to `stderr`.
+- **Tty** - Boolean value, attach standard streams to a `tty`, including `stdin` if it is not closed.
+- **OpenStdin** - Boolean value, opens `stdin`.
+- **StdinOnce** - Boolean value, close `stdin` after the first attached client disconnects.
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]`
+- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }`
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Entrypoint** - Set the entry point for the container as a string or an array
+  of strings.
+- **Image** - A string specifying the image name to use for the container.
+- **Volumes** - An object mapping mount point paths (strings) inside the
+  container to empty objects.
+- **Healthcheck** - A test to perform to check that the container is healthy.
+  - **Test** - The test to perform. Possible values are:
+      + `{}` inherit healthcheck from image or parent image
+      + `{"NONE"}` disable healthcheck
+      + `{"CMD", args...}` exec arguments directly
+      + `{"CMD-SHELL", command}` run command with system's default shell
+  - **Interval** - The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+  - **Timeout** - The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+  - **Retries** - The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit.
+  - **StartPeriod** - The time to wait for container initialization before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit.
+- **WorkingDir** - A string specifying the working directory for commands to
+  run in.
+- **NetworkDisabled** - Boolean value, when true disables networking for the
+  container.
+- **ExposedPorts** - An object mapping ports to an empty object in the form of:
+  `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default.
+- **HostConfig**
+  - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+      + `host-src:container-dest` to bind-mount a host path into the
+        container. Both `host-src`, and `container-dest` must be an
+        _absolute_ path.
+      + `host-src:container-dest:ro` to make the bind-mount read-only
+        inside the container. Both `host-src`, and `container-dest` must be
+        an _absolute_ path.
+      + `volume-name:container-dest` to bind-mount a volume managed by a
+        volume driver into the container. `container-dest` must be an
+        _absolute_ path.
+      + `volume-name:container-dest:ro` to mount the volume read-only
+        inside the container. `container-dest` must be an _absolute_ path.
+  - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding
+    mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+  - **Links** - A list of links for the container. Each link entry should be
+    in the form of `container_name:alias`.
+  - **Memory** - Memory limit in bytes.
+  - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+    You must use this with `memory` and make the swap value larger than `memory`.
+  - **MemoryReservation** - Memory soft limit in bytes.
+  - **KernelMemory** - Kernel memory limit in bytes.
+  - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only)
+  - **CpuShares** - An integer value containing the container's CPU Shares
+    (i.e., the relative weight vs. other containers).
+  - **CpuPeriod** - The length of a CPU period in microseconds.
+  - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+  - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+  - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+  - **IOMaximumBandwidth** - Maximum IO absolute rate in terms of bytes per second.
+  - **IOMaximumIOps** - Maximum IO absolute rate in terms of IOps.
+  - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+  - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+  - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+  - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+  - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+  - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+    `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+  - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+  - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+  - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+  - **PidMode** - Set the PID (Process) Namespace mode for the container;
+    `"container:<name|id>"`: joins another container's PID namespace
+    `"host"`: use the host's PID namespace inside the container
+  - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited.
+  - **PortBindings** - A map of exposed container ports and the host port they
+    should map to. A JSON object in the form
+    `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+    Take note that `port` is specified as a string and not an integer value.
+  - **PublishAllPorts** - Allocates a random host port for all of a container's
+    exposed ports. Specified as a boolean value.
+  - **Privileged** - Gives the container full access to the host. Specified as
+    a boolean value.
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsOptions** - A list of DNS options
+  - **DnsSearch** - A list of DNS search domains
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `<container name>[:<ro|rw>]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **GroupAdd** - A list of additional groups that the container process will run as
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart, `"unless-stopped"` to restart always except when
+    the user has manually stopped the container, or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever increasing delay (double the previous delay, starting at 100mS)
+    is added before each restart to prevent flooding the server.
+  - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping is enabled.
+    Supported values are: `host`.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+    as a custom network's name to which this container should connect.
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **Sysctls** - A list of kernel parameters (sysctls) to set in the container, specified as
+    `{ <name>: <value> }`, for example:
+    `{ "net.ipv4.ip_forward": "1" }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **StorageOpt**: Storage driver options per container. Options can be passed in the form
+    `{"size":"120G"}`
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+    Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`.
+    The default is the `json-file` logging driver.
+  - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+  - **VolumeDriver** - Driver that this container uses to mount volumes.
+  - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted, the system uses 64MB.
+
+**Query parameters**:
+
+- **name** – Assign the specified name to the container. Must
+  match `/?[a-zA-Z0-9_-]+`.
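+
+Putting the parameters above together, a minimal Go sketch that creates a
+container over the daemon's Unix socket (image, command, and container name
+are illustrative):
+
+```
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+func main() {
+	// Only Image is strictly required; everything else falls back to defaults.
+	body, _ := json.Marshal(map[string]interface{}{
+		"Image": "ubuntu",
+		"Cmd":   []string{"date"},
+		"HostConfig": map[string]interface{}{
+			"NetworkMode": "bridge",
+		},
+	})
+	client := &http.Client{Transport: &http.Transport{
+		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+		},
+	}}
+	resp, err := client.Post("http://docker/v1.24/containers/create?name=example",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var out struct {
+		Id       string
+		Warnings []string
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+		panic(err)
+	}
+	fmt.Println("created:", out.Id, "warnings:", out.Warnings)
+}
+```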
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuPercent": 80, + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "Sysctls": { + "net.ipv4.ip_forward": "1" + }, + "StorageOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + 
"bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.24/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **details** - 1/True/true or 0/False/flase, Show extra details provided to logs. 
+
+#### List processes running inside a container
+
+`GET /containers/(id or name)/top`
+
+List processes running inside the container `id`. On Unix systems this
+is done by running the `ps` command. This endpoint is not
+supported on Windows.
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "Titles" : [
+         "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"
+       ],
+       "Processes" : [
+         [
+           "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash"
+         ],
+         [
+           "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10"
+         ]
+       ]
+    }
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Titles" : [
+        "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND"
+      ],
+      "Processes" : [
+        [
+          "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash"
+        ],
+        [
+          "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10"
+        ]
+      ]
+    }
+
+**Query parameters**:
+
+- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef`
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Get container logs
+
+`GET /containers/(id or name)/logs`
+
+Get `stdout` and `stderr` logs from the container `id`
+
+> **Note**:
+> This endpoint works only for containers with the `json-file` or `journald` logging drivers.
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **details** - 1/True/true or 0/False/false, Show extra details provided to logs.
+  Default `false`.
+- **follow** – 1/True/true or 0/False/false, return stream. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`.
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+  will only output log-entries since that timestamp. Default: 0 (unfiltered)
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for
+  every log line. Default `false`.
+- **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **404** – no such container
+- **500** – server error
+
+#### Inspect changes on a container's filesystem
+
+`GET /containers/(id or name)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "Path": "/dev",
+        "Kind": 0
+      },
+      {
+        "Path": "/dev/kmsg",
+        "Kind": 1
+      },
+      {
+        "Path": "/test",
+        "Kind": 1
+      }
+    ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Export a container
+
+`GET /containers/(id or name)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+    GET /v1.24/containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
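+
+A minimal Go sketch for consuming the export stream above: it saves the
+returned tarball to disk. It assumes the daemon is reachable over TCP at
+`localhost:2375`; with the default Unix socket, swap in a transport like the
+one in the inspect sketch earlier.
+
+```go
+package main
+
+import (
+    "io"
+    "net/http"
+    "os"
+)
+
+func main() {
+    resp, err := http.Get("http://localhost:2375/v1.24/containers/4fa6e0f0c678/export")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // The body is a plain tar stream; write it straight to a file.
+    out, err := os.Create("rootfs.tar")
+    if err != nil {
+        panic(err)
+    }
+    defer out.Close()
+
+    if _, err := io.Copy(out, resp.Body); err != nil {
+        panic(err)
+    }
+}
+```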
+
+#### Get container stats based on resource usage
+
+`GET /containers/(id or name)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
+
+**Example request**:
+
+    GET /v1.24/containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+       "read" : "2015-01-08T22:57:31.547920715Z",
+       "pids_stats": {
+          "current": 3
+       },
+       "networks": {
+            "eth0": {
+                "rx_bytes": 5338,
+                "rx_dropped": 0,
+                "rx_errors": 0,
+                "rx_packets": 36,
+                "tx_bytes": 648,
+                "tx_dropped": 0,
+                "tx_errors": 0,
+                "tx_packets": 8
+            },
+            "eth5": {
+                "rx_bytes": 4641,
+                "rx_dropped": 0,
+                "rx_errors": 0,
+                "rx_packets": 26,
+                "tx_bytes": 690,
+                "tx_dropped": 0,
+                "tx_errors": 0,
+                "tx_packets": 9
+            }
+       },
+       "memory_stats" : {
+          "stats" : {
+             "total_pgmajfault" : 0,
+             "cache" : 0,
+             "mapped_file" : 0,
+             "total_inactive_file" : 0,
+             "pgpgout" : 414,
+             "rss" : 6537216,
+             "total_mapped_file" : 0,
+             "writeback" : 0,
+             "unevictable" : 0,
+             "pgpgin" : 477,
+             "total_unevictable" : 0,
+             "pgmajfault" : 0,
+             "total_rss" : 6537216,
+             "total_rss_huge" : 6291456,
+             "total_writeback" : 0,
+             "total_inactive_anon" : 0,
+             "rss_huge" : 6291456,
+             "hierarchical_memory_limit" : 67108864,
+             "total_pgfault" : 964,
+             "total_active_file" : 0,
+             "active_anon" : 6537216,
+             "total_active_anon" : 6537216,
+             "total_pgpgout" : 414,
+             "total_cache" : 0,
+             "inactive_anon" : 0,
+             "active_file" : 0,
+             "pgfault" : 964,
+             "inactive_file" : 0,
+             "total_pgpgin" : 477
+          },
+          "max_usage" : 6651904,
+          "usage" : 6537216,
+          "failcnt" : 0,
+          "limit" : 67108864
+       },
+       "blkio_stats" : {},
+       "cpu_stats" : {
+          "cpu_usage" : {
+             "percpu_usage" : [
+                8646879,
+                24472255,
+                36438778,
+                30657443
+             ],
+             "usage_in_usermode" : 50000000,
+             "total_usage" : 100215355,
+             "usage_in_kernelmode" : 30000000
+          },
+          "system_cpu_usage" : 739306590000000,
+          "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+       },
+       "precpu_stats" : {
+          "cpu_usage" : {
+             "percpu_usage" : [
+                8646879,
+                24350896,
+                36438778,
+                30657443
+             ],
+             "usage_in_usermode" : 50000000,
+             "total_usage" : 100093996,
+             "usage_in_kernelmode" : 30000000
+          },
+          "system_cpu_usage" : 9492140000000,
+          "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0}
+       }
+    }
+
+The `precpu_stats` field contains the CPU statistics from the previous read,
+and is used to calculate the CPU usage percentage. It is not an exact copy of
+the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize`
+
+Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+    POST /v1.24/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/start HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+ +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. + +**Example request**: + + POST /v1.24/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.24/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or 
name)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 101 UPGRADED
+    Content-Type: application/vnd.docker.raw-stream
+    Connection: Upgrade
+    Upgrade: tcp
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **101** – no error, hints proxy about hijacking
+- **200** – no error, no upgrade header found
+- **400** – bad parameter
+- **404** – no such container
+- **409** - container is paused
+- **500** – server error
+
+**Stream details**:
+
+When the TTY setting is enabled in
+[`POST /containers/create`
+](#create-a-container),
+the stream is the raw data from the process PTY and client's `stdin`.
+When the TTY is disabled, the stream is multiplexed to separate
+`stdout` and `stderr`.
+
+The format is a **Header** and a **Payload** (frame).
+
+**HEADER**
+
+The header indicates which stream the frame belongs to (`stdout` or
+`stderr`). It also contains the size of the associated frame, encoded in the
+last four bytes (`uint32`).
+
+It is encoded in the first eight bytes like this:
+
+    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+`STREAM_TYPE` can be:
+
+- 0: `stdin` (is written on `stdout`)
+- 1: `stdout`
+- 2: `stderr`
+
+`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of
+the `uint32` size encoded as big endian.
+
+**PAYLOAD**
+
+The payload is the raw stream.
+
+**IMPLEMENTATION**
+
+The simplest way to implement the Attach protocol is the following:
+
+    1. Read eight bytes.
+    2. Choose `stdout` or `stderr` depending on the first byte.
+    3. Extract the frame size from the last four bytes.
+    4. Read the extracted size and output it on the correct output.
+    5. Goto 1.
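+
+The framing above maps directly onto a small demultiplexer. The Go sketch
+below uses no Docker libraries and follows the five steps literally; the
+`bytes.NewReader` input in `main` is a stand-in for an already-hijacked
+connection.
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/binary"
+    "fmt"
+    "io"
+    "os"
+)
+
+// demux reads multiplexed attach frames from r and copies each payload to
+// stdout or stderr according to the STREAM_TYPE byte in the frame header.
+func demux(r io.Reader) error {
+    var header [8]byte
+    for {
+        // 1. Read eight bytes.
+        if _, err := io.ReadFull(r, header[:]); err != nil {
+            if err == io.EOF {
+                return nil // clean end of stream
+            }
+            return err
+        }
+        // 2. Choose stdout or stderr depending on the first byte.
+        out := os.Stdout
+        if header[0] == 2 {
+            out = os.Stderr
+        }
+        // 3. Extract the frame size from the last four bytes (big endian).
+        size := binary.BigEndian.Uint32(header[4:8])
+        // 4. Read exactly that many bytes and write them to the output.
+        if _, err := io.CopyN(out, r, int64(size)); err != nil {
+            return err
+        }
+        // 5. Goto 1 (loop).
+    }
+}
+
+func main() {
+    // Fake input: one stdout frame carrying the six bytes "hello\n".
+    frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 6}, []byte("hello\n")...)
+    if err := demux(bytes.NewReader(frame)); err != nil {
+        fmt.Fprintln(os.Stderr, err)
+    }
+}
+```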
+
+#### Attach to a container (websocket)
+
+`GET /containers/(id or name)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+    GET /v1.24/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**Query parameters**:
+
+- **detachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **logs** – 1/True/true or 0/False/false, return logs. Default `false`.
+- **stream** – 1/True/true or 0/False/false, return stream.
+  Default `false`.
+- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach
+  to `stdin`. Default `false`.
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stdout` log, if `stream=true`, attach to `stdout`. Default `false`.
+- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return
+  `stderr` log, if `stream=true`, attach to `stderr`. Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **500** – server error
+
+#### Wait a container
+
+`POST /containers/(id or name)/wait`
+
+Block until container `id` stops, then return the exit code
+
+**Example request**:
+
+    POST /v1.24/containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"StatusCode": 0}
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Remove a container
+
+`DELETE /containers/(id or name)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.24/containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Query parameters**:
+
+- **v** – 1/True/true or 0/False/false, Remove the volumes
+  associated with the container. Default `false`.
+- **force** - 1/True/true or 0/False/false, Kill then remove the container.
+  Default `false`.
+- **link** - 1/True/true or 0/False/false, Remove the specified
+  link associated with the container. Default `false`.
+
+**Status codes**:
+
+- **204** – no error
+- **400** – bad parameter
+- **404** – no such container
+- **409** – conflict
+- **500** – server error
+
+#### Retrieving information about files and folders in a container
+
+`HEAD /containers/(id or name)/archive`
+
+See the description of the `X-Docker-Container-Path-Stat` header in the
+following section.
+
+#### Get an archive of a filesystem resource in a container
+
+`GET /containers/(id or name)/archive`
+
+Get a tar archive of a resource in the filesystem of container `id`.
+
+**Query parameters**:
+
+- **path** - resource in the container's filesystem to archive. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The resource specified by **path** must exist. To assert that the resource
+  is expected to be a directory, **path** should end in `/` or `/.`
+  (assuming a path separator of `/`). If **path** ends in `/.` then this
+  indicates that only the contents of the **path** directory should be
+  copied. A symlink is always resolved to its target.
+
+  > **Note**: It is not possible to copy certain system files such as resources
+  > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the
+  > container.
+
+**Example request**:
+
+    GET /v1.24/containers/8cce319429b2/archive?path=/root HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+    X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+On success, a response header `X-Docker-Container-Path-Stat` will be set to a
+base64-encoded JSON object containing some filesystem header information about
+the archived resource. The above example value would decode to the following
+JSON object (whitespace added for readability):
+
+```json
+{
+    "name": "root",
+    "size": 4096,
+    "mode": 2147484096,
+    "mtime": "2014-02-27T20:51:23Z",
+    "linkTarget": ""
+}
+```
+
+A `HEAD` request can also be made to this endpoint if only this information is
+desired.
+
+**Status codes**:
+
+- **200** - success, returns archive of copied resource
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** was asserted to be a directory but exists as a
+      file)
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** does not exist)
+- **500** - server error
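+
+A hedged Go sketch for this endpoint: it decodes the base64 JSON carried in
+`X-Docker-Container-Path-Stat` before saving the tar stream. The `pathStat`
+struct simply mirrors the decoded example above and is not an official type;
+the daemon is assumed reachable at `localhost:2375`.
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net/http"
+    "os"
+)
+
+// pathStat mirrors the JSON object documented above.
+type pathStat struct {
+    Name       string `json:"name"`
+    Size       int64  `json:"size"`
+    Mode       uint32 `json:"mode"`
+    Mtime      string `json:"mtime"`
+    LinkTarget string `json:"linkTarget"`
+}
+
+func main() {
+    resp, err := http.Get("http://localhost:2375/v1.24/containers/8cce319429b2/archive?path=/root")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // Decode the stat header before touching the tar stream.
+    raw, err := base64.StdEncoding.DecodeString(resp.Header.Get("X-Docker-Container-Path-Stat"))
+    if err != nil {
+        panic(err)
+    }
+    var st pathStat
+    if err := json.Unmarshal(raw, &st); err != nil {
+        panic(err)
+    }
+    fmt.Printf("archiving %q (%d bytes)\n", st.Name, st.Size)
+
+    // Save the tar stream itself.
+    out, err := os.Create("root.tar")
+    if err != nil {
+        panic(err)
+    }
+    defer out.Close()
+    io.Copy(out, resp.Body)
+}
+```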
+
+#### Extract an archive of files or folders to a directory in a container
+
+`PUT /containers/(id or name)/archive`
+
+Upload a tar archive to be extracted to a path in the filesystem of container
+`id`.
+
+**Query parameters**:
+
+- **path** - path to a directory in the container
+  to extract the archive's contents into. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The **path** resource must exist.
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error
+  if unpacking the given content would cause an existing directory to be
+  replaced with a non-directory and vice versa.
+
+**Example request**:
+
+    PUT /v1.24/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** – the content was extracted successfully
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** should be a directory but exists as a file)
+    - unable to overwrite existing directory with non-directory
+      (if **noOverwriteDirNonDir**)
+    - unable to overwrite existing non-directory with directory
+      (if **noOverwriteDirNonDir**)
+- **403** - client error, permission denied, the volume
+  or container rootfs is marked as read-only.
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** resource does not exist)
+- **500** – server error
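+
+For the upload direction, a small Go sketch (again assuming `localhost:2375`)
+that builds a one-file tar archive in memory with `archive/tar` and `PUT`s it
+to a hypothetical `/vol1` directory inside the container:
+
+```go
+package main
+
+import (
+    "archive/tar"
+    "bytes"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // Build a tiny tar archive in memory containing a single file.
+    var buf bytes.Buffer
+    tw := tar.NewWriter(&buf)
+    data := []byte("hello from the API\n")
+    tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(data))})
+    tw.Write(data)
+    tw.Close()
+
+    // PUT it to be extracted at /vol1 inside the container.
+    req, err := http.NewRequest(http.MethodPut,
+        "http://localhost:2375/v1.24/containers/8cce319429b2/archive?path=/vol1", &buf)
+    if err != nil {
+        panic(err)
+    }
+    req.Header.Set("Content-Type", "application/x-tar")
+
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println(resp.Status) // expect 200 OK on success
+}
+```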
+
+### 3.2 Images
+
+#### List Images
+
+`GET /images/json`
+
+**Example request**:
+
+    GET /v1.24/images/json?all=0 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+         "RepoTags": [
+           "ubuntu:12.04",
+           "ubuntu:precise",
+           "ubuntu:latest"
+         ],
+         "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+         "Created": 1365714795,
+         "Size": 131506275,
+         "VirtualSize": 131506275,
+         "Labels": {}
+      },
+      {
+         "RepoTags": [
+           "ubuntu:12.10",
+           "ubuntu:quantal"
+         ],
+         "ParentId": "27cf784147099545",
+         "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+         "Created": 1364102658,
+         "Size": 24653,
+         "VirtualSize": 180116135,
+         "Labels": {
+            "com.example.version": "v1"
+         }
+      }
+    ]
+
+**Example request, with digest information**:
+
+    GET /v1.24/images/json?digests=1 HTTP/1.1
+
+**Example response, with digest information**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "Created": 1420064636,
+        "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125",
+        "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2",
+        "RepoDigests": [
+          "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
+        ],
+        "RepoTags": [
+          "localhost:5000/test/busybox:latest",
+          "playdate:latest"
+        ],
+        "Size": 0,
+        "VirtualSize": 2429728,
+        "Labels": {}
+      }
+    ]
+
+The response shows a single image `Id` associated with two repositories
+(`RepoTags`): `localhost:5000/test/busybox` and `playdate`. A caller can use
+either of the `RepoTags` values `localhost:5000/test/busybox:latest` or
+`playdate:latest` to reference the image.
+
+You can also use `RepoDigests` values to reference an image. In this response,
+the array has only one reference and that is to the
+`localhost:5000/test/busybox` repository; the `playdate` repository has no
+digest. You can reference this digest using the value:
+`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...`
+
+See the `docker run` and `docker build` commands for examples of digest and tag
+references on the command line.
+
+**Query parameters**:
+
+- **all** – 1/True/true or 0/False/false, default false
+- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
+  - `dangling=true`
+  - `label=key` or `label="key=value"` of an image label
+  - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+  - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+- **filter** - only return images with the specified name
+
+#### Build image from a Dockerfile
+
+`POST /build`
+
+Build an image from a Dockerfile
+
+**Example request**:
+
+    POST /v1.24/build HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"stream": "Step 1/5..."}
+    {"stream": "..."}
+    {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}}
+
+The input stream must be a `tar` archive compressed with one of the
+following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`.
+
+The archive must include a build instructions file, typically called
+`Dockerfile` at the archive's root. The `dockerfile` parameter may be
+used to specify a different build instructions file.
To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/x-tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. 
However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.24/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.24/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.24/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 
182964289,
+         "Comment": ""
+      },
+      {
+         "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8",
+         "Created": 1398108222,
+         "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+         "Tags": null,
+         "Size": 0,
+         "Comment": ""
+      },
+      {
+         "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+         "Created": 1371157430,
+         "CreatedBy": "",
+         "Tags": [
+           "scratch12:latest",
+           "scratch:latest"
+         ],
+         "Size": 0,
+         "Comment": "Imported from -"
+      }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.24/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+To push an image to a private registry, the image must already be tagged into a
+repository that references that registry's `hostname` and `port`. This
+repository name is then used in the URL. This mirrors the command line's flow.
+
+The push is cancelled if the HTTP connection is closed.
+
+**Example request**:
+
+    POST /v1.24/images/registry.acme.com:5000/test/push HTTP/1.1
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+            "username": "jdoe",
+            "password": "secret",
+            "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Identity token based login:
+
+        ```
+        {
+            "identitytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.24/images/test/tag?repo=myrepo&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.24/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+     {"Untagged": "3e2f21a89f"},
+     {"Deleted": "3e2f21a89f"},
+     {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
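+
+A short Go sketch against the remove endpoint above, decoding the array of
+`Untagged`/`Deleted` records in the response (daemon assumed reachable at
+`localhost:2375`):
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    req, err := http.NewRequest(http.MethodDelete,
+        "http://localhost:2375/v1.24/images/test?force=0&noprune=0", nil)
+    if err != nil {
+        panic(err)
+    }
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // Each array element reports one untagged or deleted reference/layer.
+    var events []map[string]string
+    if err := json.NewDecoder(resp.Body).Decode(&events); err != nil {
+        panic(err)
+    }
+    for _, e := range events {
+        for action, ref := range e {
+            fmt.Printf("%s: %s\n", action, ref)
+        }
+    }
+}
+```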
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+**Example request**:
+
+    GET /v1.24/images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+            {
+                "description": "",
+                "is_official": false,
+                "is_automated": false,
+                "name": "wma55/u1210sshd",
+                "star_count": 0
+            },
+            {
+                "description": "",
+                "is_official": false,
+                "is_automated": false,
+                "name": "jdswinbank/sshd",
+                "star_count": 0
+            },
+            {
+                "description": "",
+                "is_official": false,
+                "is_automated": false,
+                "name": "vgauthier/sshd",
+                "star_count": 0
+            }
+    ...
+    ]
+
+**Query parameters**:
+
+- **term** – term to search
+- **limit** – maximum returned search results
+- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
+  - `stars=<number>`
+  - `is-automated=(true|false)`
+  - `is-official=(true|false)`
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+### 3.3 Misc
+
+#### Check auth configuration
+
+`POST /auth`
+
+Validate credentials for a registry and, if available, get an identity token
+for accessing the registry without a password.
+
+**Example request**:
+
+    POST /v1.24/auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+         "username": "hannibal",
+         "password": "xxxx",
+         "serveraddress": "https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+    {
+         "Status": "Login Succeeded",
+         "IdentityToken": "9cbaf023786cd7..."
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+#### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /v1.24/info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Architecture": "x86_64",
+        "ClusterStore": "etcd://localhost:2379",
+        "CgroupDriver": "cgroupfs",
+        "Containers": 11,
+        "ContainersRunning": 7,
+        "ContainersStopped": 3,
+        "ContainersPaused": 1,
+        "CpuCfsPeriod": true,
+        "CpuCfsQuota": true,
+        "Debug": false,
+        "DockerRootDir": "/var/lib/docker",
+        "Driver": "btrfs",
+        "DriverStatus": [[""]],
+        "ExperimentalBuild": false,
+        "HttpProxy": "http://test:test@localhost:8080",
+        "HttpsProxy": "https://test:test@localhost:8080",
+        "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+        "IPv4Forwarding": true,
+        "Images": 16,
+        "IndexServerAddress": "https://index.docker.io/v1/",
+        "InitPath": "/usr/bin/docker",
+        "InitSha1": "",
+        "KernelMemory": true,
+        "KernelVersion": "3.12.0-1-amd64",
+        "Labels": [
+            "storage=ssd"
+        ],
+        "MemTotal": 2099236864,
+        "MemoryLimit": true,
+        "NCPU": 1,
+        "NEventsListener": 0,
+        "NFd": 11,
+        "NGoroutines": 21,
+        "Name": "prod-server-42",
+        "NoProxy": "9.81.1.160",
+        "OomKillDisable": true,
+        "OSType": "linux",
+        "OperatingSystem": "Boot2Docker",
+        "Plugins": {
+            "Volume": [
+                "local"
+            ],
+            "Network": [
+                "null",
+                "host",
+                "bridge"
+            ]
+        },
+        "RegistryConfig": {
+            "IndexConfigs": {
+                "docker.io": {
+                    "Mirrors": null,
+                    "Name": "docker.io",
+                    "Official": true,
+                    "Secure": true
+                }
+            },
+            "InsecureRegistryCIDRs": [
+                "127.0.0.0/8"
+            ]
+        },
+        "SecurityOptions": [
+            "apparmor",
+            "seccomp",
+            "selinux"
+        ],
+        "ServerVersion": "1.9.0",
+        "SwapLimit": false,
+        "SystemStatus": [["State", "Healthy"]],
+        "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /v1.24/version HTTP/1.1
+
+**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.12.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.6.3", + "GitCommit": "deadbee", + "Arch": "amd64", + "ApiVersion": "1.24", + "BuildTime": "2016-06-14T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.24/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
+
+Docker containers report the following events:
+
+    attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update
+
+Docker images report the following events:
+
+    delete, import, load, pull, push, save, tag, untag
+
+Docker volumes report the following events:
+
+    create, mount, unmount, destroy
+
+Docker networks report the following events:
+
+    create, connect, disconnect, destroy
+
+The Docker daemon reports the following event:
+
+    reload
+
+**Example request**:
+
+    GET /v1.24/events?since=1374067924
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Server: Docker/1.12.0 (linux)
+    Date: Fri, 29 Apr 2016 15:18:06 GMT
+    Transfer-Encoding: chunked
+
+    {
+      "status": "pull",
+      "id": "alpine:latest",
+      "Type": "image",
+      "Action": "pull",
+      "Actor": {
+        "ID": "alpine:latest",
+        "Attributes": {
+          "name": "alpine"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101301854122
+    }
+    {
+      "status": "create",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "create",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101381709551
+    }
+    {
+      "status": "attach",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "attach",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101383858412
+    }
+    {
+      "Type": "network",
+      "Action": "connect",
+      "Actor": {
+        "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474",
+        "Attributes": {
+          "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+          "name": "bridge",
+          "type": "bridge"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101394865557
+    }
+    {
+      "status": "start",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "start",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101607533796
+    }
+    {
+      "status": "resize",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "resize",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "height": "46",
+          "image": "alpine",
+          "name": "my-container",
+          "width": "204"
+        }
+      },
+      "time": 1461943101,
+      "timeNano": 1461943101610269268
+    }
+    {
+      "status": "die",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "die",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "exitCode": "0",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943105,
+      "timeNano": 1461943105079144137
+    }
+    {
+      "Type": "network",
+      "Action": "disconnect",
+      "Actor": {
+        "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474",
+        "Attributes": {
+          "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+          "name": "bridge",
+          "type": "bridge"
+        }
+      },
+      "time": 1461943105,
+      "timeNano": 1461943105230860245
+    }
+    {
+      "status": "destroy",
+      "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+      "from": "alpine",
+      "Type": "container",
+      "Action": "destroy",
+      "Actor": {
+        "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743",
+        "Attributes": {
+          "com.example.some-label": "some-label-value",
+          "image": "alpine",
+          "name": "my-container"
+        }
+      },
+      "time": 1461943105,
+      "timeNano": 1461943105338056026
+    }
+
+**Query parameters**:
+
+- **since** – Timestamp. Show all events created since timestamp and then stream
+- **until** – Timestamp. Show events created until given timestamp and stop streaming
+- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters:
+  - `container=<string>`; -- container to filter
+  - `event=<string>`; -- event to filter
+  - `image=<string>`; -- image to filter
+  - `label=<string>`; -- image and container label to filter
+  - `type=<string>`; -- either `container` or `image` or `volume` or `network` or `daemon`
+  - `volume=<string>`; -- volume to filter
+  - `network=<string>`; -- network to filter
+  - `daemon=<string>`; -- daemon name or id to filter
+
+**Status codes**:
+
+- **200** – no error
+- **400** - bad parameter
+- **500** – server error
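+
+Because the response is an unbounded stream of concatenated JSON objects, a
+client can decode it in a loop. A non-normative Go sketch (daemon assumed at
+`localhost:2375`; the `event` struct models only a few fields):
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "net/url"
+)
+
+type event struct {
+    Type   string
+    Action string
+    Actor  struct {
+        ID         string
+        Attributes map[string]string
+    }
+}
+
+func main() {
+    // Only container events: filters are a JSON-encoded map[string][]string.
+    filters, _ := json.Marshal(map[string][]string{"type": {"container"}})
+    u := "http://localhost:2375/v1.24/events?filters=" + url.QueryEscape(string(filters))
+
+    resp, err := http.Get(u)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    // The stream has no enclosing array; decode object after object.
+    dec := json.NewDecoder(resp.Body)
+    for {
+        var ev event
+        if err := dec.Decode(&ev); err != nil {
+            break // connection closed or stream ended
+        }
+        fmt.Printf("%s %s %s\n", ev.Type, ev.Action, ev.Actor.ID)
+    }
+}
+```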
"my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` or `daemon` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + - `daemon=`; -- daemon name or id to filter + +**Status codes**: + +- **200** – no error +- **400** - bad parameter +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+
+#### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into a Docker repository.
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+    POST /v1.24/images/load
+    Content-Type: application/x-tar
+
+    Tarball in body
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"}
+    {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"}
+    ...
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Example response**:
+
+If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress
+details are suppressed, and only a confirmation message is returned once the
+action completes.
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+    {"stream":"Loaded image: busybox:latest\n"}
+
+**Query parameters**:
+
+- **quiet** – Boolean value, suppress progress details during load. Defaults
+  to `0` / `false` if omitted.
+
+**Status codes**:
+
+- **200** – no error
+- **500** – server error
+
+#### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing these files:
+
+- `VERSION`: currently `1.0` - the file format version
+- `json`: detailed layer information, similar to `docker inspect layer_id`
+- `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, the tarball should also include a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+#### Exec Create
+
+`POST /containers/(id or name)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /v1.24/containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "AttachStdin": true,
+      "AttachStdout": true,
+      "AttachStderr": true,
+      "Cmd": ["sh"],
+      "DetachKeys": "ctrl-p,ctrl-q",
+      "Privileged": true,
+      "Tty": true,
+      "User": "123:456"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+         "Id": "f90e34656806",
+         "Warnings":[]
+    }
+
+**JSON parameters**:
+
+- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command.
+- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command.
+- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command.
+- **DetachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+- **Cmd** - Command to run specified as a string or an array of strings.
+- **Privileged** - Boolean value, runs the exec process with extended privileges.
+- **User** - A string value specifying the user, and optionally, group to run
+  the exec process inside the container. Format is one of: `"user"`,
+  `"user:group"`, `"uid"`, or `"uid:gid"`.
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such container
+- **409** - container is paused
+- **500** - server error
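+
+The create and start endpoints are normally used together. A hedged Go sketch
+of that two-step flow, using an illustrative command and the container ID from
+the examples above (daemon assumed at `localhost:2375`):
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // 1. Create the exec instance and capture its ID.
+    createBody, _ := json.Marshal(map[string]interface{}{
+        "AttachStdout": true,
+        "AttachStderr": true,
+        "Cmd":          []string{"ls", "-l", "/"},
+    })
+    resp, err := http.Post(
+        "http://localhost:2375/v1.24/containers/e90e34656806/exec",
+        "application/json", bytes.NewReader(createBody))
+    if err != nil {
+        panic(err)
+    }
+    var created struct{ Id string }
+    json.NewDecoder(resp.Body).Decode(&created)
+    resp.Body.Close()
+
+    // 2. Start it; with Detach=false the response carries the command output.
+    startBody := bytes.NewReader([]byte(`{"Detach": false, "Tty": false}`))
+    resp2, err := http.Post(
+        "http://localhost:2375/v1.24/exec/"+created.Id+"/start",
+        "application/json", startBody)
+    if err != nil {
+        panic(err)
+    }
+    defer resp2.Body.Close()
+    fmt.Println("exec", created.Id, "->", resp2.Status)
+    // The body uses the same stream framing as the attach endpoint.
+}
+```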
+
+#### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up `exec` instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /v1.24/exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+     "Detach": false,
+     "Tty": false
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream
+
+    {% raw %}
+    {{ STREAM }}
+    {% endraw %}
+
+**JSON parameters**:
+
+- **Detach** - Detach from the `exec` command.
+- **Tty** - Boolean value to allocate a pseudo-TTY.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **409** - container is paused
+
+**Stream details**:
+
+Similar to the stream behavior of the `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.24/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.24/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "CanRemove": false,
+      "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126",
+      "DetachKeys": "",
+      "ExitCode": 2,
+      "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b",
+      "OpenStderr": true,
+      "OpenStdin": true,
+      "OpenStdout": true,
+      "ProcessConfig": {
+        "arguments": [
+          "-c",
+          "exit 2"
+        ],
+        "entrypoint": "sh",
+        "privileged": false,
+        "tty": true,
+        "user": "1000"
+      },
+      "Running": false
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+- **500** - server error
+
+### 3.4 Volumes
+
+#### List volumes
+
+`GET /volumes`
+
+**Example request**:
+
+    GET /v1.24/volumes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Volumes": [
+        {
+          "Name": "tardis",
+          "Driver": "local",
+          "Mountpoint": "/var/lib/docker/volumes/tardis",
+          "Labels": null,
+          "Scope": "local"
+        }
+      ],
+      "Warnings": []
+    }
+
+**Query parameters**:
+
+- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters:
+  - `name=<volume-name>` Matches all or part of a volume name.
+  - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are "dangling" (not in use by a container). When set to `false` (or `0`), only volumes that are in use by one or more containers are returned.
+  - `driver=<volume-driver-name>` Matches all or part of a volume driver name.
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
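+
+As with the events endpoint, the `filters` value is a JSON-encoded
+`map[string][]string`. A small Go sketch that lists dangling volumes
+(daemon assumed at `localhost:2375`; the result struct is illustrative):
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "net/url"
+)
+
+func main() {
+    // Find dangling volumes.
+    filters, _ := json.Marshal(map[string][]string{"dangling": {"true"}})
+    u := "http://localhost:2375/v1.24/volumes?filters=" + url.QueryEscape(string(filters))
+
+    resp, err := http.Get(u)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var result struct {
+        Volumes []struct {
+            Name       string
+            Driver     string
+            Mountpoint string
+        }
+        Warnings []string
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+        panic(err)
+    }
+    for _, v := range result.Volumes {
+        fmt.Printf("%s (%s) at %s\n", v.Name, v.Driver, v.Mountpoint)
+    }
+}
+```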
+
+#### Create a volume
+
+`POST /volumes/create`
+
+Create a volume
+
+**Example request**:
+
+    POST /v1.24/volumes/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      },
+      "Driver": "custom"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Driver": "custom",
+      "Mountpoint": "/var/lib/docker/volumes/tardis",
+      "Status": {
+        "hello": "world"
+      },
+      "Labels": {
+        "com.example.some-label": "some-value",
+        "com.example.some-other-label": "some-other-value"
+      },
+      "Scope": "local"
+    }
+
+**Status codes**:
+
+- **201** - no error
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new volume's name. If not specified, Docker generates a name.
+- **Driver** - Name of the volume driver to use. Defaults to `local` for the name.
+- **DriverOpts** - A mapping of driver options and values. These options are
+  passed directly to the driver and are driver specific.
+- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}`
+
+**JSON fields in response**:
+
+Refer to the [inspect a volume](#inspect-a-volume) section for details about the
+JSON fields returned in the response.
+
+#### Inspect a volume
+
+`GET /volumes/(name)`
+
+Return low-level information on the volume `name`
+
+**Example request**:
+
+    GET /v1.24/volumes/tardis
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "Name": "tardis",
+      "Driver": "custom",
+      "Mountpoint": "/var/lib/docker/volumes/tardis/_data",
+      "Status": {
+        "hello": "world"
+      },
+      "Labels": {
+          "com.example.some-label": "some-value",
+          "com.example.some-other-label": "some-other-value"
+      },
+      "Scope": "local"
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **404** - no such volume
+- **500** - server error
+
+**JSON fields in response**:
+
+The following fields can be returned in the API response. Empty fields, or
+fields that are not supported by the volume's driver may be omitted in the
+response.
+
+- **Name** - Name of the volume.
+- **Driver** - Name of the volume driver used by the volume.
+- **Mountpoint** - Mount path of the volume on the host.
+- **Status** - Low-level details about the volume, provided by the volume driver.
+  Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`.
+  The `Status` field is optional, and is omitted if the volume driver does not
+  support this feature.
+- **Labels** - Labels set on the volume, specified as a map: `{"key":"value","key2":"value2"}`.
+- **Scope** - Scope describes the level at which the volume exists, can be one of
+  `global` for cluster-wide or `local` for machine level. The default is `local`.
+
+#### Remove a volume
+
+`DELETE /volumes/(name)`
+
+Instruct the driver to remove the volume (`name`).
+
+**Example request**:
+
+    DELETE /v1.24/volumes/tardis HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **404** - no such volume or volume driver
+- **409** - volume is in use and cannot be removed
+- **500** - server error
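+
+Tying the volume endpoints together, a brief Go sketch that creates and then
+removes the `tardis` volume from the examples above (daemon assumed at
+`localhost:2375`):
+
+```go
+package main
+
+import (
+    "bytes"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // Create a volume named "tardis" with the default local driver.
+    body := bytes.NewReader([]byte(`{"Name": "tardis", "Driver": "local"}`))
+    resp, err := http.Post("http://localhost:2375/v1.24/volumes/create",
+        "application/json", body)
+    if err != nil {
+        panic(err)
+    }
+    resp.Body.Close()
+    fmt.Println("create:", resp.Status) // expect 201 Created
+
+    // Remove it again; 204 on success, 409 if a container still uses it.
+    req, _ := http.NewRequest(http.MethodDelete,
+        "http://localhost:2375/v1.24/volumes/tardis", nil)
+    resp2, err := http.DefaultClient.Do(req)
+    if err != nil {
+        panic(err)
+    }
+    resp2.Body.Close()
+    fmt.Println("remove:", resp2.Status)
+}
+```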
+
+### 3.5 Networks
+
+#### List networks
+
+`GET /networks`
+
+**Example request**:
+
+    GET /v1.24/networks?filters={"type":{"custom":true}} HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+[
+  {
+    "Name": "bridge",
+    "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+    "Scope": "local",
+    "Driver": "bridge",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": [
+        {
+          "Subnet": "172.17.0.0/16"
+        }
+      ]
+    },
+    "Containers": {
+      "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
+        "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
+        "MacAddress": "02:42:ac:11:00:02",
+        "IPv4Address": "172.17.0.2/16",
+        "IPv6Address": ""
+      }
+    },
+    "Options": {
+      "com.docker.network.bridge.default_bridge": "true",
+      "com.docker.network.bridge.enable_icc": "true",
+      "com.docker.network.bridge.enable_ip_masquerade": "true",
+      "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+      "com.docker.network.bridge.name": "docker0",
+      "com.docker.network.driver.mtu": "1500"
+    }
+  },
+  {
+    "Name": "none",
+    "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794",
+    "Scope": "local",
+    "Driver": "null",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  },
+  {
+    "Name": "host",
+    "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e",
+    "Scope": "local",
+    "Driver": "host",
+    "EnableIPv6": false,
+    "Internal": false,
+    "IPAM": {
+      "Driver": "default",
+      "Config": []
+    },
+    "Containers": {},
+    "Options": {}
+  }
+]
+```
+
+**Query parameters**:
+
+- **filters** - JSON encoded network list filter. The filter value is one of:
+  - `driver=<driver-name>` Matches a network's driver.
+  - `id=<network-id>` Matches all or part of a network id.
+  - `label=<key>` or `label=<key>=<value>` of a network label.
+  - `name=<network-name>` Matches all or part of a network name.
+  - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Inspect network
+
+`GET /networks/(id or name)`
+
+Return low-level information on the network `id`
+
+**Example request**:
+
+    GET /v1.24/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+{
+  "Name": "net01",
+  "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99",
+  "Scope": "local",
+  "Driver": "bridge",
+  "EnableIPv6": false,
+  "IPAM": {
+    "Driver": "default",
+    "Config": [
+      {
+        "Subnet": "172.19.0.0/16",
+        "Gateway": "172.19.0.1"
+      }
+    ],
+    "Options": {
+      "foo": "bar"
+    }
+  },
+  "Internal": false,
+  "Containers": {
+    "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
+      "Name": "test",
+      "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
+      "MacAddress": "02:42:ac:13:00:02",
+      "IPv4Address": "172.19.0.2/16",
+      "IPv6Address": ""
+    }
+  },
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  },
+  "Labels": {
+    "com.example.some-label": "some-value",
+    "com.example.some-other-label": "some-other-value"
+  }
+}
+```
+
+**Status codes**:
+
+- **200** - no error
+- **404** - network not found
+- **500** - server error
+
+#### Create a network
+
+`POST /networks/create`
+
+Create a network
+
+**Example request**:
+
+```
+POST /v1.24/networks/create HTTP/1.1
+Content-Type: application/json
+
+{
+  "Name":"isolated_nw",
+  "CheckDuplicate":true,
+  "Driver":"bridge",
+  "EnableIPv6": true,
+  "IPAM":{
+    "Driver": "default",
+    "Config":[
+      {
+        "Subnet":"172.20.0.0/16",
+        "IPRange":"172.20.10.0/24",
+        "Gateway":"172.20.10.11"
+      },
+      {
+        "Subnet":"2001:db8:abcd::/64",
+        "Gateway":"2001:db8:abcd::1011"
+      }
+    ],
+    "Options": {
+      "foo": "bar"
+    }
+  },
+  "Internal":true,
+  "Options": {
+    "com.docker.network.bridge.default_bridge": "true",
+    "com.docker.network.bridge.enable_icc": "true",
+    "com.docker.network.bridge.enable_ip_masquerade": "true",
+    "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+    "com.docker.network.bridge.name": "docker0",
+    "com.docker.network.driver.mtu": "1500"
+  },
+  "Labels": {
+    "com.example.some-label": "some-value",
+    "com.example.some-other-label": "some-other-value"
+  }
+}
+```
+
+**Example response**:
+
+```
+HTTP/1.1 201 Created
+Content-Type: application/json
+
+{
+  "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30",
+  "Warning": ""
+}
+```
+
+**Status codes**:
+
+- **201** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - plugin not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Name** - The new network's name. This is a mandatory field.
+- **CheckDuplicate** - Requests the daemon to check for networks with the same name. Defaults to `false`.
+  Because networks are primarily keyed on a random ID rather than on the name, and a network name is
+  strictly a user-friendly alias for the network that is uniquely identified by its ID, there is no
+  guaranteed way to check for duplicates. `CheckDuplicate` provides best-effort checking for networks
+  with the same name, but it is not guaranteed to catch all name collisions.
+- **Driver** - Name of the network driver plugin to use. Defaults to the `bridge` driver
+- **Internal** - Restrict external access to the network
+- **IPAM** - Optional custom IP scheme for the network
+  - **Driver** - Name of the IPAM driver to use. Defaults to the `default` driver
+  - **Config** - List of IPAM configuration options, specified as a map:
+    `{"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}`
+  - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}`
+- **EnableIPv6** - Enable IPv6 on the network
+- **Options** - Network specific options to be used by the drivers
+- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}`
+
+#### Connect a container to a network
+
+`POST /networks/(id or name)/connect`
+
+Connect a container to a network
+
+**Example request**:
+
+```
+POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "EndpointConfig": {
+    "IPAMConfig": {
+      "IPv4Address":"172.24.56.89",
+      "IPv6Address":"2001:db8::5689"
+    }
+  }
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for swarm scoped networks
+- **404** - network or container not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Container** - The id or name of the container to connect to the network
+
+#### Disconnect a container from a network
+
+`POST /networks/(id or name)/disconnect`
+
+Disconnect a container from a network
+
+**Example request**:
+
+```
+POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
+Content-Type: application/json
+
+{
+  "Container":"3613f73ba0e4",
+  "Force":false
+}
+```
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** - no error
+- **403** - operation not supported for swarm scoped networks
+- **404** - network or container not found
+- **500** - server error
+
+**JSON parameters**:
+
+- **Container** - The id or name of the container to disconnect from the network
+- **Force** - Force the container to disconnect from the network
+
+#### Remove a network
+
+`DELETE /networks/(id or name)`
+
+Instruct the driver to remove the network (`id`).
+
+**Example request**:
+
+    DELETE /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 204 No Content
+
+**Status codes**:
+
+- **204** - no error
+- **403** - operation not supported for pre-defined networks
+- **404** - no such network
+- **500** - server error
+
+### 3.6 Plugins (experimental)
+
+#### List plugins
+
+`GET /plugins`
+
+Returns information about installed plugins.
+
+**Example request**:
+
+    GET /v1.24/plugins HTTP/1.1
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+
+[
+  {
+    "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078",
+    "Name": "tiborvass/no-remove",
+    "Tag": "latest",
+    "Active": true,
+    "Config": {
+      "Mounts": [
+        {
+          "Name": "",
+          "Description": "",
+          "Settable": null,
+          "Source": "/data",
+          "Destination": "/data",
+          "Type": "bind",
+          "Options": [
+            "shared",
+            "rbind"
+          ]
+        },
+        {
+          "Name": "",
+          "Description": "",
+          "Settable": null,
+          "Source": null,
+          "Destination": "/foobar",
+          "Type": "tmpfs",
+          "Options": null
+        }
+      ],
+      "Env": [
+        "DEBUG=1"
+      ],
+      "Args": null,
+      "Devices": null
+    },
+    "Manifest": {
+      "ManifestVersion": "v0",
+      "Description": "A test plugin for Docker",
+      "Documentation": "https://docs.docker.com/engine/extend/plugins/",
+      "Interface": {
+        "Types": [
+          "docker.volumedriver/1.0"
+        ],
+        "Socket": "plugins.sock"
+      },
+      "Entrypoint": [
+        "plugin-no-remove",
+        "/data"
+      ],
+      "Workdir": "",
+      "User": {
+      },
+      "Network": {
+        "Type": "host"
+      },
+      "Capabilities": null,
+      "Mounts": [
+        {
+          "Name": "",
+          "Description": "",
+          "Settable": null,
+          "Source": "/data",
+          "Destination": "/data",
+          "Type": "bind",
+          "Options": [
+            "shared",
+            "rbind"
+          ]
+        },
+        {
+          "Name": "",
+          "Description": "",
+          "Settable": null,
+          "Source": null,
+          "Destination": "/foobar",
+          "Type": "tmpfs",
+          "Options": null
+        }
+      ],
+      "Devices": [
+        {
+          "Name": "device",
+          "Description": "a host device to mount",
+          "Settable": null,
+          "Path": "/dev/cpu_dma_latency"
+        }
+      ],
+      "Env": [
+        {
+          "Name": "DEBUG",
+          "Description": "If set, prints debug messages",
+          "Settable": null,
+          "Value": "1"
+        }
+      ],
+      "Args": {
+        "Name": "args",
+        "Description": "command line arguments",
+        "Settable": null,
+        "Value": [
+        ]
+      }
+    }
+  }
+]
+```
+
+**Status codes**:
+
+- **200** - no error
+- **500** - server error
+
+#### Install a plugin
+
+`POST /plugins/pull?name=<plugin name>`
+
+Pulls and installs a plugin. After the plugin is installed, it can be enabled
+using the [`POST /plugins/(plugin name)/enable` endpoint](#enable-a-plugin).
+
+**Example request**:
+
+```
+POST /v1.24/plugins/pull?name=tiborvass/no-remove:latest HTTP/1.1
+```
+
+The `:latest` tag is optional, and is used as default if omitted. When using
+this endpoint to pull a plugin from the registry, the `X-Registry-Auth` header
+can be used to include a base64-encoded AuthConfig object. Refer to the [create
+an image](#create-an-image) section for more details.
+
+**Example response**:
+
+```
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 175
+
+[
+  {
+    "Name": "network",
+    "Description": "",
+    "Value": [
+      "host"
+    ]
+  },
+  {
+    "Name": "mount",
+    "Description": "",
+    "Value": [
+      "/data"
+    ]
+  },
+  {
+    "Name": "device",
+    "Description": "",
+    "Value": [
+      "/dev/cpu_dma_latency"
+    ]
+  }
+]
+```
+
+**Query parameters**:
+
+- **name** - Name of the plugin to pull. The name may include a tag or digest.
+  This parameter is required.
+
+**Status codes**:
+
+- **200** - no error
+- **500** - error parsing reference / not a valid repository/tag: repository
+  name must have at least one component
+- **500** - plugin already exists
+
+#### Inspect a plugin
+
+`GET /plugins/(plugin name)`
+
+Returns detailed information about an installed plugin.
+
+**Example request**:
+
+```
+GET /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1
+```
+
+The `:latest` tag is optional, and is used as default if omitted.
+ + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": false, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed + +#### Enable a plugin + +`POST /plugins/(plugin name)/enable` + +Enables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/enable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already enabled + +#### Disable a plugin + +`POST /plugins/(plugin name)/disable` + +Disables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/disable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is already disabled + +#### Remove a plugin + +`DELETE /plugins/(plugin name)` + +Removes a plugin + +**Example request**: + +``` +DELETE /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is active + + + +### 3.7 Nodes + +**Note**: Node operations require the engine to be part of a swarm. 
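+
+The node, swarm, service, and task endpoints below are plain HTTP, like the
+rest of the API. As a client-side sketch (assuming the default daemon socket
+at `/var/run/docker.sock`; adjust for your setup), the following Go program
+issues the `GET /nodes` request described below:
+
+```
+package main
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "net"
+    "net/http"
+)
+
+func main() {
+    // Dial the daemon's Unix socket instead of a TCP address; the request
+    // line still carries the versioned path.
+    client := &http.Client{
+        Transport: &http.Transport{
+            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+                return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+            },
+        },
+    }
+    // The host in the URL is a placeholder; routing happens over the socket.
+    resp, err := client.Get("http://docker/v1.24/nodes")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println(resp.Status)
+    fmt.Println(string(body))
+}
+```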
+
+#### List nodes
+
+
+`GET /nodes`
+
+List nodes
+
+**Example request**:
+
+    GET /v1.24/nodes HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "ID": "24ifsmvkjbyhk",
+        "Version": {
+          "Index": 8
+        },
+        "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+        "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+        "Spec": {
+          "Name": "my-node",
+          "Role": "manager",
+          "Availability": "active",
+          "Labels": {
+            "foo": "bar"
+          }
+        },
+        "Description": {
+          "Hostname": "bf3067039e47",
+          "Platform": {
+            "Architecture": "x86_64",
+            "OS": "linux"
+          },
+          "Resources": {
+            "NanoCPUs": 4000000000,
+            "MemoryBytes": 8272408576
+          },
+          "Engine": {
+            "EngineVersion": "1.12.0",
+            "Labels": {
+              "foo": "bar"
+            },
+            "Plugins": [
+              {
+                "Type": "Volume",
+                "Name": "local"
+              },
+              {
+                "Type": "Network",
+                "Name": "bridge"
+              },
+              {
+                "Type": "Network",
+                "Name": "null"
+              },
+              {
+                "Type": "Network",
+                "Name": "overlay"
+              }
+            ]
+          }
+        },
+        "Status": {
+          "State": "ready"
+        },
+        "ManagerStatus": {
+          "Leader": true,
+          "Reachability": "reachable",
+          "Addr": "172.17.0.2:2377"
+        }
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  nodes list. Available filters:
+  - `id=<node id>`
+  - `label=<engine label>`
+  - `membership=(accepted|pending)`
+  - `name=<node name>`
+  - `role=(manager|worker)`
+
+**Status codes**:
+
+- **200** – no error
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect a node
+
+
+`GET /nodes/(id or name)`
+
+Return low-level information on the node `id`
+
+**Example request**:
+
+    GET /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "ID": "24ifsmvkjbyhk",
+      "Version": {
+        "Index": 8
+      },
+      "CreatedAt": "2016-06-07T20:31:11.853781916Z",
+      "UpdatedAt": "2016-06-07T20:31:11.999868824Z",
+      "Spec": {
+        "Name": "my-node",
+        "Role": "manager",
+        "Availability": "active",
+        "Labels": {
+          "foo": "bar"
+        }
+      },
+      "Description": {
+        "Hostname": "bf3067039e47",
+        "Platform": {
+          "Architecture": "x86_64",
+          "OS": "linux"
+        },
+        "Resources": {
+          "NanoCPUs": 4000000000,
+          "MemoryBytes": 8272408576
+        },
+        "Engine": {
+          "EngineVersion": "1.12.0",
+          "Labels": {
+            "foo": "bar"
+          },
+          "Plugins": [
+            {
+              "Type": "Volume",
+              "Name": "local"
+            },
+            {
+              "Type": "Network",
+              "Name": "bridge"
+            },
+            {
+              "Type": "Network",
+              "Name": "null"
+            },
+            {
+              "Type": "Network",
+              "Name": "overlay"
+            }
+          ]
+        }
+      },
+      "Status": {
+        "State": "ready"
+      },
+      "ManagerStatus": {
+        "Leader": true,
+        "Reachability": "reachable",
+        "Addr": "172.17.0.2:2377"
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Remove a node
+
+
+`DELETE /nodes/(id or name)`
+
+Remove a node from the swarm.
+
+**Example request**:
+
+    DELETE /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **force** - 1/True/true or 0/False/false. Force remove a node from the swarm.
+  Default `false`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Update a node
+
+
+`POST /nodes/(id)/update`
+
+Update a node.
+
+The payload of the `POST` request is the new `NodeSpec` and
+overrides the current `NodeSpec` for the specified node.
+
+If `Availability` or `Role` is omitted, this returns an
+error. Any other omitted field resets the current value to either
+an empty value or the default cluster-wide value.
+
+**Example request**:
+
+    POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Availability": "active",
+      "Name": "node-name",
+      "Role": "manager",
+      "Labels": {
+        "foo": "bar"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **version** – The version number of the node object being updated. This is
+  required to avoid conflicting writes.
+
+JSON Parameters:
+
+- **Annotations** – Optional metadata to associate with the node.
+  - **Name** – User-defined name for the node.
+  - **Labels** – A map of labels to associate with the node (e.g.,
+    `{"key":"value", "key2":"value2"}`).
+- **Role** - Role of the node (worker|manager).
+- **Availability** - Availability of the node (active|pause|drain).
+
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such node
+- **406** – node is not part of a swarm
+- **500** – server error
+
+### 3.8 Swarm
+
+#### Inspect swarm
+
+
+`GET /swarm`
+
+Inspect swarm
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "CreatedAt" : "2016-08-15T16:00:20.349727406Z",
+      "Spec" : {
+        "Dispatcher" : {
+          "HeartbeatPeriod" : 5000000000
+        },
+        "Orchestration" : {
+          "TaskHistoryRetentionLimit" : 10
+        },
+        "CAConfig" : {
+          "NodeCertExpiry" : 7776000000000000
+        },
+        "Raft" : {
+          "LogEntriesForSlowFollowers" : 500,
+          "HeartbeatTick" : 1,
+          "SnapshotInterval" : 10000,
+          "ElectionTick" : 3
+        },
+        "TaskDefaults" : {},
+        "Name" : "default"
+      },
+      "JoinTokens" : {
+        "Worker" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a",
+        "Manager" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l"
+      },
+      "ID" : "70ilmkj2f6sp2137c753w2nmt",
+      "UpdatedAt" : "2016-08-15T16:32:09.623207604Z",
+      "Version" : {
+        "Index" : 51
+      }
+    }
+
+**Status codes**:
+
+- **200** - no error
+- **406** – node is not part of a swarm
+- **500** - server error
+
+#### Initialize a new swarm
+
+
+`POST /swarm/init`
+
+Initialize a new swarm. The body of the HTTP response includes the node ID.
+
+**Example request**:
+
+    POST /v1.24/swarm/init HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "ListenAddr": "0.0.0.0:2377",
+      "AdvertiseAddr": "192.168.1.1:2377",
+      "ForceNewCluster": false,
+      "Spec": {
+        "Orchestration": {},
+        "Raft": {},
+        "Dispatcher": {},
+        "CAConfig": {}
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 28
+    Content-Type: application/json
+    Date: Thu, 01 Sep 2016 21:49:13 GMT
+    Server: Docker/1.12.0 (linux)
+
+    "7v2t30z9blmxuhnyo6s4cpenp"
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is already part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication, as well as determining
+  the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an
+  address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is
+  used.
+- **AdvertiseAddr** – Externally reachable address advertised to other nodes.
This can either be
+  an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the port number from the listen
+  address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
+  possible.
+- **ForceNewCluster** – Force creation of a new swarm.
+- **Spec** – Configuration settings for the new swarm.
+  - **Orchestration** – Configuration settings for the orchestration aspects of the swarm.
+    - **TaskHistoryRetentionLimit** – Maximum number of task history entries to store.
+  - **Raft** – Raft related configuration.
+    - **SnapshotInterval** – Number of log entries between snapshots.
+    - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot.
+    - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow
+      followers after a snapshot is created.
+    - **HeartbeatTick** – Number of ticks (in seconds) between each heartbeat.
+    - **ElectionTick** – Number of ticks (in seconds) needed without a leader to trigger a new
+      election.
+  - **Dispatcher** – Configuration settings for the task dispatcher.
+    - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
+  - **CAConfig** – Certificate authority configuration.
+    - **NodeCertExpiry** – Automatic expiry for node certificates.
+    - **ExternalCA** - Configuration for forwarding signing requests to an external
+      certificate authority.
+      - **Protocol** - Protocol for communication with the external CA
+        (currently only "cfssl" is supported).
+      - **URL** - URL where certificate signing requests should be sent.
+      - **Options** - An object with key/value pairs that are interpreted
+        as protocol-specific options for the external CA driver.
+
+#### Join an existing swarm
+
+`POST /swarm/join`
+
+Join an existing swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/join HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "ListenAddr": "0.0.0.0:2377",
+      "AdvertiseAddr": "192.168.1.1:2377",
+      "RemoteAddrs": ["node1:2377"],
+      "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is already part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to
+  manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
+- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be
+  an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
+  number, like `eth0:4567`. If the port number is omitted, the port number from the listen
+  address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
+  possible.
+- **RemoteAddrs** – Addresses of one or more manager nodes already participating in the swarm.
+- **JoinToken** – Secret token for joining this swarm.
+
+#### Leave a swarm
+
+
+`POST /swarm/leave`
+
+Leave a swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/leave HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **force** - Boolean (0/1, false/true).
Force leave the swarm, even if this is the last manager and leaving will break the cluster.
+
+**Status codes**:
+
+- **200** – no error
+- **406** – node is not part of a swarm
+- **500** - server error
+
+#### Update a swarm
+
+
+`POST /swarm/update`
+
+Update a swarm
+
+**Example request**:
+
+    POST /v1.24/swarm/update HTTP/1.1
+
+    {
+      "Name": "default",
+      "Orchestration": {
+        "TaskHistoryRetentionLimit": 10
+      },
+      "Raft": {
+        "SnapshotInterval": 10000,
+        "LogEntriesForSlowFollowers": 500,
+        "HeartbeatTick": 1,
+        "ElectionTick": 3
+      },
+      "Dispatcher": {
+        "HeartbeatPeriod": 5000000000
+      },
+      "CAConfig": {
+        "NodeCertExpiry": 7776000000000000
+      },
+      "JoinTokens": {
+        "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx",
+        "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+      }
+    }
+
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **version** – The version number of the swarm object being updated. This is
+  required to avoid conflicting writes.
+- **rotateWorkerToken** - Set to `true` (or `1`) to rotate the worker join token.
+- **rotateManagerToken** - Set to `true` (or `1`) to rotate the manager join token.
+
+**Status codes**:
+
+- **200** – no error
+- **400** – bad parameter
+- **406** – node is not part of a swarm
+- **500** - server error
+
+JSON Parameters:
+
+- **Orchestration** – Configuration settings for the orchestration aspects of the swarm.
+  - **TaskHistoryRetentionLimit** – Maximum number of task history entries to store.
+- **Raft** – Raft related configuration.
+  - **SnapshotInterval** – Number of log entries between snapshots.
+  - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot.
+  - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow
+    followers after a snapshot is created.
+  - **HeartbeatTick** – Number of ticks (in seconds) between each heartbeat.
+  - **ElectionTick** – Number of ticks (in seconds) needed without a leader to trigger a new
+    election.
+- **Dispatcher** – Configuration settings for the task dispatcher.
+  - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
+- **CAConfig** – CA configuration.
+  - **NodeCertExpiry** – Automatic expiry for node certificates.
+  - **ExternalCA** - Configuration for forwarding signing requests to an external
+    certificate authority.
+    - **Protocol** - Protocol for communication with the external CA
+      (currently only "cfssl" is supported).
+    - **URL** - URL where certificate signing requests should be sent.
+    - **Options** - An object with key/value pairs that are interpreted
+      as protocol-specific options for the external CA driver.
+- **JoinTokens** - Tokens that can be used by other nodes to join the swarm.
+  - **Worker** - Token to use for joining as a worker.
+  - **Manager** - Token to use for joining as a manager.
+
+### 3.9 Services
+
+**Note**: Service operations require the engine to be part of a swarm.
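+
+The update endpoints in sections 3.7 through 3.9 all take a `version` query
+parameter to avoid conflicting writes. A minimal sketch of that
+read-modify-write cycle in Go (the JSON literal stands in for the body of a
+real inspect response):
+
+```
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+)
+
+// Only the fields needed for the cycle; real objects carry many more.
+type versioned struct {
+    ID      string
+    Version struct{ Index uint64 }
+}
+
+func main() {
+    // Read the current Version.Index from an inspect response, then echo it
+    // back as the `version` query parameter of the update call so the daemon
+    // can reject writes based on a stale copy.
+    body := []byte(`{"ID": "1cb4dnqcyx6m66g2t538x3rxha", "Version": {"Index": 23}}`)
+    var v versioned
+    if err := json.Unmarshal(body, &v); err != nil {
+        panic(err)
+    }
+    fmt.Printf("POST /v1.24/services/%s/update?version=%d\n", v.ID, v.Version.Index)
+}
+```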
+
+#### List services
+
+
+`GET /services`
+
+List services
+
+**Example request**:
+
+    GET /v1.24/services HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "ID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Version": {
+          "Index": 19
+        },
+        "CreatedAt": "2016-06-07T21:05:51.880065305Z",
+        "UpdatedAt": "2016-06-07T21:07:29.962229872Z",
+        "Spec": {
+          "Name": "hopeful_cori",
+          "TaskTemplate": {
+            "ContainerSpec": {
+              "Image": "redis"
+            },
+            "Resources": {
+              "Limits": {},
+              "Reservations": {}
+            },
+            "RestartPolicy": {
+              "Condition": "any",
+              "MaxAttempts": 0
+            },
+            "Placement": {
+              "Constraints": [
+                "node.role == worker"
+              ]
+            }
+          },
+          "Mode": {
+            "Replicated": {
+              "Replicas": 1
+            }
+          },
+          "UpdateConfig": {
+            "Parallelism": 1,
+            "FailureAction": "pause"
+          },
+          "EndpointSpec": {
+            "Mode": "vip",
+            "Ports": [
+              {
+                "Protocol": "tcp",
+                "TargetPort": 6379,
+                "PublishedPort": 30001
+              }
+            ]
+          }
+        },
+        "Endpoint": {
+          "Spec": {
+            "Mode": "vip",
+            "Ports": [
+              {
+                "Protocol": "tcp",
+                "TargetPort": 6379,
+                "PublishedPort": 30001
+              }
+            ]
+          },
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ],
+          "VirtualIPs": [
+            {
+              "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Addr": "10.255.0.2/16"
+            },
+            {
+              "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Addr": "10.255.0.3/16"
+            }
+          ]
+        }
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  services list. Available filters:
+  - `id=<service id>`
+  - `label=<service label>`
+  - `name=<service name>`
+
+**Status codes**:
+
+- **200** – no error
+- **406** – node is not part of a swarm
+- **500** – server error
+
+#### Create a service
+
+`POST /services/create`
+
+Create a service. When using this endpoint to create a service using a private
+repository from the registry, the `X-Registry-Auth` header must be used to
+include a base64-encoded AuthConfig object. Refer to the [create an
+image](#create-an-image) section for more details.
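+
+As an illustrative sketch of constructing the `X-Registry-Auth` value (the
+credentials below are placeholders; the Docker CLI uses the URL-safe base64
+alphabet for this header):
+
+```
+package main
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+)
+
+func main() {
+    // AuthConfig as a JSON object; username and password are placeholders.
+    auth := map[string]string{
+        "username":      "jdoe",
+        "password":      "secret",
+        "serveraddress": "https://index.docker.io/v1/",
+    }
+    buf, err := json.Marshal(auth)
+    if err != nil {
+        panic(err)
+    }
+    // The header carries the JSON object base64-encoded.
+    fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
+}
+```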
+
+**Example request**:
+
+    POST /v1.24/services/create HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "web",
+      "TaskTemplate": {
+        "ContainerSpec": {
+          "Image": "nginx:alpine",
+          "Mounts": [
+            {
+              "ReadOnly": true,
+              "Source": "web-data",
+              "Target": "/usr/share/nginx/html",
+              "Type": "volume",
+              "VolumeOptions": {
+                "DriverConfig": {
+                },
+                "Labels": {
+                  "com.example.something": "something-value"
+                }
+              }
+            }
+          ],
+          "User": "33"
+        },
+        "Networks": [
+          {
+            "Target": "overlay1"
+          }
+        ],
+        "LogDriver": {
+          "Name": "json-file",
+          "Options": {
+            "max-file": "3",
+            "max-size": "10M"
+          }
+        },
+        "Placement": {
+          "Constraints": [
+            "node.role == worker"
+          ]
+        },
+        "Resources": {
+          "Limits": {
+            "MemoryBytes": 104857600
+          },
+          "Reservations": {
+          }
+        },
+        "RestartPolicy": {
+          "Condition": "on-failure",
+          "Delay": 10000000000,
+          "MaxAttempts": 10
+        }
+      },
+      "Mode": {
+        "Replicated": {
+          "Replicas": 4
+        }
+      },
+      "UpdateConfig": {
+        "Delay": 30000000000,
+        "Parallelism": 2,
+        "FailureAction": "pause"
+      },
+      "EndpointSpec": {
+        "Ports": [
+          {
+            "Protocol": "tcp",
+            "PublishedPort": 8080,
+            "TargetPort": 80
+          }
+        ]
+      },
+      "Labels": {
+        "foo": "bar"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+      "ID":"ak7w3gjqoa3kuz8xcpnyy0pvl"
+    }
+
+**Status codes**:
+
+- **201** – no error
+- **403** - network is not eligible for services
+- **406** – node is not part of a swarm
+- **409** – name conflicts with an existing object
+- **500** - server error
+
+**JSON Parameters**:
+
+- **Name** – User-defined name for the service.
+- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`).
+- **TaskTemplate** – Specification of the tasks to start as part of the new service.
+  - **ContainerSpec** - Container settings for containers started as part of this task.
+    - **Image** – A string specifying the image name to use for the container.
+    - **Command** – The command to be run in the image.
+    - **Args** – Arguments to the command.
+    - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`.
+    - **Dir** – A string specifying the working directory for commands to run in.
+    - **User** – A string value specifying the user inside the container.
+    - **Labels** – A map of labels to associate with the service (e.g.,
+      `{"key":"value", "key2":"value2"}`).
+    - **Mounts** – Specification for mounts to be added to containers
+      created as part of the service.
+      - **Target** – Container path.
+      - **Source** – Mount source (e.g. a volume name, a host path).
+      - **Type** – The mount type (`bind` or `volume`).
+      - **ReadOnly** – A boolean indicating whether the mount should be read-only.
+      - **BindOptions** - Optional configuration for the `bind` type.
+        - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
+      - **VolumeOptions** – Optional configuration for the `volume` type.
+        - **NoCopy** – A boolean indicating whether the volume should be
+          populated with the data from the target (default: false).
+        - **Labels** – User-defined name and labels for the volume.
+        - **DriverConfig** – Map of driver-specific options.
+          - **Name** - Name of the driver to use to create the volume.
+          - **Options** - Key/value map of driver-specific options.
+    - **StopGracePeriod** – Amount of time to wait for the container to terminate before
+      forcefully killing it.
+  - **LogDriver** - Log configuration for containers created as part of the
+    service.
+    - **Name** - Name of the logging driver to use (`json-file`, `syslog`,
+      `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`).
+    - **Options** - Driver-specific options.
+  - **Resources** – Resource requirements which apply to each individual container created as part
+    of the service.
+    - **Limits** – Define resource limits.
+      - **NanoCPUs** – CPU limit in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory limit in Bytes.
+    - **Reservations** – Define resource reservations.
+      - **NanoCPUs** – CPU reservation in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory reservation in Bytes.
+  - **RestartPolicy** – Specification for the restart policy which applies to containers created
+    as part of this service.
+    - **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
+    - **Delay** – Delay between restart attempts.
+    - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value
+      is 0, which is ignored).
+    - **Window** – Time window used to evaluate the restart policy (default value is
+      0, which is unbounded).
+  - **Placement** – Restrictions on where a service can run.
+    - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+  - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+    parallelism).
+  - **Delay** – Amount of time between updates.
+  - **FailureAction** - Action to take if an updated task fails to run, or stops running during the
+    update. Values are `continue` and `pause`.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – Properties that can be configured to access and load balance a service.
+  - **Mode** – The mode of resolution to use for internal load balancing
+    between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided.
+  - **Ports** – List of exposed ports that this service is accessible on from
+    the outside, in the form of:
+    `{"Protocol": <"tcp"|"udp">, "PublishedPort": <port>, "TargetPort": <port>}`.
+    Ports can only be provided if `vip` resolution mode is used.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/json"`.
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either
+  login information, or a token. Refer to the [create an image](#create-an-image)
+  section for more details.
+
+
+#### Remove a service
+
+
+`DELETE /services/(id or name)`
+
+Stop and remove the service `id`
+
+**Example request**:
+
+    DELETE /v1.24/services/16253994b7c4 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect one or more services
+
+
+`GET /services/(id or name)`
+
+Return information on the service `id`.
+
+**Example request**:
+
+    GET /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl",
+      "Version": {
+        "Index": 95
+      },
+      "CreatedAt": "2016-06-07T21:10:20.269723157Z",
+      "UpdatedAt": "2016-06-07T21:10:20.276301259Z",
+      "Spec": {
+        "Name": "redis",
+        "TaskTemplate": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "Mode": {
+          "Replicated": {
+            "Replicas": 1
+          }
+        },
+        "UpdateConfig": {
+          "Parallelism": 1,
+          "FailureAction": "pause"
+        },
+        "EndpointSpec": {
+          "Mode": "vip",
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ]
+        }
+      },
+      "Endpoint": {
+        "Spec": {
+          "Mode": "vip",
+          "Ports": [
+            {
+              "Protocol": "tcp",
+              "TargetPort": 6379,
+              "PublishedPort": 30001
+            }
+          ]
+        },
+        "Ports": [
+          {
+            "Protocol": "tcp",
+            "TargetPort": 6379,
+            "PublishedPort": 30001
+          }
+        ],
+        "VirtualIPs": [
+          {
+            "NetworkID": "4qvuz4ko70xaltuqbt8956gd1",
+            "Addr": "10.255.0.4/16"
+          }
+        ]
+      }
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Update a service
+
+`POST /services/(id)/update`
+
+Update a service. When using this endpoint to update a service that uses a
+private repository from the registry, the `X-Registry-Auth` header can be used
+to update the authentication information that is stored for the service.
+The header contains a base64-encoded AuthConfig object. Refer to the [create an
+image](#create-an-image) section for more details.
+
+**Example request**:
+
+    POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1
+    Content-Type: application/json
+
+    {
+      "Name": "top",
+      "TaskTemplate": {
+        "ContainerSpec": {
+          "Image": "busybox",
+          "Args": [
+            "top"
+          ]
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "Mode": {
+        "Replicated": {
+          "Replicas": 1
+        }
+      },
+      "UpdateConfig": {
+        "Parallelism": 1
+      },
+      "EndpointSpec": {
+        "Mode": "vip"
+      }
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: text/plain; charset=utf-8
+
+**JSON Parameters**:
+
+- **Name** – User-defined name for the service. Note that renaming services is not supported.
+- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`).
+- **TaskTemplate** – Specification of the tasks to start as part of the new service.
+  - **ContainerSpec** - Container settings for containers started as part of this task.
+    - **Image** – A string specifying the image name to use for the container.
+    - **Command** – The command to be run in the image.
+    - **Args** – Arguments to the command.
+    - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`.
+    - **Dir** – A string specifying the working directory for commands to run in.
+    - **User** – A string value specifying the user inside the container.
+    - **Labels** – A map of labels to associate with the service (e.g.,
+      `{"key":"value", "key2":"value2"}`).
+    - **Mounts** – Specification for mounts to be added to containers created as part of the new
+      service.
+      - **Target** – Container path.
+      - **Source** – Mount source (e.g. a volume name, a host path).
+      - **Type** – The mount type (`bind` or `volume`).
+      - **ReadOnly** – A boolean indicating whether the mount should be read-only.
+      - **BindOptions** - Optional configuration for the `bind` type.
+        - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.
+      - **VolumeOptions** – Optional configuration for the `volume` type.
+        - **NoCopy** – A boolean indicating whether the volume should be
+          populated with the data from the target (default: false).
+        - **Labels** – User-defined name and labels for the volume.
+        - **DriverConfig** – Map of driver-specific options.
+          - **Name** - Name of the driver to use to create the volume.
+          - **Options** - Key/value map of driver-specific options.
+    - **StopGracePeriod** – Amount of time to wait for the container to terminate before
+      forcefully killing it.
+  - **Resources** – Resource requirements which apply to each individual container created as part
+    of the service.
+    - **Limits** – Define resource limits.
+      - **NanoCPUs** – CPU limit in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory limit in Bytes.
+    - **Reservations** – Define resource reservations.
+      - **NanoCPUs** – CPU reservation in units of 10^-9 CPU shares.
+      - **MemoryBytes** – Memory reservation in Bytes.
+  - **RestartPolicy** – Specification for the restart policy which applies to containers created
+    as part of this service.
+    - **Condition** – Condition for restart (`none`, `on-failure`, or `any`).
+    - **Delay** – Delay between restart attempts.
+    - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value
+      is 0, which is ignored).
+    - **Window** – Time window used to evaluate the restart policy (default value is
+      0, which is unbounded).
+  - **Placement** – Restrictions on where a service can run.
+    - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`.
+- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`).
+- **UpdateConfig** – Specification for the update strategy of the service.
+  - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited
+    parallelism).
+  - **Delay** – Amount of time between updates.
+- **Networks** – Array of network names or IDs to attach the service to.
+- **EndpointSpec** – Properties that can be configured to access and load balance a service.
+  - **Mode** – The mode of resolution to use for internal load balancing
+    between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided.
+  - **Ports** – List of exposed ports that this service is accessible on from
+    the outside, in the form of:
+    `{"Protocol": <"tcp"|"udp">, "PublishedPort": <port>, "TargetPort": <port>}`.
+    Ports can only be provided if `vip` resolution mode is used.
+
+**Query parameters**:
+
+- **version** – The version number of the service object being updated. This is
+  required to avoid conflicting writes.
+
+**Request Headers**:
+
+- **Content-type** – Set to `"application/json"`.
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either
+  login information, or a token. Refer to the [create an image](#create-an-image)
+  section for more details.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such service
+- **406** - node is not part of a swarm
+- **500** – server error
+
+### 3.10 Tasks
+
+**Note**: Task operations require the engine to be part of a swarm.
+
+#### List tasks
+
+
+`GET /tasks`
+
+List tasks
+
+**Example request**:
+
+    GET /v1.24/tasks HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+        "Version": {
+          "Index": 71
+        },
+        "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+        "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+        "Spec": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Slot": 1,
+        "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+        "Status": {
+          "Timestamp": "2016-06-07T21:07:31.290032978Z",
+          "State": "running",
+          "Message": "started",
+          "ContainerStatus": {
+            "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
+            "PID": 677
+          }
+        },
+        "DesiredState": "running",
+        "NetworksAttachments": [
+          {
+            "Network": {
+              "ID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Version": {
+                "Index": 18
+              },
+              "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+              "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+              "Spec": {
+                "Name": "ingress",
+                "Labels": {
+                  "com.docker.swarm.internal": "true"
+                },
+                "DriverConfiguration": {},
+                "IPAMOptions": {
+                  "Driver": {},
+                  "Configs": [
+                    {
+                      "Subnet": "10.255.0.0/16",
+                      "Gateway": "10.255.0.1"
+                    }
+                  ]
+                }
+              },
+              "DriverState": {
+                "Name": "overlay",
+                "Options": {
+                  "com.docker.network.driver.overlay.vxlanid_list": "256"
+                }
+              },
+              "IPAMOptions": {
+                "Driver": {
+                  "Name": "default"
+                },
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "Addresses": [
+              "10.255.0.10/16"
+            ]
+          }
+        ]
+      },
+      {
+        "ID": "1yljwbmlr8er2waf8orvqpwms",
+        "Version": {
+          "Index": 30
+        },
+        "CreatedAt": "2016-06-07T21:07:30.019104782Z",
+        "UpdatedAt": "2016-06-07T21:07:30.231958098Z",
+        "Name": "hopeful_cori",
+        "Spec": {
+          "ContainerSpec": {
+            "Image": "redis"
+          },
+          "Resources": {
+            "Limits": {},
+            "Reservations": {}
+          },
+          "RestartPolicy": {
+            "Condition": "any",
+            "MaxAttempts": 0
+          },
+          "Placement": {}
+        },
+        "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+        "Slot": 1,
+        "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+        "Status": {
+          "Timestamp": "2016-06-07T21:07:30.202183143Z",
+          "State": "shutdown",
+          "Message": "shutdown",
+          "ContainerStatus": {
+            "ContainerID": "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
+          }
+        },
+        "DesiredState": "shutdown",
+        "NetworksAttachments": [
+          {
+            "Network": {
+              "ID": "4qvuz4ko70xaltuqbt8956gd1",
+              "Version": {
+                "Index": 18
+              },
+              "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+              "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+              "Spec": {
+                "Name": "ingress",
+                "Labels": {
+                  "com.docker.swarm.internal": "true"
+                },
+                "DriverConfiguration": {},
+                "IPAMOptions": {
+                  "Driver": {},
+                  "Configs": [
+                    {
+                      "Subnet": "10.255.0.0/16",
+                      "Gateway": "10.255.0.1"
+                    }
+                  ]
+                }
+              },
+              "DriverState": {
+                "Name": "overlay",
+                "Options": {
+                  "com.docker.network.driver.overlay.vxlanid_list": "256"
+                }
+              },
+              "IPAMOptions": {
+                "Driver": {
+                  "Name": "default"
+                },
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "Addresses": [
+              "10.255.0.5/16"
+            ]
+          }
+        ]
+      }
+    ]
+
+**Query parameters**:
+
+- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the
+  tasks list.
Available filters:
+  - `id=<task id>`
+  - `name=<task name>`
+  - `service=<service name>`
+  - `node=<node id or name>`
+  - `label=key` or `label="key=value"`
+  - `desired-state=(running | shutdown | accepted)`
+
+**Status codes**:
+
+- **200** – no error
+- **406** - node is not part of a swarm
+- **500** – server error
+
+#### Inspect a task
+
+
+`GET /tasks/(id)`
+
+Get details on the task `id`
+
+**Example request**:
+
+    GET /v1.24/tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+      "ID": "0kzzo1i0y4jz6027t0k7aezc7",
+      "Version": {
+        "Index": 71
+      },
+      "CreatedAt": "2016-06-07T21:07:31.171892745Z",
+      "UpdatedAt": "2016-06-07T21:07:31.376370513Z",
+      "Spec": {
+        "ContainerSpec": {
+          "Image": "redis"
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz",
+      "Slot": 1,
+      "NodeID": "60gvrl6tm78dmak4yl7srz94v",
+      "Status": {
+        "Timestamp": "2016-06-07T21:07:31.290032978Z",
+        "State": "running",
+        "Message": "started",
+        "ContainerStatus": {
+          "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
+          "PID": 677
+        }
+      },
+      "DesiredState": "running",
+      "NetworksAttachments": [
+        {
+          "Network": {
+            "ID": "4qvuz4ko70xaltuqbt8956gd1",
+            "Version": {
+              "Index": 18
+            },
+            "CreatedAt": "2016-06-07T20:31:11.912919752Z",
+            "UpdatedAt": "2016-06-07T21:07:29.955277358Z",
+            "Spec": {
+              "Name": "ingress",
+              "Labels": {
+                "com.docker.swarm.internal": "true"
+              },
+              "DriverConfiguration": {},
+              "IPAMOptions": {
+                "Driver": {},
+                "Configs": [
+                  {
+                    "Subnet": "10.255.0.0/16",
+                    "Gateway": "10.255.0.1"
+                  }
+                ]
+              }
+            },
+            "DriverState": {
+              "Name": "overlay",
+              "Options": {
+                "com.docker.network.driver.overlay.vxlanid_list": "256"
+              }
+            },
+            "IPAMOptions": {
+              "Driver": {
+                "Name": "default"
+              },
+              "Configs": [
+                {
+                  "Subnet": "10.255.0.0/16",
+                  "Gateway": "10.255.0.1"
+                }
+              ]
+            }
+          },
+          "Addresses": [
+            "10.255.0.10/16"
+          ]
+        }
+      ]
+    }
+
+**Status codes**:
+
+- **200** – no error
+- **404** – unknown task
+- **406** - node is not part of a swarm
+- **500** – server error
+
+## 4. Going further
+
+### 4.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+  - Try to pull it.
+  - Then, retry to create the container.
+
+- Start the container.
+
+- If you are not in detached mode:
+  - Attach to the container, using `logs=1` (to have `stdout` and
+    `stderr` from the container's start) and `stream=1`
+
+- If in detached mode or only `stdin` is attached, display the container's id.
+
+### 4.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport `stdin`,
+`stdout`, and `stderr` on the same socket.
+
+To hint potential proxies about connection hijacking, the Docker client sends
+connection upgrade headers, similar to WebSocket:
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When the Docker daemon detects the `Upgrade` header, it switches its status code
+from **200 OK** to **101 UPGRADED** and resends the same headers.
+
+
+### 4.3 CORS Requests
+
+To allow cross-origin requests to the Engine API, set the `--api-cors-header`
+flag when running Docker in daemon mode.
Setting `*` (asterisk) allows all origins;
+the default (blank) disables CORS.
+
+    $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
new file mode 100644
index 0000000..aa089ed
--- /dev/null
+++ b/docs/api/version-history.md
@@ -0,0 +1,318 @@
+---
+title: "Engine API version history"
+description: "Documentation of changes that have been made to Engine API."
+keywords: "API, Docker, rcli, REST, documentation"
+---
+
+
+## v1.31 API changes
+
+[Docker Engine API v1.31](https://docs.docker.com/engine/api/v1.31/) documentation
+
+* `DELETE /secrets/(name)` now returns status code 404 instead of 500 when the secret does not exist.
+* `POST /secrets/create` now returns status code 409 instead of 500 when creating an already existing secret.
+* `POST /secrets/(name)/update` now returns status code 400 instead of 500 when attempting to update anything other than the secret's labels.
+* `POST /nodes/(name)/update` now returns status code 400 instead of 500 when demoting the last node fails.
+* `GET /networks/(id or name)` now takes an optional query parameter `scope` that will filter the network based on the scope (`local`, `swarm`, or `global`).
+* `POST /session` is a new endpoint that can be used for running interactive long-running protocols between the client and
+  the daemon. This endpoint is experimental and only available if the daemon is started with experimental features
+  enabled.
+* `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config.
+
+## v1.30 API changes
+
+[Docker Engine API v1.30](https://docs.docker.com/engine/api/v1.30/) documentation
+
+* `GET /info` now returns the list of supported logging drivers, including plugins.
+* `GET /info` and `GET /swarm` now return the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS
+  leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec.
+* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build.
+* `GET /nodes` and `GET /nodes/{id}` now return additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the
+  issuer's subject and public key.
+* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved from directly contacting the registry.
+* `POST /swarm/update` now accepts three additional parameters as part of the swarm spec's CA configuration: the desired CA certificate for
+  the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force swarm to
+  generate and rotate to a new CA certificate/key pair.
+* `POST /services/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing to specify platforms supported by the service.
+* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for.
Also, response headers are now returned immediately
+  to acknowledge that the server has registered a wait callback for the client.
+
+## v1.29 API changes
+
+[Docker Engine API v1.29](https://docs.docker.com/engine/api/v1.29/) documentation
+
+* `DELETE /networks/(name)` now allows removing the ingress network, the one used to provide the routing-mesh.
+* `POST /networks/create` now supports creating the ingress network, by specifying an `Ingress` boolean field. As of now this is supported only when using the overlay network driver.
+* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
+* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
+* `POST /containers/create`, `POST /services/create` and `POST /services/(id or name)/update` now take the field `StartPeriod` as part of the `HealthConfig`, allowing specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
+* `GET /services/(id)` now accepts an `insertDefaults` query parameter to merge default values into the service inspect output.
+* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The format of the label filter could be `label=<key>`/`label=<key>=<value>` to remove those with the specified labels, or `label!=<key>`/`label!=<key>=<value>` to remove those without the specified labels.
+
+## v1.28 API changes
+
+[Docker Engine API v1.28](https://docs.docker.com/engine/api/v1.28/) documentation
+
+* `POST /containers/create` now includes a `Consistency` field to specify the consistency level for each `Mount`, with possible values `default`, `consistent`, `cached`, or `delegated`.
+* `POST /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig` allowing to set custom device cgroup rules for the created container.
+* Optional query parameter `verbose` for `GET /networks/(id or name)` will now list all services with all the tasks, including the non-local tasks on the given network.
+* `GET /containers/(id or name)/attach/ws` now returns WebSocket in binary frame format for API version >= v1.28, and returns WebSocket in text frame format for API version < v1.28, for the purpose of backward-compatibility.
+* `GET /networks` is optimized to return only the list of all networks and network-specific information. The list of all containers attached to a specific network is removed from this API and is only available using the network-specific `GET /networks/{network-id}`.
+* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only.
+* `POST /build` now accepts an `extrahosts` parameter to specify a host-to-IP mapping to use during the build.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept a `rollback` value for `FailureAction`.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept an optional `RollbackConfig` object which specifies rollback options.
+* `GET /services` now supports a `mode` filter to filter services based on the service mode (either `global` or `replicated`).
+
+* `POST /containers/(name)/update` now supports updating `NanoCPUs` that represents CPU quota in units of 10^-9 CPUs.
+
+## v1.27 API changes
+
+[Docker Engine API v1.27](https://docs.docker.com/engine/api/v1.27/) documentation
+
+* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil` then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used.
+
+## v1.26 API changes
+
+[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation
+
+* `POST /plugins/(plugin name)/upgrade` upgrades a plugin.
+
+## v1.25 API changes
+
+[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation
+
+* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`.
+* `GET /version` now returns `MinAPIVersion`.
+* `POST /build` accepts a `networkmode` parameter to specify the network used during the build.
+* `GET /images/(name)/json` now returns `OsVersion` if populated.
+* `GET /info` now returns `Isolation`.
+* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on the daemon side when the container's process exits.
+* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status.
+* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter.
+* `GET /containers/json` now supports filtering containers by `health` status.
+* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin.
+* `POST /containers/create/` and `POST /containers/(name)/update` now validate restart policies.
+* `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns an error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`).
+* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`.
+* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions.
+* `POST /build` accepts a `cachefrom` parameter to specify images used for build cache.
+* `GET /networks/` endpoint now correctly returns a list of *all* networks,
+  instead of the default network if a trailing slash is provided, but no `name`
+  or `id`.
+* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with a status code of 400 when the container is already being removed.
+* `GET /containers/json` now supports an `is-task` filter to filter
+  containers that are tasks (part of a service in swarm mode).
+* `POST /containers/create` now takes a `StopTimeout` field.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates.
+
+## v1.26 API changes
+
+[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation
+
+* `POST /plugins/(plugin name)/upgrade` upgrades a plugin.
+
+## v1.25 API changes
+
+[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation
+
+* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`.
+* `GET /version` now returns `MinAPIVersion`.
+* `POST /build` accepts a `networkmode` parameter to specify the network used during the build.
+* `GET /images/(name)/json` now returns `OsVersion` if populated.
+* `GET /info` now returns `Isolation`.
+* `POST /containers/create` now takes `AutoRemove` in `HostConfig`, to enable auto-removal of the container on the daemon side when the container's process exits.
+* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, `"exited"` was returned as the status.
+* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter.
+* `GET /containers/json` now supports filtering containers by `health` status.
+* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin.
+* `POST /containers/create/` and `POST /containers/(name)/update` now validate restart policies.
+* `POST /containers/create` now validates `IPAMConfig` in `NetworkingConfig`, and returns an error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`).
+* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`.
+* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions.
+* `POST /build` accepts a `cachefrom` parameter to specify images used for the build cache.
+* `GET /networks/` endpoint now correctly returns a list of *all* networks,
+  instead of the default network, if a trailing slash is provided but no `name`
+  or `id`.
+* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with a status code of 400 when the container name is in a state of removal in progress.
+* `GET /containers/json` now supports an `is-task` filter to filter
+  containers that are tasks (part of a service in swarm mode).
+* `POST /containers/create` now takes a `StopTimeout` field.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates.
+* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update.
+* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array.
+* `GET /networks/(name)` now returns a `Created` field in the response, showing the network's creation time.
+* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution.
+* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field, which holds the driver-specific options used when creating the volume.
+* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process.
+* `POST /containers/prune` prunes stopped containers.
+* `POST /images/prune` prunes unused images.
+* `POST /volumes/prune` prunes unused volumes.
+* `POST /networks/prune` prunes unused networks.
+* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`).
+* Every API response now includes an `API-Version` header specifying the default API version of the server.
+* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate CPU runtime to real-time tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel.
+* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon.
+* `GET /nodes` and `GET /nodes/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that the node connects to the manager from.
+* The `HostConfig` field now includes `NanoCPUs`, which represents CPU quota in units of 10⁻⁹ CPUs.
+* `GET /info` now returns more structured information about security options.
+* The `HostConfig` field now includes `CpuCount`, which represents the number of CPUs available for execution by the container. Windows daemon only.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocates a pseudo-TTY for the container.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS-related configuration in the resolver configuration file (`resolv.conf`) through `Nameservers`, `Search`, and `Options`.
+* `GET /networks/(id or name)` now includes the IP and name of all peer nodes for swarm-mode overlay networks.
+* `GET /plugins` lists plugins.
+* `POST /plugins/pull?name=<plugin name>` pulls a plugin.
+* `GET /plugins/(plugin name)` inspects a plugin.
+* `POST /plugins/(plugin name)/set` configures a plugin.
+* `POST /plugins/(plugin name)/enable` enables a plugin.
+* `POST /plugins/(plugin name)/disable` disables a plugin.
+* `POST /plugins/(plugin name)/push` pushes a plugin.
+* `POST /plugins/create?name=(plugin name)` creates a plugin.
+* `DELETE /plugins/(plugin name)` deletes a plugin.
+* `POST /nodes/(id or name)/update` now accepts either `id` or `name` to identify the node to update.
+* `GET /images/json` now supports a `reference` filter.
+* `GET /secrets` returns information on the secrets.
+* `POST /secrets/create` creates a secret.
+* `DELETE /secrets/{id}` removes the secret `id`.
+* `GET /secrets/{id}` returns information on the secret `id`.
+* `POST /secrets/{id}/update` updates the secret `id`.
+* `POST /services/(id or name)/update` now accepts a service name or a prefix of the service ID as a parameter.
+* `POST /containers/create` added two built-in log-opts that work on all logging drivers:
+  `mode` (`blocking`|`non-blocking`) and `max-buffer-size` (e.g. `2m`), which enable a non-blocking log buffer.
+
+## v1.24 API changes
+
+[Docker Engine API v1.24](v1.24.md) documentation
+
+* `POST /containers/create` now takes a `StorageOpt` field.
+* `GET /info` now returns a `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported.
+* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration
+  with containerd in Docker 1.11.
+* `GET /networks` now supports filtering by `label` and `driver`.
+* `GET /containers/json` now supports filtering containers by `network` name or id.
+* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only.
+* `POST /containers/create` now returns an HTTP 400 "bad parameter" message
+  if no command is specified (instead of an HTTP 500 "server error").
+* `GET /images/search` now takes a `filters` query parameter.
+* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded.
+* `GET /events` now supports filtering by daemon name or ID.
+* `GET /events` now supports a `detach` event that is emitted on detaching from a container process.
+* `GET /events` now supports an `exec_detach` event that is emitted on detaching from an exec process.
+* `GET /images/json` now supports the filters `since` and `before`.
+* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`.
+* `POST /images/(name)/tag` no longer has a `force` query parameter.
+* `GET /images/search` now supports limiting the maximum number of returned search results via `limit`.
+* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version.
+* API errors are now returned as JSON instead of plain text.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container (see the example after this list).
+* `POST /containers/(id or name)/exec` and `POST /exec/(id)/start`
+  no longer expect a "Container" field to be present. This property was not used
+  and is no longer sent by the docker client.
+* `POST /containers/create/` now validates the hostname (should be a valid RFC 1123 hostname).
+* The `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:<name|id>`,
+  to have the container join the PID namespace of an existing container.
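+
+As a minimal illustration of the sysctls support above (the image name is a placeholder), the CLI equivalent is:
+
+```bash
+docker run --rm --sysctl net.core.somaxconn=1024 busybox cat /proc/sys/net/core/somaxconn
+```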
+
+## v1.23 API changes
+
+[Docker Engine API v1.23](v1.23.md) documentation
+
+* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`.
+* `GET /containers/json` returns the mount points for the container.
+* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not.
+* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has IPv6 enabled or not.
+* `POST /containers/(name)/update` now supports updating the container's restart policy.
+* `POST /networks/create` now supports enabling IPv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work).
+* `GET /info` now returns a `CgroupDriver` field showing which cgroup driver the daemon is using: `cgroupfs` or `systemd`.
+* `GET /info` now returns a `KernelMemory` field, showing if "kernel memory limit" is supported.
+* `POST /containers/create` now takes a `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported.
+* `POST /containers/create` now allows you to override user-namespace remapping and use privileged options for the container.
+* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume.
+* `POST /auth` now returns an `IdentityToken` when supported by a registry.
+* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`.
+* `GET /volumes` now supports more filters; the newly added filters are `name` and `driver`.
+* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the container's `LogOpts`, such as environment variables and labels, with the logs.
+* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details.
+
+## v1.22 API changes
+
+[Docker Engine API v1.22](v1.22.md) documentation
+
+* `POST /containers/(name)/update` updates the resources of a container.
+* `GET /containers/json` supports the `isolation` filter on Windows.
+* `GET /containers/json` now returns the list of networks of containers.
+* `GET /info` now returns `Architecture` and `OSType` fields, providing information
+  about the host architecture and operating system type that the daemon runs on.
+* `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
+* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
+  consistent with other date/time values returned by the API.
+* `AuthConfig` now supports a `registrytoken` for token-based authentication.
+* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`.
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
+  will be cancelled if the HTTP connection making the API request is closed before
+  the push or pull completes.
+* `POST /containers/create` now allows you to set a read/write rate limit for a
+  device (in bytes per second or IO per second); see the sketch after this list.
+* `GET /networks` now supports filtering by `name`, `id` and `type`.
+* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `GET /info` now includes the number of containers running, stopped, and paused.
+* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field.
+* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from a network.
+* `GET /containers/(id)/json` now returns the `NetworkID` of containers.
+* `POST /networks/create` now supports an options field in the IPAM config that provides options
+  for custom IPAM plugins.
+* `GET /networks/{network-id}` now returns IPAM config options for custom IPAM plugins if any
+  are available.
+* `GET /networks/` now returns subnets info for user-defined networks.
+* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications
+  that are built on top of the engine.
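+
+A sketch of the per-device rate limits mentioned above, via their CLI equivalents (the device path and image name are placeholders):
+
+```bash
+docker run --rm \
+  --device-read-bps /dev/sda:1mb \
+  --device-write-iops /dev/sda:100 \
+  busybox sh -c 'echo rate limits applied'
+```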
+
+## v1.21 API changes
+
+[Docker Engine API v1.21](v1.21.md) documentation
+
+* `GET /volumes` lists volumes from all volume drivers.
+* `POST /volumes/create` creates a volume.
+* `GET /volumes/(name)` gets low-level information about a volume.
+* `DELETE /volumes/(name)` removes a volume with the specified name.
+* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable.
+* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`.
+* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container.
+* `GET /containers/(id)/stats` will return networking information for each interface individually.
+* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options.
+* `POST /build` now optionally takes a serialized map of build-time variables.
+* `GET /events` now includes a `timenano` field, in addition to the existing `time` field.
+* `GET /events` now supports filtering by image and container labels.
+* `GET /info` now lists engine version information and returns the `CPUShares` and `Cpuset` information.
+* `GET /containers/json` will return the `ImageID` of the image used by the container.
+* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused.
+* `POST /containers/create` now takes `KernelMemory` in `HostConfig` to specify the kernel memory limit.
+* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to `1` returns container size information in the `SizeRw` and `SizeRootFs` fields.
+* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field,
+  detailing network settings per network. This field deprecates the
+  `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`,
+  `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which
+  are still returned for backward-compatibility, but will be removed in a future version.
+* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field,
+  detailing network settings per network. This field deprecates the
+  `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`,
+  `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which
+  are still returned for backward-compatibility, but will be removed in a future version.
+* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the
+  badness heuristic. This heuristic selects which processes the OOM killer kills
+  under out-of-memory conditions.
+
+## v1.20 API changes
+
+[Docker Engine API v1.20](v1.20.md) documentation
+
+* `GET /containers/(id)/archive` gets an archive of filesystem content from a container (see the example after this list).
+* `PUT /containers/(id)/archive` uploads an archive of content to be extracted to
+  an existing directory inside a container's filesystem.
+* `POST /containers/(id)/copy` is deprecated in favor of the above `archive`
+  endpoint, which can be used to download files and directories from a container.
+* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a
+  list of additional groups that the container process will run as.
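+
+A hedged example of the archive endpoint (assuming `curl`, a daemon on the default Unix socket, and a running container named `web` — all placeholders for your environment):
+
+```bash
+# Download /etc/hosts from the container as a tar stream.
+curl -s --unix-socket /var/run/docker.sock -o hosts.tar \
+  "http://localhost/v1.20/containers/web/archive?path=/etc/hosts"
+```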
+
+## v1.19 API changes
+
+[Docker Engine API v1.19](v1.19.md) documentation
+
+* When the daemon detects a version mismatch with the client, usually when
+  the client is newer than the daemon, an HTTP 400 is now returned instead
+  of a 404.
+* `GET /containers/(id)/stats` now accepts a `stream` bool to get only one set of stats and disconnect.
+* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter.
+* `GET /info`: the fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and
+  `SwapLimit` are now returned as booleans instead of as ints. In addition, the
+  endpoint now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and
+  `OomKillDisable`.
+* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota`.
+* `POST /build` accepts `cpuperiod` and `cpuquota` options.
+
+## v1.18 API changes
+
+[Docker Engine API v1.18](v1.18.md) documentation
+
+* `GET /version` now returns `Os`, `Arch` and `KernelVersion`.
+* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container.
+* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy` and `NoProxy`.
+* `GET /images/json` added a `RepoDigests` field to include image digest information.
+* `POST /build` can now set resource constraints for all containers created for the build.
+* `CgroupParent` can be passed in the host config to set up container cgroups under a specific cgroup.
+* `POST /build`: closing the HTTP request cancels the build.
+* `POST /containers/(id)/exec` now includes a `Warnings` field in the response.
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 0000000..435d98e
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,79 @@
+# Getting started with docker
+
+Using docker should feel very natural to docker users since it shares the same
+command structure.
+
+**Pulling images**
+
+```bash
+docker pull resin/rpi-raspbian:jessie
+```
+
+**Running containers**
+
+```bash
+docker run -it --rm resin/rpi-raspbian:jessie /bin/bash
+```
+
+## Container deltas
+
+Docker comes with support for container deltas, a way of computing a binary
+description of what changed between two images. A delta can be pushed to the
+standard docker registry; it has the same format as a docker image! Let's see
+how this works.
+
+Deltas are computed for a **target** image against a **base** image. For a
+docker instance to be able to pull the **target** using deltas, it must already
+have the **base**. Docker will make sure this is the case and bail out if it
+doesn't have the appropriate data to apply the delta.
+
+Unlike layer-based pulling, with container deltas, if any part of any file of
+any layer is shared between the two images it will be referenced instead of
+being transferred. This amounts to huge bandwidth savings that range between
+10-70x for most cases.
+
+### Using deltas
+
+**Creating a container delta**
+
+```bash
+docker image delta resin/raspberrypi3-node:6 resin/raspberrypi3-node:7
+```
+
+The image ID of the delta will be printed at the end of the command. You can
+now manipulate the delta image like a normal image. Of course, you won't be
+able to run it since it doesn't contain a complete filesystem. Let's push our
+delta to the registry! But first, let's give a name to our delta.
+
+```bash
+# Substitute the image ID printed by `docker image delta` above.
+docker tag <delta image ID> resin/raspberrypi3-node:delta-6-7
+```
+
+**Pushing with deltas**
+
+```bash
+docker push resin/raspberrypi3-node:delta-6-7
+```
+That's it! Deltas are like normal images as far as the registry is concerned.
+
+**Pulling with deltas**
+
+Now, let's delete the delta and target from the local daemon and try to pull using deltas.
+```bash
+docker rmi -f resin/raspberrypi3-node:delta-6-7 resin/raspberrypi3-node:7
+```
+And now let's pull the delta.
+```bash
+docker pull resin/raspberrypi3-node:delta-6-7
+```
+
+That's it! Docker understands that the image it tries to download is a delta
+image and switches to delta application mode automatically.
+
+After pulling is complete you should end up with an image with the same image
+ID as `resin/raspberrypi3-node:7`. Let's verify that by also pulling
+`resin/raspberrypi3-node:7`. It should be a no-op.
+
+```bash
+docker pull resin/raspberrypi3-node:7
+```
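+
+For a stronger check, one way (a sketch, using the tags from this example) is to compare the image IDs directly; both commands should print the same ID:
+
+```bash
+docker inspect --format '{{.Id}}' resin/raspberrypi3-node:delta-6-7
+docker inspect --format '{{.Id}}' resin/raspberrypi3-node:7
+```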
diff --git a/docs/static_files/balena-logo-black.svg b/docs/static_files/balena-logo-black.svg
new file mode 100644
index 0000000..e0d23f9
--- /dev/null
+++ b/docs/static_files/balena-logo-black.svg
@@ -0,0 +1,56 @@
[SVG markup omitted]

diff --git a/docs/static_files/contributors.png b/docs/static_files/contributors.png
new file mode 100644
index 0000000000000000000000000000000000000000..63c0a0c09b58bce2e1ade867760a937612934202
GIT binary patch
[binary image data omitted]

diff --git a/docs/static_files/docker-logo-compressed.png b/docs/static_files/docker-logo-compressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..717d09d773cc46ff8297d06f66d9aa855453f35a
GIT binary patch
[binary image data omitted]

diff --git a/docs/static_files/moby-project-logo.png b/docs/static_files/moby-project-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..2914186efdd0d3a6223efed318a9e6f09d79359e
GIT binary patch
[binary image data omitted]
z%MA{ObQTYxz!!A2oj0rKpbfS^A=Tmu1iQ)VM&=f*=U7n$%jwzAPQK!7jC)B|G8u%g zitgIG_{L52;2kps8>JBcHCYvk!`Ir2NBN$vE}t#G)w>L}!GN3t98qs}}@gjfebVY3F+SKs6s^+LrsnmTs+h{HB zR-iShZe{E_VAptc-kuRQTXn*mV6=+ee8!A=hSKCFG>#hy=1o*ON!oL`I>d(=T1<@v zi!h7r$X9xrimdLn2KV!iPyp)#+|;JbW;z^Fi~QL(DIlDM%@#A{u^`jNuNFK&njzv~ zM^5Dp$3620J1}?~A^;@CtI7RyWjc0|DN%xO*8wQHJljN2)HNl@E$Og z1jLBWfh^bbgM2JeNnuUDP$lVRVc~u0)dA48cOZuJl6q90T|+2m?ebvMdpQklv2fW% z*#usb-I-7DDei+IwKX>srl3kPlM>GiVvlzjh>b3@Te<`MKz!^=j%usvf&B9 zWRe-UI;kI%iTiTcuCJwDV@(%2rRaOvvGEez1(P5AMn%V}d?tD4E{VoP$w|kE7i~!2 zy;K^@p63O4iv3G}1k^BxB0YGXt)ziPCdXO8TQuXznNJfYZQ*`}z=7AmIx%nOb0sF{ zod5Q6EpH?rZ7j6Yq+KU@iGsr#RJTYFCU1CeltiUhHwRyx{axeQeKq|1#rQgCZjUy& z2`>(3L9&JRqe^vOXgAgv*7?j3KBxu_A>6iizDx!nS;aa2Tu=O zDa`2iuvl5fjcW?kydc92zTEQWlh>|+E{zt=a)z+`vYxM#**~TP742^1+KguYEO-^P zptGYu1H>+3zqG%seS=$H_{!c?XP-9KH@fz#6nT8Lw+%6H@*YCLnZA0ajmP?4Eof8; zJ;ye^YrQRR`>7FikLK`=FLXF|;%zIujmJ6eoMb-iR6isGxR+C_-XUF>BQihL1A7%ygE{TwZpOdtcuB(iaK54kw8OuB>WyIXLILxV0{Ak$F#09_ zubm{w=yN4kW;on&8LI&?e~Y`jcJo@e0S6*(_BrA$(hzy{;giG0MhEg*LnnAll}nU% z)R7|WZC@Kn_shP&JpKMf0LQxzTAGsfF8DOb{%>~MBG65zW3tXkOfe5=`>VlI!O!}S z{9Pd51BEgazLcITt_@JW7aX_2bFnkXOK)n7#O-4yIOY2OLxXKn>-OMLl3dM zhXMW5BFon1q#Wtz*9jAuF8cy*GWw2SjDrKMf(OdKKgUkzW#w8O99J@uPwFPEUB#=aU68Wg&bp( zaL5H_c_58^QCZmJq53z$eElK{-N0P4R?0#SEznthaFayi%`WCS zk{p_rjPL*~1MVnI08c|GHSJLc^dTYWY)qIHDHnCW)ga`Y`IB<%<`)k88vmrB_|Yo* zHjRKIVfu10;ms-YnV20-mY$RcsZqu?ug^+H<<~I@R%WO3bpH$T#Uay0HLdTDR&X|L&a>X2t5^RsV7Sr&0p9YHZ*J%~%joua3^w)i~&YfkH=K>zV%+4uPTZ z_>AX&vnJqLaO6b>R6~1D&fEKaiuku4%7%8nIEQzkohi#UWg%VY3E%sYh?Ja6c!;oe z|ALFOJNf@@S0G=d{VI&y^aOBA^9WvJLm`XWj0+V&aJNcoU92*XXq5F;LMlsEcJy@d z&8QzsoSrr`UP(Y6KE@@b3qzNgYt{~QyK-Dh#)n!9x(k_jAcE?7N*mLs2+P%^fj@j* z_P0EeFZqd(bFy$!*sc4>Rh9r_LN{jWZ>r{)Dz~WzK;y8%(@!^(0hvpD@0r6pAb3& /dev/null +! taskkill -F -IM go.exe -T >& /dev/null +! taskkill -F -IM docker.exe -T >& /dev/null + +# Remove everything +! cd /c/jenkins/gopath/src/github.com/docker/docker +! rm -rfd * >& /dev/null +! rm -rfd .* >& /dev/null + +echo INFO: Cleanup complete +exit 0 \ No newline at end of file diff --git a/hack/Jenkins/W2L/setup.sh b/hack/Jenkins/W2L/setup.sh new file mode 100644 index 0000000..ccf176a --- /dev/null +++ b/hack/Jenkins/W2L/setup.sh @@ -0,0 +1,309 @@ +# Jenkins CI script for Windows to Linux CI. +# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. +set +xe +SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" + +# TODO to make (even) more resilient: +# - Wait for daemon to be running before executing docker commands +# - Check if jq is installed +# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version +# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. +# - Error if docker versions are not equal. Can't do until all Azure nodes are updated +# - Error if go versions are not equal. Can't do until all Azure nodes are updated. +# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" +# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind +# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP +# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason +# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. +# However, should absolutely not be a problem if built natively, so nit-picking. +# - Tidy up of images and containers. 
+
+ec=0
+uniques=1
+echo INFO: Started at `date`. Script version $SCRIPT_VER
+
+
+# !README!
+# There are two daemons running on the remote Linux host:
+#  - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon
+#    from the sources matching the PR.
+#  - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but with the last
+#    two digits inverted (2357 if DOCKER_HOST had port 2375; 2367 if DOCKER_HOST had port 2376).
+#    The Windows integration tests are run against this inner daemon.
+
+# Get the IP, and the inner and outer ports.
+ip="${DOCKER_HOST#*://}"
+port_outer="${ip#*:}"
+# The inner port is the outer port with its last two digits swapped (a sanity-check aside appears further below).
+port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/')
+ip="${ip%%:*}"
+
+echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner"
+
+# If TLS is enabled
+if [ -n "$DOCKER_TLS_VERIFY" ]; then
+	protocol=https
+	if [ -z "$DOCKER_MACHINE_NAME" ]; then
+		ec=1
+		echo "ERROR: DOCKER_MACHINE_NAME is undefined"
+	fi
+	certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME)
+	curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem"
+	run_extra_args="-v tlscerts:/etc/docker"
+	daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem"
+else
+	protocol=http
+fi
+
+# Save for use by make.sh and scripts it invokes
+export MAIN_DOCKER_HOST="tcp://$ip:$port_inner"
+
+# Verify we can get the remote node to respond to _ping
+if [ $ec -eq 0 ]; then
+	reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping`
+	if [ "$reply" != "OK" ]; then
+		ec=1
+		echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node"
+		echo "       at $ip:$port_outer when called with an http request for '_ping'. This implies that"
+		echo "       either the daemon has crashed/is not running, or the Linux node is unavailable."
+		echo
+		echo "       A regular ping to the remote Linux node is below. It should reply. If not, the"
+		echo "       machine cannot be reached at all and may have crashed. If it does reply, it is"
+		echo "       likely a case of the Linux daemon not running or having crashed, which requires"
+		echo "       further investigation."
+		echo
+		echo "       Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
+		echo "       for someone to perform further diagnostics, or take this node out of rotation."
+		echo
+		ping $ip
+	else
+		echo "INFO: The Linux node's outer daemon replied to a ping. Good!"
+	fi
+fi
+
+# Get the version from the remote node. Note this may fail if jq is not installed;
+# that's probably worth checking, just in case.
+if [ $ec -eq 0 ]; then
+	remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'`
+	echo "INFO: Remote daemon is running docker version $remoteVersion"
+fi
+
+# Compare versions. We should really fail if the result is not 1. Output at end of script.
+if [ $ec -eq 0 ]; then
+	uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l`
+fi
+
+# Make sure we are in the repo
+if [ $ec -eq 0 ]; then
+	if [ ! -d hack ]; then
+		echo "ERROR: Are you sure this is being launched from the root of the docker repository?"
+		echo "       If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker."
+		echo "       Current directory is `pwd`"
+		ec=1
+	fi
+fi
+
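+# (Illustrative aside, not executed by CI: the two-digit port inversion used
+# for MAIN_DOCKER_HOST above can be sanity-checked by hand, assuming the
+# conventional daemon ports:
+#     $ echo 2375 | sed -E 's/(.)(.)$/\2\1/'   # prints 2357
+#     $ echo 2376 | sed -E 's/(.)(.)$/\2\1/'   # prints 2367
+# )
+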
+# Are we in split binary mode?
+if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then
+	splitBinary=0
+	echo "INFO: Running in single binary mode"
+else
+	splitBinary=1
+	echo "INFO: Running in split binary mode"
+fi
+
+
+# Get the commit hash and verify we have something
+if [ $ec -eq 0 ]; then
+	export COMMITHASH=$(git rev-parse --short HEAD)
+	echo INFO: Commit hash is $COMMITHASH
+	if [ -z "$COMMITHASH" ]; then
+		echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?"
+		ec=1
+	fi
+fi
+
+# Redirect to a temporary location. The check is here for local runs from Jenkins machines, just in case we are
+# not in the right directory where the repo is cloned. We also redirect TEMP to not use the environment
+# TEMP, as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which
+# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system...
+if [ $ec -eq 0 ]; then
+	export TEMP=/c/CI/CI-$COMMITHASH
+	export TMP=$TEMP
+	/usr/bin/mkdir -p $TEMP  # Make sure it is the Linux mkdir, for -p
+fi
+
+# Tidy up time
+if [ $ec -eq 0 ]; then
+	echo INFO: Deleting pre-existing containers and images...
+
+	# Force remove all containers based on a previously built image with this commit
+	! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null
+
+	# Force remove any container with this commithash as a name
+	! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null
+
+	# This SHOULD never happen, but just in case, also blow away any containers
+	# that might be around.
+	! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
+		echo WARN: There were some leftover containers. Cleaning them up.
+		! docker rm -f $(docker ps -aq)
+	fi
+
+	# Force remove the image if it exists
+	! docker rmi -f "docker-$COMMITHASH" &>/dev/null
+fi
+
+# Provide the docker version for debugging purposes. If these fail, it's game over,
+# as the Linux box isn't responding for some reason.
+if [ $ec -eq 0 ]; then
+	echo INFO: Docker version and info of the outer daemon on the Linux node
+	echo
+	docker version
+	ec=$?
+	if [ 0 -ne $ec ]; then
+		echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
+	fi
+	echo
+fi
+
+# Same as above, but docker info
+if [ $ec -eq 0 ]; then
+	echo
+	docker info
+	ec=$?
+	if [ 0 -ne $ec ]; then
+		echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
+	fi
+	echo
+fi
+
+# build the daemon image
+if [ $ec -eq 0 ]; then
+	echo "INFO: Running docker build on Linux host at $DOCKER_HOST"
+	if [ $splitBinary -eq 0 ]; then
+		set -x
+		docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
+		cat <
[Extraction damage: the remainder of this build step and the header of the new file hack/dind were lost; the text resumes partway into hack/dind.]
+# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
+#
+# This script should be executed inside a docker container in privileged mode
+# ('docker run --privileged', introduced in docker 0.6).
+
+# Usage: dind CMD [ARG...]
+
+# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
+export container=docker
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
+	mount -t securityfs none /sys/kernel/security || {
+		echo >&2 'Could not mount /sys/kernel/security.'
+		echo >&2 'AppArmor detection and --privileged mode might break.'
+	}
+fi
+
+# Mount /tmp (conditionally)
+if ! mountpoint -q /tmp; then
+	mount -t tmpfs none /tmp
+fi
+
+if [ $# -gt 0 ]; then
+	exec "$@"
+fi
+
+echo >&2 'ERROR: No command specified.'
+echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits new file mode 100644 index 0000000..b91775a --- /dev/null +++ b/hack/dockerfile/binaries-commits @@ -0,0 +1,14 @@ +#!/bin/sh + +TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a + +# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly +RUNC_COMMIT=13e66eedaddfbfeda2a73d23701000e4e63b5471 +CONTAINERD_COMMIT=3addd840653146c90a254301d6c3a663c7fd6429 +TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 +LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e +VNDR_COMMIT=c56e082291115e369f77601f9c071dd0b87c7120 + +# CLI +DOCKERCLI_REPO=https://github.com/docker/cli +DOCKERCLI_COMMIT=3dfb8343b139d6342acfd9975d7f1068b5b1c3d3 diff --git a/hack/dockerfile/install-binaries.sh b/hack/dockerfile/install-binaries.sh new file mode 100755 index 0000000..703d787 --- /dev/null +++ b/hack/dockerfile/install-binaries.sh @@ -0,0 +1,125 @@ +#!/bin/sh +set -e +set -x + +. $(dirname "$0")/binaries-commits + +RM_GOPATH=0 + +TMP_GOPATH=${TMP_GOPATH:-""} + +if [ -z "$TMP_GOPATH" ]; then + export GOPATH="$(mktemp -d)" + RM_GOPATH=1 +else + export GOPATH="$TMP_GOPATH" +fi + +# Do not build with ambient capabilities support +RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux"}" + +install_runc() { + echo "Install runc version $RUNC_COMMIT" + git clone https://github.com/resin-os/runc.git "$GOPATH/src/github.com/opencontainers/runc" + cd "$GOPATH/src/github.com/opencontainers/runc" + git checkout -q "$RUNC_COMMIT" + make BUILDTAGS="$RUNC_BUILDTAGS" $1 + cp runc /usr/local/bin/docker-runc +} + +install_containerd() { + echo "Install containerd version $CONTAINERD_COMMIT" + git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.com/containerd/containerd" + cd "$GOPATH/src/github.com/containerd/containerd" + git checkout -q "$CONTAINERD_COMMIT" + make $1 + cp bin/containerd /usr/local/bin/docker-containerd + cp bin/containerd-shim /usr/local/bin/docker-containerd-shim + cp bin/ctr /usr/local/bin/docker-containerd-ctr +} + +install_proxy() { + echo "Install docker-proxy version $LIBNETWORK_COMMIT" + git clone https://github.com/docker/libnetwork.git "$GOPATH/src/github.com/docker/libnetwork" + cd "$GOPATH/src/github.com/docker/libnetwork" + git checkout -q "$LIBNETWORK_COMMIT" + go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy +} + +install_dockercli() { + echo "Install docker/cli version $DOCKERCLI_COMMIT" + git clone "$DOCKERCLI_REPO" "$GOPATH/src/github.com/docker/cli" + cd "$GOPATH/src/github.com/docker/cli" + git checkout -q "$DOCKERCLI_COMMIT" + go build -o /usr/local/bin/docker github.com/docker/cli/cmd/docker +} + +for prog in "$@" +do + case $prog in + tomlv) + echo "Install tomlv version $TOMLV_COMMIT" + git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" + cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT" + go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv + ;; + + runc) + install_runc static + ;; + + runc-dynamic) + install_runc + ;; + + containerd) + install_containerd static + ;; + + containerd-dynamic) + install_containerd + ;; + + tini) + echo "Install tini version $TINI_COMMIT" + git clone https://github.com/krallin/tini.git "$GOPATH/tini" + cd "$GOPATH/tini" + git checkout -q "$TINI_COMMIT" + cmake . 
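+	# (Aside: tini, like runc and containerd above, follows the same
+	# pin-and-build pattern used throughout this script: clone upstream,
+	# check out the commit pinned in hack/dockerfile/binaries-commits,
+	# build, and copy the resulting binary into /usr/local/bin under a
+	# docker- prefix.)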
+	make tini-static
+	cp tini-static /usr/local/bin/docker-init
+	;;
+
+	proxy)
+	(
+	export CGO_ENABLED=0
+	install_proxy
+	)
+	;;
+
+	proxy-dynamic)
+	PROXY_LDFLAGS="-linkmode=external" install_proxy
+	;;
+
+	vndr)
+	echo "Install vndr version $VNDR_COMMIT"
+	git clone https://github.com/LK4D4/vndr.git "$GOPATH/src/github.com/LK4D4/vndr"
+	cd "$GOPATH/src/github.com/LK4D4/vndr"
+	git checkout -q "$VNDR_COMMIT"
+	go build -v -o /usr/local/bin/vndr .
+	;;
+
+	dockercli)
+	install_dockercli
+	;;
+
+	*)
+	echo "Usage: $0 [tomlv|runc|runc-dynamic|containerd|containerd-dynamic|tini|proxy|proxy-dynamic|vndr|dockercli]"
+	exit 1
+
+	esac
+done
+
+if [ $RM_GOPATH -eq 1 ]; then
+	rm -rf "$GOPATH"
+fi
diff --git a/hack/generate-authors.sh b/hack/generate-authors.sh
new file mode 100755
index 0000000..680bdb7
--- /dev/null
+++ b/hack/generate-authors.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
+} > AUTHORS
diff --git a/hack/generate-swagger-api.sh b/hack/generate-swagger-api.sh
new file mode 100755
index 0000000..9bbd8de
--- /dev/null
+++ b/hack/generate-swagger-api.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -eu
+
+swagger generate model -f api/swagger.yaml \
+    -t api -m types --skip-validator -C api/swagger-gen.yaml \
+    -n ErrorResponse \
+    -n GraphDriverData \
+    -n IdResponse \
+    -n ImageDeleteResponseItem \
+    -n ImageSummary \
+    -n Plugin -n PluginDevice -n PluginMount -n PluginEnv -n PluginInterfaceType \
+    -n Port \
+    -n ServiceUpdateResponse \
+    -n Volume
+
+swagger generate operation -f api/swagger.yaml \
+    -t api -a types -m types -C api/swagger-gen.yaml \
+    -T api/templates --skip-responses --skip-parameters --skip-validator \
+    -n Authenticate \
+    -n ContainerChanges \
+    -n ContainerCreate \
+    -n ContainerTop \
+    -n ContainerUpdate \
+    -n ContainerWait \
+    -n ImageHistory \
+    -n VolumesCreate \
+    -n VolumesList
diff --git a/hack/install.sh b/hack/install.sh
new file mode 100644
index 0000000..12a9b3e
--- /dev/null
+++ b/hack/install.sh
@@ -0,0 +1,544 @@
+#!/bin/sh
+set -e
+#
+# This script is meant for quick & easy install via:
+#   'curl -sSL https://get.docker.com/ | sh'
+# or:
+#   'wget -qO- https://get.docker.com/ | sh'
+#
+# For test builds (i.e. 
release candidates): +# 'curl -fsSL https://test.docker.com/ | sh' +# or: +# 'wget -qO- https://test.docker.com/ | sh' +# +# For experimental builds: +# 'curl -fsSL https://experimental.docker.com/ | sh' +# or: +# 'wget -qO- https://experimental.docker.com/ | sh' +# +# Docker Maintainers: +# To update this script on https://get.docker.com, +# use hack/release.sh during a normal release, +# or the following one-liner for script hotfixes: +# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index +# + +url="https://get.docker.com/" +apt_url="https://apt.dockerproject.org" +yum_url="https://yum.dockerproject.org" + +docker_key="-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o +ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R +mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn +TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK +dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT +X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG +HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c +NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ +hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U +65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM +zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB +tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv +Y2tlci5jb20+iQIcBBABCgAGBQJWw7vdAAoJEFyzYeVS+w0QHysP/i37m4SyoOCV +cnybl18vzwBEcp4VCRbXvHvOXty1gccVIV8/aJqNKgBV97lY3vrpOyiIeB8ETQeg +srxFE7t/Gz0rsLObqfLEHdmn5iBJRkhLfCpzjeOnyB3Z0IJB6UogO/msQVYe5CXJ +l6uwr0AmoiCBLrVlDAktxVh9RWch0l0KZRX2FpHu8h+uM0/zySqIidlYfLa3y5oH +scU+nGU1i6ImwDTD3ysZC5jp9aVfvUmcESyAb4vvdcAHR+bXhA/RW8QHeeMFliWw +7Z2jYHyuHmDnWG2yUrnCqAJTrWV+OfKRIzzJFBs4e88ru5h2ZIXdRepw/+COYj34 +LyzxR2cxr2u/xvxwXCkSMe7F4KZAphD+1ws61FhnUMi/PERMYfTFuvPrCkq4gyBj +t3fFpZ2NR/fKW87QOeVcn1ivXl9id3MMs9KXJsg7QasT7mCsee2VIFsxrkFQ2jNp +D+JAERRn9Fj4ArHL5TbwkkFbZZvSi6fr5h2GbCAXIGhIXKnjjorPY/YDX6X8AaHO +W1zblWy/CFr6VFl963jrjJgag0G6tNtBZLrclZgWhOQpeZZ5Lbvz2ZA5CqRrfAVc +wPNW1fObFIRtqV6vuVluFOPCMAAnOnqR02w9t17iVQjO3oVN0mbQi9vjuExXh1Yo +ScVetiO6LSmlQfVEVRTqHLMgXyR/EMo7iQIcBBABCgAGBQJXSWBlAAoJEFyzYeVS ++w0QeH0QAI6btAfYwYPuAjfRUy9qlnPhZ+xt1rnwsUzsbmo8K3XTNh+l/R08nu0d +sczw30Q1wju28fh1N8ay223+69f0+yICaXqR18AbGgFGKX7vo0gfEVaxdItUN3eH +NydGFzmeOKbAlrxIMECnSTG/TkFVYO9Ntlv9vSN2BupmTagTRErxLZKnVsWRzp+X +elwlgU5BCZ6U6Ze8+bIc6F1bZstf17X8i6XNV/rOCLx2yP0hn1osoljoLPpW8nzk +wvqYsYbCA28lMt1aqe0UWvRCqR0zxlKn17NZQqjbxcajEMCajoQ01MshmO5GWePV +iv2abCZ/iaC5zKqVT3deMJHLq7lum6qhA41E9gJH9QoqT+qgadheeFfoC1QP7cke ++tXmYg2R39p3l5Hmm+JQbP4f9V5mpWExvHGCSbcatr35tnakIJZugq2ogzsm1djC +Sz9222RXl9OoFqsm1bNzA78+/cOt5N2cyhU0bM2T/zgh42YbDD+JDU/HSmxUIpU+ +wrGvZGM2FU/up0DRxOC4U1fL6HHlj8liNJWfEg3vhougOh66gGF9ik5j4eIlNoz6 +lst+gmvlZQ9/9hRDeoG+AbhZeIlQ4CCw+Y1j/+fUxIzKHPVK+aFJd+oJVNvbojJW +/SgDdSMtFwqOvXyYcHl30Ws0gZUeDyAmNGZeJ3kFklnApDmeKK+OiQIiBBABCgAM +BQJXe5zTBYMHhh+AAAoJEDG4FaMBBnSp7YMQAJqrXoBonZAq07B6qUaT3aBCgnY4 +JshbXmFb/XrrS75f7YJDPx2fJJdqrbYDIHHgOjzxvp3ngPpOpJzI5sYmkaugeoCO +/KHu/+39XqgTB7fguzapRfbvuWp+qzPcHSdb9opnagfzKAze3DQnnLiwCPlsyvGp +zC4KzXgV2ze/4raaOye1kK7O0cHyapmn/q/TR3S8YapyXq5VpLThwJAw1SRDu0Yx +eXIAQiIfaSxT79EktoioW2CSV8/djt+gBjXnKYJJA8P1zzX7GNt/Rc2YG0Ot4v6t +BW16xqFTg+n5JzbeK5cZ1jbIXXfCcaZJyiM2MzYGhSJ9+EV7JYF05OAIWE4SGTRj +XMquQ2oMLSwMCPQHm+FCD9PXQ0tHYx6tKT34wksdmoWsdejl/n3NS+178mG1WI/l +N079h3im2gRwOykMou/QWs3vGw/xDoOYHPV2gJ7To9BLVnVK/hROgdFLZFeyRScN 
+zwKm57HmYMFA74tX601OiHhk1ymP2UUc25oDWpLXlfcRULJJlo/KfZZF3pmKwIq3 +CilGayFUi1NNwuavG76EcAVtVFUVFFIITwkhkuRbBHIytzEHYosFgD5/acK0Pauq +JnwrwKv0nWq3aK7nKiALAD+iZvPNjFZau3/APqLEmvmRnAElmugcHsWREFxMMjMM +VgYFiYKUAJO8u46eiQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgID +AQIeAQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0 +CH+nAk40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj +9A4I1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlS +C4SluyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQe +bTGv0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4 +Aal8L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08 +GkzDYBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn +6oOR7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA +/Zxcjk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5 +HWXPHXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1s +FVELMXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1g +EJOQTvBR8Q== +=Yhur +-----END PGP PUBLIC KEY BLOCK----- +" + +mirror='' +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + *) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + AzureChinaCloud) + apt_url="https://mirror.azure.cn/docker-engine/apt" + yum_url="https://mirror.azure.cn/docker-engine/yum" + ;; + Aliyun) + apt_url="https://mirrors.aliyun.com/docker-engine/apt" + yum_url="https://mirrors.aliyun.com/docker-engine/yum" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +echo_docker_as_nonroot() { + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + cat <<-EOF + + If you would like to use Docker as a non-root user, you should now consider + adding your user to the "docker" group with something like: + + sudo usermod -aG docker $your_user + + Remember that you will have to log out and back in for this to take effect! + + WARNING: Adding a user to the "docker" group will grant the ability to run + containers which can be used to obtain root privileges on the + docker host. + Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface + for more information. + + EOF +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. + EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + # We're Debian and don't even know it! 
+ lsb_dist=debian + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 9) + dist_version="stretch" + ;; + 8|'Kali Linux 2') + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + fi + fi + fi +} + +semverParse() { + major="${1%%.*}" + minor="${1#$major.}" + minor="${minor%%.*}" + patch="${1#$major.$minor.}" + patch="${patch%%[-.]*}" +} + +do_install() { + architecture=$(uname -m) + case $architecture in + # officially supported + amd64|x86_64) + ;; + # unofficially supported with available repositories + armv6l|armv7l) + ;; + # unofficially supported without available repositories + aarch64|arm64|ppc64le|s390x) + cat 1>&2 <<-EOF + Error: This install script does not support $architecture, because no + $architecture package exists in Docker's repositories. + + Other install options include checking your distribution's package repository + for a version of Docker, or building Docker from source. + EOF + exit 1 + ;; + # not supported + *) + cat >&2 <<-EOF + Error: $architecture is not a recognized platform. + EOF + exit 1 + ;; + esac + + if command_exists docker; then + version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)" + MAJOR_W=1 + MINOR_W=10 + + semverParse $version + + shouldWarn=0 + if [ $major -lt $MAJOR_W ]; then + shouldWarn=1 + fi + + if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then + shouldWarn=1 + fi + + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + EOF + + if [ $shouldWarn -eq 1 ]; then + cat >&2 <<-'EOF' + again to update Docker, we urge you to migrate your image store before upgrading + to v1.10+. + + You can find instructions for this here: + https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration + EOF + else + cat >&2 <<-'EOF' + again to update Docker, you can safely ignore this message. + EOF + fi + + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 + fi + fi + + curl='' + if command_exists curl; then + curl='curl -sSL' + elif command_exists wget; then + curl='wget -qO-' + elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' + fi + + # check to see which repo they are trying to install from + if [ -z "$repo" ]; then + repo='main' + if [ "https://test.docker.com/" = "$url" ]; then + repo='testing' + elif [ "https://experimental.docker.com/" = "$url" ]; then + repo='experimental' + fi + fi + + # perform some very rudimentary platform detection + lsb_dist='' + dist_version='' + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then + lsb_dist='oracleserver' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then + lsb_dist='centos' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then + lsb_dist='redhat' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/photon-release ]; then + lsb_dist='photon' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + # Special case redhatenterpriseserver + if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then + # Set it to redhat, it will be changed to centos below anyways + lsb_dist='redhat' + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + ;; + + oracleserver) + # need to switch lsb_dist to match yum repo URL + lsb_dist="oraclelinux" + dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" + ;; + + fedora|centos|redhat) + dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)" + ;; + + "vmware photon") + lsb_dist="photon" + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + + esac + + # Check if this is a forked Linux distro + check_forked + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + if [ "$lsb_dist" != "raspbian" ]; then + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then + kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + else + echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' + echo >&2 ' package. We have no AUFS support. Consider installing the packages' + echo >&2 ' "linux-image-virtual" and "linux-image-extra-virtual" for AUFS support.' 
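+					# (Aside: the AUFS probe above boils down to
+					#     grep -q aufs /proc/filesystems || $sh_c 'modprobe aufs'
+					# i.e. "is the driver already registered, and if not, can
+					# it be loaded?"; everything else is packaging fallbacks.)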
+ ( set -x; sleep 10 ) + fi + fi + fi + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser >/dev/null 2>&1; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! -e /usr/lib/apt/methods/https ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) + fi + if [ -z "$curl" ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) + curl='curl -sSL' + fi + if ! command -v gpg > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' ) + fi + + # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464 + if ! command -v dirmngr > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' ) + fi + + ( + set -x + echo "$docker_key" | $sh_c 'apt-key add -' + $sh_c "mkdir -p /etc/apt/sources.list.d" + $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" + $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' + ) + echo_docker_as_nonroot + exit 0 + ;; + + fedora|centos|redhat|oraclelinux|photon) + if [ "${lsb_dist}" = "redhat" ]; then + # we use the centos repository for both redhat and centos releases + lsb_dist='centos' + fi + $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF + [docker-${repo}-repo] + name=Docker ${repo} Repository + baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} + enabled=1 + gpgcheck=1 + gpgkey=${yum_url}/gpg + EOF + if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then + ( + set -x + $sh_c 'sleep 3; dnf -y -q install docker-engine' + ) + elif [ "$lsb_dist" = "photon" ]; then + ( + set -x + $sh_c 'sleep 3; tdnf -y install docker-engine' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-engine' + ) + fi + echo_docker_as_nonroot + exit 0 + ;; + esac + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output + cat >&2 <<-'EOF' + + Either your platform is not easily detectable, is not supported by this + installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have + a package for Docker. 
Please visit the following URL for more detailed
	installation instructions:
+
+	    https://docs.docker.com/engine/installation/
+
+	EOF
+	exit 1
+}
+
+# wrapped up in a function so that we have some protection against only getting
+# half the file during "curl | sh"
+do_install
diff --git a/hack/integration-cli-on-swarm/README.md b/hack/integration-cli-on-swarm/README.md
new file mode 100644
index 0000000..a09fa2f
--- /dev/null
+++ b/hack/integration-cli-on-swarm/README.md
@@ -0,0 +1,69 @@
+# Integration Testing on Swarm
+
+IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
+
+## Architecture
+
+### Master service
+
+ - Works as a funker caller
+ - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via the `-input` flag, typically `/mnt/input`)
+
+### Worker service
+
+ - Works as a funker callee
+ - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+
+### Client
+
+ - Controls master and workers via `docker stack`
+ - No need to have a local daemon
+
+Typically, the master and workers are supposed to be running on a cloud environment,
+while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
+
+## Requirements
+
+ - Docker daemon 1.13 or later
+ - Private registry for distributed execution with multiple nodes
+
+## Usage
+
+### Step 1: Prepare images
+
+    $ make build-integration-cli-on-swarm
+
+The following environment variables are known to work in this step:
+
+ - `BUILDFLAGS`
+ - `DOCKER_INCREMENTAL_BINARY`
+
+Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
+
+### Step 2: Execute tests
+
+    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
+
+The following environment variables are known to work in this step:
+
+ - `DOCKER_GRAPHDRIVER`
+ - `DOCKER_EXPERIMENTAL`
+
+#### Flags
+
+Basic flags:
+
+ - `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism
+ - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
+ - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
+
+Experimental flags for mitigating makespan nonuniformity:
+
+ - `-shuffle`: shuffle the test filter strings
+
+Flags for debugging IT on Swarm itself:
+
+ - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
+ - `-filters-file FILE`: the file that contains the `-check.f` strings. By default, the file is generated automatically.
+ - `-dry-run`: skip the actual workload
+ - `-keep-executor`: do not auto-remove executor containers, which are used for running privileged programs on Swarm
diff --git a/hack/integration-cli-on-swarm/agent/Dockerfile b/hack/integration-cli-on-swarm/agent/Dockerfile
new file mode 100644
index 0000000..c2bc2f1
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/Dockerfile
@@ -0,0 +1,6 @@
+# This Dockerfile is solely used for the master image.
+# Please refer to the top-level Makefile for the worker image.
+FROM golang:1.7
+ADD . 
/go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent
+RUN go build -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master
+ENTRYPOINT ["/master"]
diff --git a/hack/integration-cli-on-swarm/agent/master/call.go b/hack/integration-cli-on-swarm/agent/master/call.go
new file mode 100644
index 0000000..858c2c0
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/master/call.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/bfirsh/funker-go"
+	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
+)
+
+const (
+	// funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3
+	// When all the funker replicas are busy with their own jobs, we cannot connect to funker.
+	funkerRetryTimeout  = 1 * time.Hour
+	funkerRetryDuration = 1 * time.Second
+)
+
+// ticker is needed for some CI (e.g., on Travis, the job is aborted when no output is emitted for 10 minutes)
+func ticker(d time.Duration) chan struct{} {
+	t := time.NewTicker(d)
+	stop := make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-t.C:
+				log.Printf("tick (just for keeping CI job active) per %s", d.String())
+			case <-stop:
+				t.Stop()
+				// return here; a closed channel is always ready, so without
+				// this the goroutine would spin forever after close(stop)
+				return
+			}
+		}
+	}()
+	return stop
+}
+
+func executeTests(funkerName string, testChunks [][]string) error {
+	tickerStopper := ticker(9*time.Minute + 55*time.Second)
+	defer func() {
+		close(tickerStopper)
+	}()
+	begin := time.Now()
+	log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName)
+	var wg sync.WaitGroup
+	var passed, failed uint32
+	for chunkID, tests := range testChunks {
+		log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests))
+		wg.Add(1)
+		go func(chunkID int, tests []string) {
+			defer wg.Done()
+			chunkBegin := time.Now()
+			result, err := executeTestChunkWithRetry(funkerName, types.Args{
+				ChunkID: chunkID,
+				Tests:   tests,
+			})
+			if result.RawLog != "" {
+				for _, s := range strings.Split(result.RawLog, "\n") {
+					log.Printf("Log (chunk %d): %s", chunkID, s)
+				}
+			}
+			if err != nil {
+				log.Printf("Error while executing chunk %d: %v",
+					chunkID, err)
+				atomic.AddUint32(&failed, 1)
+			} else {
+				if result.Code == 0 {
+					atomic.AddUint32(&passed, 1)
+				} else {
+					atomic.AddUint32(&failed, 1)
+				}
+				log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.",
+					chunkID, passed+failed, len(testChunks), len(tests),
+					time.Now().Sub(chunkBegin), result.Code)
+			}
+		}(chunkID, tests)
+	}
+	wg.Wait()
+	// TODO: print actual tests rather than chunks
+	log.Printf("Executed %d chunks in %s. 
PASS: %d, FAIL: %d.", + len(testChunks), time.Now().Sub(begin), passed, failed) + if failed > 0 { + return fmt.Errorf("%d chunks failed", failed) + } + return nil +} + +func executeTestChunk(funkerName string, args types.Args) (types.Result, error) { + ret, err := funker.Call(funkerName, args) + if err != nil { + return types.Result{}, err + } + tmp, err := json.Marshal(ret) + if err != nil { + return types.Result{}, err + } + var result types.Result + err = json.Unmarshal(tmp, &result) + return result, err +} + +func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) { + begin := time.Now() + for i := 0; time.Now().Sub(begin) < funkerRetryTimeout; i++ { + result, err := executeTestChunk(funkerName, args) + if err == nil { + log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i) + return result, nil + } + if errorSeemsInteresting(err) { + log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v", + funkerName, args.ChunkID, i, err) + } + // TODO: non-constant sleep + time.Sleep(funkerRetryDuration) + } + return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout) +} + +// errorSeemsInteresting returns true if err does not seem about https://github.com/bfirsh/funker/issues/3 +func errorSeemsInteresting(err error) bool { + boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"} + errS := err.Error() + for _, boringS := range boringSubstrs { + if strings.Contains(errS, boringS) { + return false + } + } + return true +} diff --git a/hack/integration-cli-on-swarm/agent/master/master.go b/hack/integration-cli-on-swarm/agent/master/master.go new file mode 100644 index 0000000..a0d9a0d --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/master/master.go @@ -0,0 +1,65 @@ +package main + +import ( + "errors" + "flag" + "io/ioutil" + "log" + "strings" +) + +func main() { + if err := xmain(); err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func xmain() error { + workerService := flag.String("worker-service", "", "Name of worker service") + chunks := flag.Int("chunks", 0, "Number of chunks") + input := flag.String("input", "", "Path to input file") + randSeed := flag.Int64("rand-seed", int64(0), "Random seed") + shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") + flag.Parse() + if *workerService == "" { + return errors.New("worker-service unset") + } + if *chunks == 0 { + return errors.New("chunks unset") + } + if *input == "" { + return errors.New("input unset") + } + + tests, err := loadTests(*input) + if err != nil { + return err + } + testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed) + log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks)) + return executeTests(*workerService, testChunks) +} + +func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string { + // shuffling (experimental) mitigates makespan nonuniformity + // Not sure this can cause some locality problem.. 
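+	// (Illustrative aside: chunkStrings in set.go below uses a chunk size of
+	// ceil(len(tests)/numChunks), so e.g. 10 tests split into 4 chunks yields
+	// chunks of sizes 3, 3, 3 and 1.)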
+ if shuffle { + shuffleStrings(tests, randSeed) + } + return chunkStrings(tests, numChunks) +} + +func loadTests(filename string) ([]string, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var tests []string + for _, line := range strings.Split(string(b), "\n") { + s := strings.TrimSpace(line) + if s != "" { + tests = append(tests, s) + } + } + return tests, nil +} diff --git a/hack/integration-cli-on-swarm/agent/master/set.go b/hack/integration-cli-on-swarm/agent/master/set.go new file mode 100644 index 0000000..d28c41d --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/master/set.go @@ -0,0 +1,28 @@ +package main + +import ( + "math/rand" +) + +// chunkStrings chunks the string slice +func chunkStrings(x []string, numChunks int) [][]string { + var result [][]string + chunkSize := (len(x) + numChunks - 1) / numChunks + for i := 0; i < len(x); i += chunkSize { + ub := i + chunkSize + if ub > len(x) { + ub = len(x) + } + result = append(result, x[i:ub]) + } + return result +} + +// shuffleStrings shuffles strings +func shuffleStrings(x []string, seed int64) { + r := rand.New(rand.NewSource(seed)) + for i := range x { + j := r.Intn(i + 1) + x[i], x[j] = x[j], x[i] + } +} diff --git a/hack/integration-cli-on-swarm/agent/master/set_test.go b/hack/integration-cli-on-swarm/agent/master/set_test.go new file mode 100644 index 0000000..dfb7a0b --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/master/set_test.go @@ -0,0 +1,63 @@ +package main + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func generateInput(inputLen int) []string { + input := []string{} + for i := 0; i < inputLen; i++ { + input = append(input, fmt.Sprintf("s%d", i)) + } + + return input +} + +func testChunkStrings(t *testing.T, inputLen, numChunks int) { + t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks) + input := generateInput(inputLen) + result := chunkStrings(input, numChunks) + t.Logf("result has %d chunks", len(result)) + inputReconstructedFromResult := []string{} + for i, chunk := range result { + t.Logf("chunk %d has %d elements", i, len(chunk)) + inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...) 
+ } + if !reflect.DeepEqual(input, inputReconstructedFromResult) { + t.Fatal("input != inputReconstructedFromResult") + } +} + +func TestChunkStrings_4_4(t *testing.T) { + testChunkStrings(t, 4, 4) +} + +func TestChunkStrings_4_1(t *testing.T) { + testChunkStrings(t, 4, 1) +} + +func TestChunkStrings_1_4(t *testing.T) { + testChunkStrings(t, 1, 4) +} + +func TestChunkStrings_1000_8(t *testing.T) { + testChunkStrings(t, 1000, 8) +} + +func TestChunkStrings_1000_9(t *testing.T) { + testChunkStrings(t, 1000, 9) +} + +func testShuffleStrings(t *testing.T, inputLen int, seed int64) { + t.Logf("inputLen=%d, seed=%d", inputLen, seed) + x := generateInput(inputLen) + shuffleStrings(x, seed) + t.Logf("shuffled: %v", x) +} + +func TestShuffleStrings_100(t *testing.T) { + testShuffleStrings(t, 100, time.Now().UnixNano()) +} diff --git a/hack/integration-cli-on-swarm/agent/types/types.go b/hack/integration-cli-on-swarm/agent/types/types.go new file mode 100644 index 0000000..fc598f0 --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/types/types.go @@ -0,0 +1,18 @@ +package types + +// Args is the type for funker args +type Args struct { + // ChunkID is an unique number of the chunk + ChunkID int `json:"chunk_id"` + // Tests is the set of the strings that are passed as `-check.f` filters + Tests []string `json:"tests"` +} + +// Result is the type for funker result +type Result struct { + // ChunkID corresponds to Args.ChunkID + ChunkID int `json:"chunk_id"` + // Code is the exit code + Code int `json:"code"` + RawLog string `json:"raw_log"` +} diff --git a/hack/integration-cli-on-swarm/agent/vendor.conf b/hack/integration-cli-on-swarm/agent/vendor.conf new file mode 100644 index 0000000..efd6d6d --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/vendor.conf @@ -0,0 +1,2 @@ +# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here +github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773 diff --git a/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE new file mode 100644 index 0000000..75191a4 --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go new file mode 100644 index 0000000..ac0338d --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go @@ -0,0 +1,50 @@ +package funker + +import ( + "encoding/json" + "io/ioutil" + "net" + "time" +) + +// Call a Funker function +func Call(name string, args interface{}) (interface{}, error) { + argsJSON, err := json.Marshal(args) + if err != nil { + return nil, err + } + + addr, err := net.ResolveTCPAddr("tcp", name+":9999") + if err != nil { + return nil, err + } + + conn, err := net.DialTCP("tcp", nil, addr) + if err != nil { + return nil, err + } + // Keepalive is a workaround for docker/docker#29655 . + // The implementation of FIN_WAIT2 seems weird on Swarm-mode. + // It seems always refuseing any packet after 60 seconds. 
+ // + // TODO: remove this workaround if the issue gets resolved on the Docker side + if err := conn.SetKeepAlive(true); err != nil { + return nil, err + } + if err := conn.SetKeepAlivePeriod(30 * time.Second); err != nil { + return nil, err + } + if _, err = conn.Write(argsJSON); err != nil { + return nil, err + } + if err = conn.CloseWrite(); err != nil { + return nil, err + } + retJSON, err := ioutil.ReadAll(conn) + if err != nil { + return nil, err + } + var ret interface{} + err = json.Unmarshal(retJSON, &ret) + return ret, err +} diff --git a/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go new file mode 100644 index 0000000..8987899 --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go @@ -0,0 +1,54 @@ +package funker + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "reflect" +) + +// Handle a Funker function. +func Handle(handler interface{}) error { + handlerValue := reflect.ValueOf(handler) + handlerType := handlerValue.Type() + if handlerType.Kind() != reflect.Func || handlerType.NumIn() != 1 || handlerType.NumOut() != 1 { + return fmt.Errorf("Handler must be a function with a single parameter and single return value.") + } + argsValue := reflect.New(handlerType.In(0)) + + listener, err := net.Listen("tcp", ":9999") + if err != nil { + return err + } + conn, err := listener.Accept() + if err != nil { + return err + } + // We close listener, because we only allow single request. + // Note that TCP "backlog" cannot be used for that purpose. + // http://www.perlmonks.org/?node_id=940662 + if err = listener.Close(); err != nil { + return err + } + argsJSON, err := ioutil.ReadAll(conn) + if err != nil { + return err + } + err = json.Unmarshal(argsJSON, argsValue.Interface()) + if err != nil { + return err + } + + ret := handlerValue.Call([]reflect.Value{argsValue.Elem()})[0].Interface() + retJSON, err := json.Marshal(ret) + if err != nil { + return err + } + + if _, err = conn.Write(retJSON); err != nil { + return err + } + + return conn.Close() +} diff --git a/hack/integration-cli-on-swarm/agent/worker/executor.go b/hack/integration-cli-on-swarm/agent/worker/executor.go new file mode 100644 index 0000000..3442b09 --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/worker/executor.go @@ -0,0 +1,118 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +// testChunkExecutor executes integration-cli binary. +// image needs to be the worker image itself. testFlags are OR-set of regexp for filtering tests. 
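+// A minimal conforming implementation (illustrative only) would be
+//     func(image string, tests []string) (int64, string, error) {
+//         return 0, fmt.Sprintf("would run %d filters against %s", len(tests), image), nil
+//     }
+// returning, in order, the exit code, the raw log, and an error;
+// dryTestChunkExecutor below is essentially this.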
+type testChunkExecutor func(image string, tests []string) (int64, string, error) + +func dryTestChunkExecutor() testChunkExecutor { + return func(image string, tests []string) (int64, string, error) { + return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil + } +} + +// privilegedTestChunkExecutor invokes a privileged container from the worker +// service via bind-mounted API socket so as to execute the test chunk +func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor { + return func(image string, tests []string) (int64, string, error) { + cli, err := client.NewEnvClient() + if err != nil { + return 0, "", err + } + // propagate variables from the host (needs to be defined in the compose file) + experimental := os.Getenv("DOCKER_EXPERIMENTAL") + graphdriver := os.Getenv("DOCKER_GRAPHDRIVER") + if graphdriver == "" { + info, err := cli.Info(context.Background()) + if err != nil { + return 0, "", err + } + graphdriver = info.Driver + } + // `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration-cli`) + // but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work. + // + // Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs` + // + // see integration-cli/daemon/daemon.go + daemonDest := "/daemon_dest" + config := container.Config{ + Image: image, + Env: []string{ + "TESTFLAGS=-check.f " + strings.Join(tests, "|"), + "KEEPBUNDLE=1", + "DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli + "DOCKER_EXPERIMENTAL=" + experimental, + "DOCKER_GRAPHDRIVER=" + graphdriver, + "DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest, + }, + Labels: map[string]string{ + "org.dockerproject.integration-cli-on-swarm": "", + "org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. 
you can remove this container manually if the corresponding service is already stopped.", + }, + Entrypoint: []string{"hack/dind"}, + Cmd: []string{"hack/make.sh", "test-integration-cli"}, + } + hostConfig := container.HostConfig{ + AutoRemove: autoRemove, + Privileged: true, + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Target: daemonDest, + }, + }, + } + id, stream, err := runContainer(context.Background(), cli, config, hostConfig) + if err != nil { + return 0, "", err + } + var b bytes.Buffer + teeContainerStream(&b, os.Stdout, os.Stderr, stream) + resultC, errC := cli.ContainerWait(context.Background(), id, "") + select { + case err := <-errC: + return 0, "", err + case result := <-resultC: + return result.StatusCode, b.String(), nil + } + } +} + +func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) { + created, err := cli.ContainerCreate(context.Background(), + &config, &hostConfig, nil, "") + if err != nil { + return "", nil, err + } + if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil { + return "", nil, err + } + stream, err := cli.ContainerLogs(ctx, + created.ID, + types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + }) + return created.ID, stream, err +} + +func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) { + stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream) + stream.Close() +} diff --git a/hack/integration-cli-on-swarm/agent/worker/worker.go b/hack/integration-cli-on-swarm/agent/worker/worker.go new file mode 100644 index 0000000..36ab368 --- /dev/null +++ b/hack/integration-cli-on-swarm/agent/worker/worker.go @@ -0,0 +1,69 @@ +package main + +import ( + "flag" + "fmt" + "log" + "time" + + "github.com/bfirsh/funker-go" + "github.com/docker/distribution/reference" + "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types" +) + +func main() { + if err := xmain(); err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func validImageDigest(s string) bool { + return reference.DigestRegexp.FindString(s) != "" +} + +func xmain() error { + workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself") + dryRun := flag.Bool("dry-run", false, "Dry run") + keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") + flag.Parse() + if !validImageDigest(*workerImageDigest) { + // Because of issue #29582. + // `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag. 
+ // So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally` + return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest) + } + executor := privilegedTestChunkExecutor(!*keepExecutor) + if *dryRun { + executor = dryTestChunkExecutor() + } + return handle(*workerImageDigest, executor) +} + +func handle(workerImageDigest string, executor testChunkExecutor) error { + log.Printf("Waiting for a funker request") + return funker.Handle(func(args *types.Args) types.Result { + log.Printf("Executing chunk %d, contains %d test filters", + args.ChunkID, len(args.Tests)) + begin := time.Now() + code, rawLog, err := executor(workerImageDigest, args.Tests) + if err != nil { + log.Printf("Error while executing chunk %d: %v", args.ChunkID, err) + if code == 0 { + // Make sure this is a failure + code = 1 + } + return types.Result{ + ChunkID: args.ChunkID, + Code: int(code), + RawLog: rawLog, + } + } + elapsed := time.Now().Sub(begin) + log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed) + return types.Result{ + ChunkID: args.ChunkID, + Code: int(code), + RawLog: rawLog, + } + }) +} diff --git a/hack/integration-cli-on-swarm/host/compose.go b/hack/integration-cli-on-swarm/host/compose.go new file mode 100644 index 0000000..a92282a --- /dev/null +++ b/hack/integration-cli-on-swarm/host/compose.go @@ -0,0 +1,122 @@ +package main + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "text/template" + + "github.com/docker/docker/client" +) + +const composeTemplate = `# generated by integration-cli-on-swarm +version: "3" + +services: + worker: + image: "{{.WorkerImage}}" + command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}", "-keep-executor={{.KeepExecutor}}"] + networks: + - net + volumes: +# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}} + - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}} + deploy: + mode: replicated + replicas: {{.Replicas}} + restart_policy: +# The restart condition needs to be any for funker function + condition: any + + master: + image: "{{.MasterImage}}" + command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"] + networks: + - net + volumes: + - {{.Volume}}:/mnt + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: none + placement: +# Make sure the master can access the volume + constraints: [node.id == {{.SelfNodeID}}] + +networks: + net: + +volumes: + {{.Volume}}: + external: true +` + +type composeOptions struct { + Replicas int + Chunks int + MasterImage string + WorkerImage string + Volume string + Shuffle bool + RandSeed int64 + DryRun bool + KeepExecutor bool +} + +type composeTemplateOptions struct { + composeOptions + WorkerImageDigest string + SelfNodeID string + EnvDockerGraphDriver string + EnvDockerExperimental string +} + +// createCompose creates "dir/docker-compose.yml". +// If dir is empty, TempDir() is used. 
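createCompose (below) renders composeTemplate with the resolved options via Go's text/template. As a standalone illustration of that mechanism, a minimal sketch with a trimmed-down template and hypothetical values, writing the YAML to stdout instead of docker-compose.yml:

	package main

	import (
		"os"
		"text/template"
	)

	// A trimmed-down stand-in for composeTemplate, for illustration only.
	const demo = `services:
	  worker:
	    image: "{{.WorkerImage}}"
	    deploy:
	      replicas: {{.Replicas}}
	`

	func main() {
		opts := struct {
			WorkerImage string
			Replicas    int
		}{WorkerImage: "integration-cli-worker", Replicas: 4}
		tmpl := template.Must(template.New("").Parse(demo))
		if err := tmpl.Execute(os.Stdout, opts); err != nil {
			panic(err)
		}
	}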
+func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) { + if dir == "" { + var err error + dir, err = ioutil.TempDir("", "integration-cli-on-swarm-") + if err != nil { + return "", err + } + } + resolved := composeTemplateOptions{} + resolved.composeOptions = opts + workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName) + if err != nil { + return "", err + } + if len(workerImageInspect.RepoDigests) > 0 { + resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0] + } else { + // fall back for non-pushed image + resolved.WorkerImageDigest = workerImageInspect.ID + } + info, err := cli.Info(context.Background()) + if err != nil { + return "", err + } + resolved.SelfNodeID = info.Swarm.NodeID + resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER") + resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL") + composeFilePath := filepath.Join(dir, "docker-compose.yml") + tmpl, err := template.New("").Parse(composeTemplate) + if err != nil { + return "", err + } + f, err := os.Create(composeFilePath) + if err != nil { + return "", err + } + defer f.Close() + if err = tmpl.Execute(f, resolved); err != nil { + return "", err + } + return composeFilePath, nil +} diff --git a/hack/integration-cli-on-swarm/host/dockercmd.go b/hack/integration-cli-on-swarm/host/dockercmd.go new file mode 100644 index 0000000..10ea0ec --- /dev/null +++ b/hack/integration-cli-on-swarm/host/dockercmd.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/client" +) + +func system(commands [][]string) error { + for _, c := range commands { + cmd := exec.Command(c[0], c[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = os.Environ() + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func pushImage(unusedCli *client.Client, remote, local string) error { + // FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...) + return system([][]string{ + {"docker", "image", "tag", local, remote}, + {"docker", "image", "push", remote}, + }) +} + +func deployStack(unusedCli *client.Client, stackName, composeFilePath string) error { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) + return system([][]string{ + {"docker", "stack", "deploy", + "--compose-file", composeFilePath, + "--with-registry-auth", + stackName}, + }) +} + +func hasStack(unusedCli *client.Client, stackName string) bool { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) + out, err := exec.Command("docker", "stack", "ls").Output() + if err != nil { + panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out))) + } + // FIXME: not accurate + return strings.Contains(string(out), stackName) +} + +func removeStack(unusedCli *client.Client, stackName string) error { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) 
+	if err := system([][]string{
+		{"docker", "stack", "rm", stackName},
+	}); err != nil {
+		return err
+	}
+	// FIXME
+	time.Sleep(10 * time.Second)
+	return nil
+}
diff --git a/hack/integration-cli-on-swarm/host/enumerate.go b/hack/integration-cli-on-swarm/host/enumerate.go
new file mode 100644
index 0000000..08e5ac7
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/enumerate.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"regexp"
+)
+
+var testFuncRegexp *regexp.Regexp
+
+func init() {
+	testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`)
+}
+
+func enumerateTestsForBytes(b []byte) ([]string, error) {
+	var tests []string
+	submatches := testFuncRegexp.FindAllSubmatch(b, -1)
+	for _, submatch := range submatches {
+		if len(submatch) == 3 {
+			tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2]))
+		}
+	}
+	return tests, nil
+}
+
+// enumerateTests enumerates valid `-check.f` strings for all the test functions.
+// Note that we use a regexp rather than parsing the Go files, for performance reasons.
+// (Try `TESTFLAGS=-check.list make test-integration-cli` to see how slow parsing is.)
+// The files need to be `gofmt`-ed.
+//
+// The result will be as follows, but unsorted ('$' is appended because they are regexps for `-check.f`):
+//  "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$"
+//  "DockerAuthzSuite.TestAuthZPluginAllowEventStream$"
+//  ...
+//  "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$"
+func enumerateTests(wd string) ([]string, error) {
+	testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go"))
+	if err != nil {
+		return nil, err
+	}
+	var allTests []string
+	for _, testGoFile := range testGoFiles {
+		b, err := ioutil.ReadFile(testGoFile)
+		if err != nil {
+			return nil, err
+		}
+		tests, err := enumerateTestsForBytes(b)
+		if err != nil {
+			return nil, err
+		}
+		allTests = append(allTests, tests...)
+	}
+	return allTests, nil
+}
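The filter strings produced by enumerateTests are what the worker's executor later joins into a single `-check.f` expression (see privilegedTestChunkExecutor above). A small sketch of that join, with hypothetical filters:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		tests := []string{"FooSuite.TestA$", "BarSuite.TestBar$"}
		// Mirrors how the executor builds the TESTFLAGS environment variable.
		fmt.Println("TESTFLAGS=-check.f " + strings.Join(tests, "|"))
		// Output: TESTFLAGS=-check.f FooSuite.TestA$|BarSuite.TestBar$
	}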
diff --git a/hack/integration-cli-on-swarm/host/enumerate_test.go b/hack/integration-cli-on-swarm/host/enumerate_test.go
new file mode 100644
index 0000000..d6049ae
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/enumerate_test.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+)
+
+func getRepoTopDir(t *testing.T) string {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	wd = filepath.Clean(wd)
+	suffix := "hack/integration-cli-on-swarm/host"
+	if !strings.HasSuffix(wd, suffix) {
+		t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd)
+	}
+	return filepath.Clean(filepath.Join(wd, "../../.."))
+}
+
+func TestEnumerateTests(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping in short mode")
+	}
+	tests, err := enumerateTests(getRepoTopDir(t))
+	if err != nil {
+		t.Fatal(err)
+	}
+	sort.Strings(tests)
+	t.Logf("enumerated %d test filter strings:", len(tests))
+	for _, s := range tests {
+		t.Logf("- %q", s)
+	}
+}
+
+func TestEnumerateTestsForBytes(t *testing.T) {
+	b := []byte(`package main
+import (
+	"github.com/go-check/check"
+)
+
+func (s *FooSuite) TestA(c *check.C) {
+}
+
+func (s *FooSuite) TestAAA(c *check.C) {
+}
+
+func (s *BarSuite) TestBar(c *check.C) {
+}
+
+func (x *FooSuite) TestC(c *check.C) {
+}
+
+func (*FooSuite) TestD(c *check.C) {
+}
+
+// should not be counted
+func (s *FooSuite) testE(c *check.C) {
+}
+
+// counted, although we don't support un-gofmt-ed files
+   func (s *FooSuite) TestF (c *check.C){}
+`)
+	expected := []string{
+		"FooSuite.TestA$",
+		"FooSuite.TestAAA$",
+		"BarSuite.TestBar$",
+		"FooSuite.TestC$",
+		"FooSuite.TestD$",
+		"FooSuite.TestF$",
+	}
+
+	actual, err := enumerateTestsForBytes(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("expected %q, got %q", expected, actual)
+	}
+}
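host.go, which follows, locates the master container by the labels that `docker stack deploy` attaches to stack resources (see waitForMasterUp below, including its FIXME about relying on the internal service-naming convention). A hedged standalone sketch of that label-based lookup:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/api/types/filters"
		"github.com/docker/docker/client"
	)

	func main() {
		cli, err := client.NewEnvClient()
		if err != nil {
			log.Fatal(err)
		}
		fil := filters.NewArgs()
		// Stack resources carry these labels; the service-name value relies
		// on the same internal convention the FIXME below warns about.
		fil.Add("label", "com.docker.stack.namespace=integration-cli-on-swarm")
		fil.Add("label", "com.docker.swarm.service.name=integration-cli-on-swarm_master")
		containers, err := cli.ContainerList(context.Background(),
			types.ContainerListOptions{All: true, Filters: fil})
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range containers {
			fmt.Println(c.ID)
		}
	}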
diff --git a/hack/integration-cli-on-swarm/host/host.go b/hack/integration-cli-on-swarm/host/host.go
new file mode 100644
index 0000000..d881b7a
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/host.go
@@ -0,0 +1,198 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/pkg/stdcopy"
+)
+
+const (
+	defaultStackName       = "integration-cli-on-swarm"
+	defaultVolumeName      = "integration-cli-on-swarm"
+	defaultMasterImageName = "integration-cli-master"
+	defaultWorkerImageName = "integration-cli-worker"
+)
+
+func main() {
+	rc, err := xmain()
+	if err != nil {
+		logrus.Fatalf("fatal error: %v", err)
+	}
+	os.Exit(rc)
+}
+
+func xmain() (int, error) {
+	// Should we use cobra maybe?
+	replicas := flag.Int("replicas", 1, "Number of worker service replicas")
+	chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)")
+	pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. (empty == not to push)")
+	shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
+	// flags below are rarely used
+	randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)")
+	filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings")
+	dryRun := flag.Bool("dry-run", false, "Dry run")
+	keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm")
+	flag.Parse()
+	if *chunks == 0 {
+		*chunks = *replicas
+	}
+	if *randSeed == int64(0) {
+		*randSeed = time.Now().UnixNano()
+	}
+	cli, err := client.NewEnvClient()
+	if err != nil {
+		return 1, err
+	}
+	if hasStack(cli, defaultStackName) {
+		logrus.Infof("Removing stack %s", defaultStackName)
+		removeStack(cli, defaultStackName)
+	}
+	if hasVolume(cli, defaultVolumeName) {
+		logrus.Infof("Removing volume %s", defaultVolumeName)
+		removeVolume(cli, defaultVolumeName)
+	}
+	if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil {
+		return 1, err
+	}
+	workerImageForStack := defaultWorkerImageName
+	if *pushWorkerImage != "" {
+		logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage)
+		if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil {
+			return 1, err
+		}
+		workerImageForStack = *pushWorkerImage
+	}
+	compose, err := createCompose("", cli, composeOptions{
+		Replicas:     *replicas,
+		Chunks:       *chunks,
+		MasterImage:  defaultMasterImageName,
+		WorkerImage:  workerImageForStack,
+		Volume:       defaultVolumeName,
+		Shuffle:      *shuffle,
+		RandSeed:     *randSeed,
+		DryRun:       *dryRun,
+		KeepExecutor: *keepExecutor,
+	})
+	if err != nil {
+		return 1, err
+	}
+	filters, err := filtersBytes(*filtersFile)
+	if err != nil {
+		return 1, err
+	}
+	logrus.Infof("Creating volume %s with input data", defaultVolumeName)
+	if err = createVolumeWithData(cli,
+		defaultVolumeName,
+		map[string][]byte{"/input": filters},
+		defaultMasterImageName); err != nil {
+		return 1, err
+	}
+	logrus.Infof("Deploying stack %s from %s", defaultStackName, compose)
+	defer func() {
+		logrus.Infof("NOTE: You may want to inspect or clean up the following resources:")
+		logrus.Infof(" - Stack: %s", defaultStackName)
+		logrus.Infof(" - Volume: %s", defaultVolumeName)
+		logrus.Infof(" - Compose file: %s", compose)
+		logrus.Infof(" - Master image: %s", defaultMasterImageName)
+		logrus.Infof(" - Worker image: %s", workerImageForStack)
+	}()
+	if err = deployStack(cli, defaultStackName, compose); err != nil {
+		return 1, err
+	}
+	logrus.Infof("The log will be displayed here after some duration. "+
+		"You can watch the live status via `docker service logs %s_worker`",
+		defaultStackName)
+	masterContainerID, err := waitForMasterUp(cli, defaultStackName)
+	if err != nil {
+		return 1, err
+	}
+	rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID)
+	if err != nil {
+		return 1, err
+	}
+	logrus.Infof("Exit status: %d", rc)
+	return int(rc), nil
+}
+
+func ensureImages(cli *client.Client, images []string) error {
+	for _, image := range images {
+		_, _, err := cli.ImageInspectWithRaw(context.Background(), image)
+		if err != nil {
+			return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v",
+				image, err)
+		}
+	}
+	return nil
+}
+
+func filtersBytes(optionalFiltersFile string) ([]byte, error) {
+	var b []byte
+	if optionalFiltersFile == "" {
+ tests, err := enumerateTests(".") + if err != nil { + return b, err + } + b = []byte(strings.Join(tests, "\n") + "\n") + } else { + var err error + b, err = ioutil.ReadFile(optionalFiltersFile) + if err != nil { + return b, err + } + } + return b, nil +} + +func waitForMasterUp(cli *client.Client, stackName string) (string, error) { + // FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping + time.Sleep(10 * time.Second) + + fil := filters.NewArgs() + fil.Add("label", "com.docker.stack.namespace="+stackName) + // FIXME(AkihiroSuda): we should not rely on internal service naming convention + fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master") + masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{ + All: true, + Filters: fil, + }) + if err != nil { + return "", err + } + if len(masters) == 0 { + return "", fmt.Errorf("master not running in stack %s?", stackName) + } + return masters[0].ID, nil +} + +func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) { + stream, err := cli.ContainerLogs(context.Background(), + containerID, + types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + }) + if err != nil { + return 1, err + } + stdcopy.StdCopy(stdout, stderr, stream) + stream.Close() + resultC, errC := cli.ContainerWait(context.Background(), containerID, "") + select { + case err := <-errC: + return 1, err + case result := <-resultC: + return result.StatusCode, nil + } +} diff --git a/hack/integration-cli-on-swarm/host/volume.go b/hack/integration-cli-on-swarm/host/volume.go new file mode 100644 index 0000000..c2f9698 --- /dev/null +++ b/hack/integration-cli-on-swarm/host/volume.go @@ -0,0 +1,88 @@ +package main + +import ( + "archive/tar" + "bytes" + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" +) + +func createTar(data map[string][]byte) (io.Reader, error) { + var b bytes.Buffer + tw := tar.NewWriter(&b) + for path, datum := range data { + hdr := tar.Header{ + Name: path, + Mode: 0644, + Size: int64(len(datum)), + } + if err := tw.WriteHeader(&hdr); err != nil { + return nil, err + } + _, err := tw.Write(datum) + if err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return &b, nil +} + +// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar")) +// Internally, a container is created from the image so as to provision the data to the volume, +// which is attached to the container. 
+func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error {
+	_, err := cli.VolumeCreate(context.Background(),
+		volume.VolumesCreateBody{
+			Driver: "local",
+			Name:   volumeName,
+		})
+	if err != nil {
+		return err
+	}
+	mnt := "/mnt"
+	miniContainer, err := cli.ContainerCreate(context.Background(),
+		&container.Config{
+			Image: image,
+		},
+		&container.HostConfig{
+			Mounts: []mount.Mount{
+				{
+					Type:   mount.TypeVolume,
+					Source: volumeName,
+					Target: mnt,
+				},
+			},
+		}, nil, "")
+	if err != nil {
+		return err
+	}
+	tr, err := createTar(data)
+	if err != nil {
+		return err
+	}
+	if err = cli.CopyToContainer(context.Background(),
+		miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil {
+		return err
+	}
+	return cli.ContainerRemove(context.Background(),
+		miniContainer.ID,
+		types.ContainerRemoveOptions{})
+}
+
+func hasVolume(cli *client.Client, volumeName string) bool {
+	_, err := cli.VolumeInspect(context.Background(), volumeName)
+	return err == nil
+}
+
+func removeVolume(cli *client.Client, volumeName string) error {
+	return cli.VolumeRemove(context.Background(), volumeName, true)
+}
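createVolumeWithData above is the provisioning half of the input pipeline: host.go calls it with map[string][]byte{"/input": filters} so that the master service finds its input at /mnt/input. A hedged usage sketch (assumes it is called from the same package; the volume name and image are examples):

	func provisionDemo() error {
		cli, err := client.NewEnvClient()
		if err != nil {
			return err
		}
		data := map[string][]byte{
			"/input": []byte("FooSuite.TestA$\nBarSuite.TestBar$\n"),
		}
		// Any locally available image works; it is only used to mount the volume.
		return createVolumeWithData(cli, "demo-input", data, "busybox")
	}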
diff --git a/hack/make.ps1 b/hack/make.ps1
new file mode 100644
index 0000000..c1cdad3
--- /dev/null
+++ b/hack/make.ps1
@@ -0,0 +1,472 @@
+<#
+.NOTES
+    Author:  @jhowardmsft
+
+    Summary: Windows native build script. This is similar to functionality provided
+             by hack\make.sh, but uses native Windows PowerShell semantics. It does
+             not support the full set of options provided by the Linux counterpart.
+             For example:
+
+             - You can't cross-build Linux docker binaries on Windows
+             - Hashes aren't generated on binaries
+             - 'Releasing' isn't supported.
+             - Integration tests, because they currently cannot run inside a container
+               and require significant external setup.
+
+             It does, however, provide the minimum necessary to support parts of local
+             Windows development and Windows-to-Windows CI.
+
+             Usage Examples (run from repo root):
+                "hack\make.ps1 -Client" to build docker.exe client 64-bit binary (remote repo)
+                "hack\make.ps1 -TestUnit" to run unit tests
+                "hack\make.ps1 -Daemon -TestUnit" to build the daemon and run unit tests
+                "hack\make.ps1 -All" to run everything this script knows about that can run in a container
+                "hack\make.ps1" to build the daemon binary (same as -Daemon)
+                "hack\make.ps1 -Binary" shortcut to -Client and -Daemon
+
+.PARAMETER Client
+     Builds the client binaries.
+
+.PARAMETER Daemon
+     Builds the daemon binary.
+
+.PARAMETER Binary
+     Builds the client and daemon binaries. A convenient shortcut to `make.ps1 -Client -Daemon`.
+
+.PARAMETER Race
+     Use -race in go build and go test.
+
+.PARAMETER Noisy
+     Use -v in go build.
+
+.PARAMETER ForceBuildAll
+     Use -a in go build.
+
+.PARAMETER NoOpt
+     Use -gcflags -N -l in go build to disable optimisation (can aid debugging).
+
+.PARAMETER CommitSuffix
+     Adds a custom string to be appended to the commit ID (spaces are stripped).
+
+.PARAMETER DCO
+     Runs the DCO (Developer Certificate Of Origin) test (must be run outside a container).
+
+.PARAMETER PkgImports
+     Runs the pkg\ directory imports test (must be run outside a container).
+
+.PARAMETER GoFormat
+     Runs the Go formatting test (must be run outside a container).
+
+.PARAMETER TestUnit
+     Runs unit tests.
+
+.PARAMETER All
+     Runs everything this script knows about that can run in a container.
+
+
+TODO
+- Unify the head commit
+- Add golint and other checks (swagger maybe?)
+
+#>
+
+
+param(
+    [Parameter(Mandatory=$False)][switch]$Client,
+    [Parameter(Mandatory=$False)][switch]$Daemon,
+    [Parameter(Mandatory=$False)][switch]$Binary,
+    [Parameter(Mandatory=$False)][switch]$Race,
+    [Parameter(Mandatory=$False)][switch]$Noisy,
+    [Parameter(Mandatory=$False)][switch]$ForceBuildAll,
+    [Parameter(Mandatory=$False)][switch]$NoOpt,
+    [Parameter(Mandatory=$False)][string]$CommitSuffix="",
+    [Parameter(Mandatory=$False)][switch]$DCO,
+    [Parameter(Mandatory=$False)][switch]$PkgImports,
+    [Parameter(Mandatory=$False)][switch]$GoFormat,
+    [Parameter(Mandatory=$False)][switch]$TestUnit,
+    [Parameter(Mandatory=$False)][switch]$All
+)
+
+$ErrorActionPreference = "Stop"
+$pushed=$False  # To restore the directory if we have temporarily pushed to one.
+
+# Utility function to get the commit ID of the repository
+Function Get-GitCommit() {
+    if (-not (Test-Path ".\.git")) {
+        # If we don't have a .git directory, but we do have the environment
+        # variable DOCKER_GITCOMMIT set, that can override it.
+        if ($env:DOCKER_GITCOMMIT.Length -eq 0) {
+            Throw ".git directory missing and DOCKER_GITCOMMIT environment variable not specified."
+        }
+        Write-Host "INFO: Git commit ($env:DOCKER_GITCOMMIT) assumed from DOCKER_GITCOMMIT environment variable"
+        return $env:DOCKER_GITCOMMIT
+    }
+    $gitCommit=$(git rev-parse --short HEAD)
+    if ($(git status --porcelain --untracked-files=no).Length -ne 0) {
+        $gitCommit="$gitCommit-unsupported"
+        Write-Host ""
+        Write-Warning "This version is unsupported because there are uncommitted file(s)."
+        Write-Warning "Either commit these changes, or add them to .gitignore."
+        git status --porcelain --untracked-files=no | Write-Warning
+        Write-Host ""
+    }
+    return $gitCommit
+}
+
+# Utility function to get the current build version of docker
+Function Get-DockerVersion() {
+    if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" }
+    return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim()
+}
+
+# Utility function to determine if we are running in a container or not.
+# In Windows, we get this through an environment variable set in `Dockerfile.Windows`
+Function Check-InContainer() {
+    if ($env:FROM_DOCKERFILE.Length -eq 0) {
+        Write-Host ""
+        Write-Warning "Not running in a container. The result might be an incorrect build."
+        Write-Host ""
+        return $False
+    }
+    return $True
+}
+
+# Utility function to warn if the installed version of go does not match the one
+# in the Dockerfile. Used for local builds outside of a container, where the
+# toolchain may be out of date with master.
+Function Verify-GoVersion() {
+    Try {
+        $goVersionDockerfile=(Get-Content ".\Dockerfile" | Select-String "ENV GO_VERSION").ToString().Split(" ")[2]
+        $goVersionInstalled=(go version).ToString().Split(" ")[2].SubString(2)
+    }
+    Catch [Exception] {
+        Throw "Failed to validate go version correctness: $_"
+    }
+    if (-not($goVersionInstalled -eq $goVersionDockerfile)) {
+        Write-Host ""
+        Write-Warning "Building with golang version $goVersionInstalled. You should update to $goVersionDockerfile"
+        Write-Host ""
+    }
+}
+
+# Utility function to get the commit for HEAD
+Function Get-HeadCommit() {
+    $head = Invoke-Expression "git rev-parse --verify HEAD"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed getting HEAD commit" }
+
+    return $head
+}
+
+# Utility function to get the commit for upstream
+Function Get-UpstreamCommit() {
+    Invoke-Expression "git fetch -q https://github.com/docker/docker.git refs/heads/master"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed fetching" }
+
+    $upstream = Invoke-Expression "git rev-parse --verify FETCH_HEAD"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed getting upstream commit" }
+
+    return $upstream
+}
+
+# Build a binary (client or daemon)
+Function Execute-Build($type, $additionalBuildTags, $directory) {
+    # Generate the build flags
+    $buildTags = "autogen"
+    if ($Noisy)         { $verboseParm=" -v" }
+    if ($Race)          { Write-Warning "Using race detector"; $raceParm=" -race"}
+    if ($ForceBuildAll) { $allParm=" -a" }
+    if ($NoOpt)         { $optParm=" -gcflags "+""""+"-N -l"+"""" }
+    if ($additionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) }
+
+    # Do the go build in the appropriate directory
+    # Note -linkmode=internal is required to be able to debug on Windows.
+    # https://github.com/golang/go/issues/14319#issuecomment-189576638
+    Write-Host "INFO: Building $type..."
+    Push-Location $root\cmd\$directory; $global:pushed=$True
+    $buildCommand = "go build" + `
+                    $raceParm + `
+                    $verboseParm + `
+                    $allParm + `
+                    $optParm + `
+                    " -tags """ + $buildTags + """" + `
+                    " -ldflags """ + "-linkmode=internal" + """" + `
+                    " -o $root\bundles\"+$directory+".exe"
+    Invoke-Expression $buildCommand
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile $type" }
+    Pop-Location; $global:pushed=$False
+}
+
+
+# Validates the DCO marker is present on each commit
+Function Validate-DCO($headCommit, $upstreamCommit) {
+    Write-Host "INFO: Validating Developer Certificate of Origin..."
+    # Username may only contain alphanumeric characters or dashes and cannot begin with a dash
+    $usernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'
+
+    $dcoPrefix="Signed-off-by:"
+    $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \(github: ($usernameRegex)\))?$"
+
+    $counts = Invoke-Expression "git diff --numstat $upstreamCommit...$headCommit"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" }
+
+    # Counts of adds and deletes after removing multiple white spaces. AWK anyone? :(
+    $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{
+        $a=$_.Split(" ");
+        if ($a[0] -ne "-") { $adds+=[int]$a[0] }
+        if ($a[1] -ne "-") { $dels+=[int]$a[1] }
+    }
+    if (($adds -eq 0) -and ($dels -eq 0)) {
+        Write-Warning "DCO validation - nothing to validate!"
+        return
+    }
+
+    $commits = Invoke-Expression "git log $upstreamCommit..$headCommit --format=format:%H%n"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed git log --format" }
+    $commits = $($commits -split '\s+' -match '\S')
+    $badCommits=@()
+    $commits | %{
+        # Skip commits with no content, such as merge commits
+        if ($(git log -1 --format=format: --name-status $_).Length -gt 0) {
+            # Ignore exit code on next call - always process regardless
+            $commitMessage = Invoke-Expression "git log -1 --format=format:%B --name-status $_"
+            if (($commitMessage -match $dcoRegex).Length -eq 0) { $badCommits+=$_ }
+        }
+    }
+    if ($badCommits.Length -eq 0) {
+        Write-Host "Congratulations! All commits are properly signed with the DCO!"
+ } else { + $e = "`nThese commits do not have a proper '$dcoPrefix' marker:`n" + $badCommits | %{ $e+=" - $_`n"} + $e += "`nPlease amend each commit to include a properly formatted DCO marker.`n`n" + $e += "Visit the following URL for information about the Docker DCO:`n" + $e += "https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work`n" + Throw $e + } +} + +# Validates that .\pkg\... is safely isolated from internal code +Function Validate-PkgImports($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating pkg import isolation..." + + # Get a list of go source-code files which have changed under pkg\. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'pkg\*.go`'" + $badFiles=@(); $files | %{ + $file=$_ + # For the current changed file, get its list of dependencies, sorted and uniqued. + $imports = Invoke-Expression "go list -e -f `'{{ .Deps }}`' $file" + if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" } + $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique + # Filter out what we are looking for + $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" ` + -NotMatch "^github.com/docker/docker/vendor" ` + -Match "^github.com/docker/docker" ` + -Replace "`n", "" + $imports | % { $badFiles+="$file imports $_`n" } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! ".\pkg\*.go" is safely isolated from internal code.' + } else { + $e = "`nThese files import internal code: (either directly or indirectly)`n" + $badFiles | %{ $e+=" - $_"} + Throw $e + } +} + +# Validates that changed files are correctly go-formatted +Function Validate-GoFormat($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating go formatting on changed files..." + + # Verify gofmt is installed + if ($(Get-Command gofmt -ErrorAction SilentlyContinue) -eq $nil) { Throw "gofmt does not appear to be installed" } + + # Get a list of all go source-code files which have changed. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" + $files = $files | Select-String -NotMatch "^vendor/" + $badFiles=@(); $files | %{ + # Deliberately ignore error on next line - treat as failed + $content=Invoke-Expression "git show $headCommit`:$_" + + # Next set of hoops are to ensure we have LF not CRLF semantics as otherwise gofmt on Windows will not succeed. + # Also note that gofmt on Windows does not appear to support stdin piping correctly. Hence go through a temporary file. + $content=$content -join "`n" + $content+="`n" + $outputFile=[System.IO.Path]::GetTempFileName() + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $content, (New-Object System.Text.UTF8Encoding($False))) + $currentFile = $_ -Replace("/","\") + Write-Host Checking $currentFile + Invoke-Expression "gofmt -s -l $outputFile" + if ($LASTEXITCODE -ne 0) { $badFiles+=$currentFile } + if (Test-Path $outputFile) { Remove-Item $outputFile } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! All Go source files are properly formatted.' + } else { + $e = "`nThese files are not properly gofmt`'d:`n" + $badFiles | %{ $e+=" - $_`n"} + $e+= "`nPlease reformat the above files using `"gofmt -s -w`" and commit the result." 
+ Throw $e + } +} + +# Run the unit tests +Function Run-UnitTests() { + Write-Host "INFO: Running unit tests..." + $testPath="./..." + $goListCommand = "go list -e -f '{{if ne .Name """ + '\"github.com/docker/docker\"' + """}}{{.ImportPath}}{{end}}' $testPath" + $pkgList = $(Invoke-Expression $goListCommand) + if ($LASTEXITCODE -ne 0) { Throw "go list for unit tests failed" } + $pkgList = $pkgList | Select-String -Pattern "github.com/docker/docker" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/vendor" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/man" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration-cli" + $pkgList = $pkgList -replace "`r`n", " " + $goTestCommand = "go test" + $raceParm + " -cover -ldflags -w -tags """ + "autogen daemon" + """ -a """ + "-test.timeout=10m" + """ $pkgList" + Invoke-Expression $goTestCommand + if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" } +} + +# Start of main code. +Try { + Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)" + + # Get to the root of the repo + $root = $(Split-Path $MyInvocation.MyCommand.Definition -Parent | Split-Path -Parent) + Push-Location $root + + # Handle the "-All" shortcut to turn on all things we can handle. + # Note we expressly only include the items which can run in a container - the validations tests cannot + # as they require the .git directory which is excluded from the image by .dockerignore + if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True } + + # Handle the "-Binary" shortcut to build both client and daemon. + if ($Binary) { $Client = $True; $Daemon = $True } + + # Default to building the daemon if not asked for anything explicitly. + if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True } + + # Verify git is installed + if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" } + + # Verify go is installed + if ($(Get-Command go -ErrorAction SilentlyContinue) -eq $nil) { Throw "GoLang does not appear to be installed" } + + # Get the git commit. This will also verify if we are in a repo or not. Then add a custom string if supplied. + $gitCommit=Get-GitCommit + if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' } + + # Get the version of docker (eg 17.04.0-dev) + $dockerVersion=Get-DockerVersion + + # Give a warning if we are not running in a container and are building binaries or running unit tests. + # Not relevant for validation tests as these are fine to run outside of a container. + if ($Client -or $Daemon -or $TestUnit) { $inContainer=Check-InContainer } + + # If we are not in a container, validate the version of GO that is installed. + if (-not $inContainer) { Verify-GoVersion } + + # Verify GOPATH is set + if ($env:GOPATH.Length -eq 0) { Throw "Missing GOPATH environment variable. See https://golang.org/doc/code.html#GOPATH" } + + # Run autogen if building binaries or running unit tests. + if ($Client -or $Daemon -or $TestUnit) { + Write-Host "INFO: Invoking autogen..." + Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion } + Catch [Exception] { Throw $_ } + } + + # DCO, Package import and Go formatting tests. 
+ if ($DCO -or $PkgImports -or $GoFormat) { + # We need the head and upstream commits for these + $headCommit=Get-HeadCommit + $upstreamCommit=Get-UpstreamCommit + + # Run DCO validation + if ($DCO) { Validate-DCO $headCommit $upstreamCommit } + + # Run `gofmt` validation + if ($GoFormat) { Validate-GoFormat $headCommit $upstreamCommit } + + # Run pkg isolation validation + if ($PkgImports) { Validate-PkgImports $headCommit $upstreamCommit } + } + + # Build the binaries + if ($Client -or $Daemon) { + # Create the bundles directory if it doesn't exist + if (-not (Test-Path ".\bundles")) { New-Item ".\bundles" -ItemType Directory | Out-Null } + + # Perform the actual build + if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } + if ($Client) { + # Get the repo and commit of the client to build. + "hack\dockerfile\binaries-commits" | ForEach-Object { + $dockerCliRepo = ((Get-Content $_ | Select-String "DOCKERCLI_REPO") -split "=")[1] + $dockerCliCommit = ((Get-Content $_ | Select-String "DOCKERCLI_COMMIT") -split "=")[1] + } + + # Build from a temporary directory. + $tempLocation = "$env:TEMP\$(New-Guid)" + New-Item -ItemType Directory $tempLocation | Out-Null + + # Temporarily override GOPATH, then clone, checkout, and build. + $saveGOPATH = $env:GOPATH + Try { + $env:GOPATH = $tempLocation + $dockerCliRoot = "$env:GOPATH\src\github.com\docker\cli" + Write-Host "INFO: Cloning client repository..." + Invoke-Expression "git clone -q $dockerCliRepo $dockerCliRoot" + if ($LASTEXITCODE -ne 0) { Throw "Failed to clone client repository $dockerCliRepo" } + Invoke-Expression "git -C $dockerCliRoot checkout -q $dockerCliCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed to checkout client commit $dockerCliCommit" } + Write-Host "INFO: Building client..." + Push-Location "$dockerCliRoot\cmd\docker"; $global:pushed=$True + Invoke-Expression "go build -o $root\bundles\docker.exe" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client" } + Pop-Location; $global:pushed=$False + } + Catch [Exception] { + Throw $_ + } + Finally { + # Always restore GOPATH and remove the temporary directory. + $env:GOPATH = $saveGOPATH + Remove-Item -Force -Recurse $tempLocation + } + } + } + + # Run unit tests + if ($TestUnit) { Run-UnitTests } + + # Gratuitous ASCII art. + if ($Daemon -or $Client) { + Write-Host + Write-Host -ForegroundColor Green " ________ ____ __." + Write-Host -ForegroundColor Green " \_____ \ `| `|/ _`|" + Write-Host -ForegroundColor Green " / `| \`| `<" + Write-Host -ForegroundColor Green " / `| \ `| \" + Write-Host -ForegroundColor Green " \_______ /____`|__ \" + Write-Host -ForegroundColor Green " \/ \/" + Write-Host + } +} +Catch [Exception] { + Write-Host -ForegroundColor Red ("`nERROR: make.ps1 failed:`n$_") + + # More gratuitous ASCII art. 
+ Write-Host + Write-Host -ForegroundColor Red "___________ .__.__ .___" + Write-Host -ForegroundColor Red "\_ _____/____ `|__`| `| ____ __`| _/" + Write-Host -ForegroundColor Red " `| __) \__ \ `| `| `| _/ __ \ / __ `| " + Write-Host -ForegroundColor Red " `| \ / __ \`| `| `|_\ ___// /_/ `| " + Write-Host -ForegroundColor Red " \___ / (____ /__`|____/\___ `>____ `| " + Write-Host -ForegroundColor Red " \/ \/ \/ \/ " + Write-Host + + Throw $_ +} +Finally { + Pop-Location # As we pushed to the root of the repo as the very first thing + if ($global:pushed) { Pop-Location } + Write-Host -ForegroundColor Cyan "INFO: make.ps1 ended at $(Get-Date)" +} diff --git a/hack/make.sh b/hack/make.sh new file mode 100755 index 0000000..ab1d1da --- /dev/null +++ b/hack/make.sh @@ -0,0 +1,296 @@ +#!/usr/bin/env bash +set -e + +# This script builds various binary artifacts from a checkout of the docker +# source code. +# +# Requirements: +# - The current directory should be a checkout of the docker source code +# (https://github.com/docker/docker). Whatever version is checked out +# will be built. +# - The VERSION file, at the root of the repository, should exist, and +# will be used as Docker binary version and package version. +# - The hash of the git commit will also be included in the Docker binary, +# with the suffix -unsupported if the repository isn't clean. +# - The script is intended to be run inside the docker container specified +# in the Dockerfile at the root of the source. In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. +# - The right way to call this script is to invoke "make" from +# your checkout of the Docker repository. +# the Makefile will do a "docker build -t docker ." and then +# "docker run hack/make.sh" in the resulting image. +# + +set -o pipefail + +export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" +export PKG_CONFIG=${PKG_CONFIG:-pkg-config} + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +inContainer="AssumeSoInitially" +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + if [ -z "$FROM_DOCKERFILE" ]; then + unset inContainer + fi +else + if [ "$PWD" != "/go/src/$DOCKER_PKG" ]; then + unset inContainer + fi +fi + +if [ -z "$inContainer" ]; then + { + echo "# WARNING! I don't seem to be running in a Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo + +# List of bundles to create when no argument is passed +DEFAULT_BUNDLES=( + binary-daemon + binary-docker + dynbinary + dynbinary-docker + + test-unit + test-integration-cli + test-docker-py + + cross + tgz +) + +VERSION=$(< ./VERSION) +! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') +if [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +elif command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then + GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + GITCOMMIT="$GITCOMMIT-unsupported" + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + echo "# GITCOMMIT = $GITCOMMIT" + echo "# The version you are building is listed as unsupported because" + echo "# there are some files in the git repository that are in an uncommitted state." 
+ echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version." + echo "# Here is the current list:" + git status --porcelain --untracked-files=no + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + fi +else + echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' + echo >&2 ' Please either build with the .git directory accessible, or specify the' + echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' + echo >&2 ' future accountability in diagnosing build issues. Thanks!' + exit 1 +fi + +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" + export GOPATH="${PWD}/.gopath" + + if [ "$(go env GOOS)" = 'solaris' ]; then + # sys/unix is installed outside the standard library on solaris + # TODO need to allow for version change, need to get version from go + export GO_VERSION=${GO_VERSION:-"1.8.1"} + export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" + fi +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald" +elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald journald_compat" +fi + +# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately +if \ + command -v gcc &> /dev/null \ + && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ +; then + DOCKER_BUILDTAGS+=' btrfs_noversion' +fi + +# test whether "libdevmapper.h" is new enough to support deferred remove +# functionality. +if \ + command -v gcc &> /dev/null \ + && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ +; then + DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' +fi + +# Use these flags when compiling the tests and final binary + +IAMSTATIC='true' +if [ -z "$DOCKER_DEBUG" ]; then + LDFLAGS='-w' +fi + +LDFLAGS_STATIC='' +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -tags "autogen netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo ) +# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here + +# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental +# builds by installing dependent packages to the GOPATH. +REBUILD_FLAG="-a" +if [ "$DOCKER_INCREMENTAL_BINARY" == "1" ] || [ "$DOCKER_INCREMENTAL_BINARY" == "true" ]; then + REBUILD_FLAG="-i" +fi +ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) + +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. 
+ +if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then + : ${TIMEOUT:=10m} +elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then + : ${TIMEOUT:=8m} +else + : ${TIMEOUT:=5m} +fi + +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + local bundle="$1"; shift + echo "---> Making bundle: $(basename "$bundle") (in $DEST)" + source "$SCRIPTDIR/make/$bundle" "$@" +} + +copy_binaries() { + dir="$1" + # Add nested executables to bundle dir so we have complete set of + # them available, but only if the native OS/ARCH is the same as the + # OS/ARCH of the build target + if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + if [ -x /usr/local/bin/docker-runc ]; then + echo "Copying nested executables into $dir" + for file in containerd containerd-shim containerd-ctr runc init proxy; do + cp -f `which "docker-$file"` "$dir/" + if [ "$2" == "hash" ]; then + hash_files "$dir/docker-$file" + fi + done + fi + fi +} + +install_binary() { + file="$1" + target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" + if [ "$(go env GOOS)" == "linux" ]; then + echo "Installing $(basename $file) to ${target}" + mkdir -p "$target" + cp -f -L "$file" "$target" + else + echo "Install is only supported on linux" + return 1 + fi +} + +main() { + # We want this to fail if the bundles already exist and cannot be removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + echo + fi + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + + rm -f bundles/latest + ln -s "$VERSION" bundles/latest + fi + + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + export DEST="bundles/$VERSION/$(basename "$bundle")" + # Cygdrive paths don't play well with go build -o. 
+ if [[ "$(uname -s)" == CYGWIN* ]]; then + export DEST="$(cygpath -mw "$DEST")" + fi + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + bundle "$bundle" + echo + done +} + +main "$@" diff --git a/hack/make/.binary b/hack/make/.binary new file mode 100644 index 0000000..22641ff --- /dev/null +++ b/hack/make/.binary @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +set -e + +GO_PACKAGE=${GO_PACKAGE:-github.com/docker/docker/cmd/dockerd} +BINARY_SHORT_NAME=${BINARY_SHORT_NAME:-dockerd} +BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +( +export GOGC=${DOCKER_BUILD_GOGC:-1000} + +if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + # must be cross-compiling! + case "$(go env GOOS)/$(go env GOARCH)" in + windows/amd64) + export CC=x86_64-w64-mingw32-gcc + export CGO_ENABLED=1 + ;; + esac +fi + +COMMIT_RUNC="$RUNC_COMMIT" +VERSION_RUNC="1.0.0-rc3" + +echo "Building: $DEST/$BINARY_FULLNAME" +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + -X github.com/docker/docker/vendor/github.com/opencontainers/runc.gitCommit=${COMMIT_RUNC} + -X github.com/docker/docker/vendor/github.com/opencontainers/runc.version=${VERSION_RUNC} + " \ + $GO_PACKAGE +) + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/hack/make/.binary-setup b/hack/make/.binary-setup new file mode 100644 index 0000000..19c59d9 --- /dev/null +++ b/hack/make/.binary-setup @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +DOCKER_DAEMON_BINARY_NAME='dockerd' +DOCKER_RUNC_BINARY_NAME='docker-runc' +DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' +DOCKER_CONTAINERD_CTR_BINARY_NAME='docker-containerd-ctr' +DOCKER_CONTAINERD_SHIM_BINARY_NAME='docker-containerd-shim' +DOCKER_PROXY_BINARY_NAME='docker-proxy' +DOCKER_INIT_BINARY_NAME='docker-init' + +docker_BINARY_NAME='docker' diff --git a/hack/make/.build-deb/compat b/hack/make/.build-deb/compat new file mode 100644 index 0000000..ec63514 --- /dev/null +++ b/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control new file mode 100644 index 0000000..0f54399 --- /dev/null +++ b/hack/make/.build-deb/control @@ -0,0 +1,29 @@ +Source: docker-engine +Section: admin +Priority: optional +Maintainer: Docker +Standards-Version: 3.9.6 +Homepage: https://dockerproject.org +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: docker-engine +Architecture: linux-any +Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} +Recommends: aufs-tools, + ca-certificates, + cgroupfs-mount | cgroup-lite, + git, + xz-utils, + ${apparmor:Recommends} +Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs +Description: Docker: the open-source application container engine + Docker is an open source project to build, ship and run any application as a + lightweight container + . + Docker containers are both hardware-agnostic and platform-agnostic. This means + they can run anywhere, from your laptop to the largest EC2 compute instance and + everything in between - and they don't require you to use a particular + language, framework or packaging system. 
That makes them great building blocks + for deploying and scaling web apps, databases, and backend services without + depending on a particular stack or provider. diff --git a/hack/make/.build-deb/docker-engine.bash-completion b/hack/make/.build-deb/docker-engine.bash-completion new file mode 100644 index 0000000..6ea1119 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/hack/make/.build-deb/docker-engine.docker.default b/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 0000000..4278533 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.docker.init b/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 0000000..8cb89d3 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.docker.upstart b/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 0000000..7e1b64a --- /dev/null +++ b/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.install b/hack/make/.build-deb/docker-engine.install new file mode 100644 index 0000000..dc6b25f --- /dev/null +++ b/hack/make/.build-deb/docker-engine.install @@ -0,0 +1,12 @@ +#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-engine/contrib/ +contrib/check-config.sh usr/share/docker-engine/contrib/ +contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-engine/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ +contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/hack/make/.build-deb/docker-engine.manpages b/hack/make/.build-deb/docker-engine.manpages new file mode 100644 index 0000000..1aa6218 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.manpages @@ -0,0 +1 @@ +man/man*/* diff --git a/hack/make/.build-deb/docker-engine.postinst b/hack/make/.build-deb/docker-engine.postinst new file mode 100644 index 0000000..eeef6ca --- /dev/null +++ b/hack/make/.build-deb/docker-engine.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? 
+ exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/hack/make/.build-deb/docker-engine.udev b/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 0000000..914a361 --- /dev/null +++ b/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/hack/make/.build-deb/docs b/hack/make/.build-deb/docs new file mode 100644 index 0000000..b43bf86 --- /dev/null +++ b/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules new file mode 100755 index 0000000..19557ed --- /dev/null +++ b/hack/make/.build-deb/rules @@ -0,0 +1,53 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) +SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) +SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v + +override_dh_strip: + # Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-engine/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd + cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy + cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd + cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim + cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr + cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc + cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init + mkdir -p debian/docker-engine/usr/lib/docker + +override_dh_installinit: + # use "docker" as our service name, not "docker-engine" + dh_installinit --name=docker +ifeq (true, $(SYSTEMD_GT_227)) + $(warning "Setting TasksMax=infinity") + sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service +endif + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +override_dh_install: + dh_install + dh_apparmor --profile-name=docker-engine -pdocker-engine + +override_dh_shlibdeps: + dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info + +%: + dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec new file mode 100644 index 0000000..6225bb7 --- /dev/null +++ b/hack/make/.build-rpm/docker-engine.spec @@ -0,0 +1,249 @@ +Name: docker-engine +Version: %{_version} +Release: %{_release}%{?dist} +Summary: The open-source application container engine +Group: Tools/Docker + +License: ASL 2.0 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +# is_systemd conditional +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 +%global is_systemd 1 +%endif + +# required packages for build +# 
most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) +# only require systemd on those systems +%if 0%{?is_systemd} +%if 0%{?suse_version} >= 1210 +BuildRequires: systemd-rpm-macros +%{?systemd_requires} +%else +%if 0%{?fedora} >= 25 +# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301) +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +%else +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +BuildRequires: pkgconfig(libsystemd-journal) +%endif +%endif +%else +Requires(post): chkconfig +Requires(preun): chkconfig +# This is for /sbin/service +Requires(preun): initscripts +%endif + +# required packages on install +Requires: /bin/sh +Requires: iptables +%if !0%{?suse_version} +Requires: libcgroup +%else +Requires: libcgroup1 +%endif +Requires: tar +Requires: xz +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 || 0%{?amzn} >= 1 +# Resolves: rhbz#1165615 +Requires: device-mapper-libs >= 1.02.90-1 +%endif +%if 0%{?oraclelinux} >= 6 +# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper +Requires: kernel-uek >= 4.1 +Requires: device-mapper >= 1.02.90-2 +%endif + +# docker-selinux conditional +%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global with_selinux 1 +%endif + +# DWZ problem with multiple golang binary, see bug +# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 +%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global _dwz_low_mem_die_limit 0 +%endif + +# start if with_selinux +%if 0%{?with_selinux} + +%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?fedora} >= 25 +Requires: container-selinux >= 2.9 +%endif# centos 7, rhel 7, fedora 25 + +%if 0%{?oraclelinux} >= 7 +%global selinux_policyver 3.13.1-102.0.3.el7_3.15 +%endif # oraclelinux 7 +%if 0%{?fedora} == 24 +%global selinux_policyver 3.13.1-191 +%endif # fedora 24 -- container-selinux on fedora24 does not properly set dockerd, for now just carry docker-engine-selinux for it +%if 0%{?oraclelinux} >= 7 || 0%{?fedora} == 24 +Requires: selinux-policy >= %{selinux_policyver} +Requires(pre): %{name}-selinux >= %{version}-%{release} +%endif # selinux-policy for oraclelinux-7, fedora-24 + +%endif # with_selinux + +# conflicting packages +Conflicts: docker +Conflicts: docker-io +Conflicts: docker-engine-cs + +%description +Docker is an open source project to build, ship and run any application as a +lightweight container. + +Docker containers are both hardware-agnostic and platform-agnostic. This means +they can run anywhere, from your laptop to the largest EC2 compute instance and +everything in between - and they don't require you to use a particular +language, framework or packaging system. That makes them great building blocks +for deploying and scaling web apps, databases, and backend services without +depending on a particular stack or provider. 
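+# A note on the "0%{?macro}" comparisons used throughout this spec (standard
+# RPM macro behavior, not something defined by this package): "%{?fedora}"
+# expands to the release number when the macro is defined and to nothing
+# otherwise, so the leading 0 keeps the expression numeric either way, e.g.:
+#
+#   on CentOS 7:   %if 0%{?fedora} >= 21   becomes   %if 0 >= 21    (false)
+#   on Fedora 25:  %if 0%{?fedora} >= 21   becomes   %if 025 >= 21  (true)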
+ +%prep +%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +export DOCKER_GITCOMMIT=%{_gitcommit} +./hack/make.sh dynbinary +# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +%check +./bundles/%{_origversion}/dynbinary-daemon/dockerd -v + +%install +# install binary +install -d $RPM_BUILD_ROOT/%{_bindir} +install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd + +# install proxy +install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy + +# install containerd +install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd +install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim +install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr + +# install runc +install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc + +# install tini +install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init + +# install udev rules +install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d +install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules + +# add init scripts +install -d $RPM_BUILD_ROOT/etc/sysconfig +install -d $RPM_BUILD_ROOT/%{_initddir} + + +%if 0%{?is_systemd} +install -d $RPM_BUILD_ROOT/%{_unitdir} +install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service +%else +install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker +install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker +%endif +# add bash, zsh, and fish completions +install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions +install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions +install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d +install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker +install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker +install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish + +# install manpages +install -d %{buildroot}%{_mandir}/man1 +install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 +install -d %{buildroot}%{_mandir}/man5 +install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 +install -d %{buildroot}%{_mandir}/man8 +install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 + +# add vimfiles +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax +install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt +install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim + +# add nano +install -d $RPM_BUILD_ROOT/usr/share/nano +install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc + +# list files owned by the package here +%files +%doc AUTHORS CHANGELOG.md 
CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md +/%{_bindir}/docker +/%{_bindir}/dockerd +/%{_bindir}/docker-containerd +/%{_bindir}/docker-containerd-shim +/%{_bindir}/docker-containerd-ctr +/%{_bindir}/docker-proxy +/%{_bindir}/docker-runc +/%{_bindir}/docker-init +/%{_sysconfdir}/udev/rules.d/80-docker.rules +%if 0%{?is_systemd} +/%{_unitdir}/docker.service +%else +%config(noreplace,missingok) /etc/sysconfig/docker +/%{_initddir}/docker +%endif +/usr/share/bash-completion/completions/docker +/usr/share/zsh/vendor-completions/_docker +/usr/share/fish/vendor_completions.d/docker.fish +%doc +/%{_mandir}/man1/* +/%{_mandir}/man5/* +/%{_mandir}/man8/* +/usr/share/vim/vimfiles/doc/dockerfile.txt +/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +/usr/share/vim/vimfiles/syntax/dockerfile.vim +/usr/share/nano/Dockerfile.nanorc + +%post +%if 0%{?is_systemd} +%systemd_post docker +%else +# This adds the proper /etc/rc*.d links for the script +/sbin/chkconfig --add docker +%endif +if ! getent group docker > /dev/null; then + groupadd --system docker +fi + +%preun +%if 0%{?is_systemd} +%systemd_preun docker +%else +if [ $1 -eq 0 ] ; then + /sbin/service docker stop >/dev/null 2>&1 + /sbin/chkconfig --del docker +fi +%endif + +%postun +%if 0%{?is_systemd} +%systemd_postun_with_restart docker +%else +if [ "$1" -ge "1" ] ; then + /sbin/service docker condrestart >/dev/null 2>&1 || : +fi +%endif + +%changelog diff --git a/hack/make/.detect-daemon-osarch b/hack/make/.detect-daemon-osarch new file mode 100644 index 0000000..ac16055 --- /dev/null +++ b/hack/make/.detect-daemon-osarch @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +set -e + +docker-version-osarch() { + if ! type docker &>/dev/null; then + # docker is not installed + return + fi + local target="$1" # "Client" or "Server" + local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" + if docker version -f "$fmtStr" 2>/dev/null; then + # if "docker version -f" works, let's just use that! + return + fi + docker version | awk ' + $1 ~ /^(Client|Server):$/ { section = 0 } + $1 == "'"$target"':" { section = 1; next } + section && $1 == "OS/Arch:" { print $2 } + + # old versions of Docker + $1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 } + ' +} + +# Retrieve OS/ARCH of docker daemon, e.g. 
linux/amd64 +export DOCKER_ENGINE_OSARCH="${DOCKER_ENGINE_OSARCH:=$(docker-version-osarch 'Server')}" +export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" +export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" +DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} + +# and the client, just in case +export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" +export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" +export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" +DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} + +# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ +PACKAGE_ARCH='amd64' +case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in + arm) + PACKAGE_ARCH='armhf' + ;; + arm64) + PACKAGE_ARCH='aarch64' + ;; + amd64|ppc64le|s390x) + PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" + ;; + *) + echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" + ;; +esac +export PACKAGE_ARCH + +DOCKERFILE='Dockerfile' +TEST_IMAGE_NAMESPACE= +case "$PACKAGE_ARCH" in + amd64) + case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in + windows) + DOCKERFILE='Dockerfile.windows' + ;; + solaris) + DOCKERFILE='Dockerfile.solaris' + ;; + esac + ;; + *) + DOCKERFILE="Dockerfile.$PACKAGE_ARCH" + TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" + ;; +esac +export DOCKERFILE TEST_IMAGE_NAMESPACE diff --git a/hack/make/.ensure-emptyfs b/hack/make/.ensure-emptyfs new file mode 100644 index 0000000..7b00b9d --- /dev/null +++ b/hack/make/.ensure-emptyfs @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -e + +if ! docker inspect -t image emptyfs &> /dev/null; then + # let's build a "docker save" tarball for "emptyfs" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + dir="$DEST/emptyfs" + mkdir -p "$dir" + ( + cd "$dir" + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cC "$dir" . 
| docker load ) + rm -rf "$dir" +fi diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen new file mode 100644 index 0000000..ec20180 --- /dev/null +++ b/hack/make/.go-autogen @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +rm -rf autogen + +source hack/dockerfile/binaries-commits + +cat > dockerversion/version_autogen.go < dockerversion/version_autogen_unix.go < + +param( + [Parameter(Mandatory=$true)][string]$CommitString, + [Parameter(Mandatory=$true)][string]$DockerVersion +) + +$ErrorActionPreference = "Stop" + +# Utility function to get the build date/time in UTC +Function Get-BuildDateTime() { + return $(Get-Date).ToUniversalTime() +} + +try { + $buildDateTime=Get-BuildDateTime + + if (Test-Path ".\autogen") { + Remove-Item ".\autogen" -Recurse -Force | Out-Null + } + + $fileContents = ' +// +build autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. +const ( + GitCommit string = "'+$CommitString+'" + Version string = "'+$DockerVersion+'" + BuildTime string = "'+$buildDateTime+'" +) + +// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 +' + + # Write the file without BOM + $outputFile="$(pwd)\dockerversion\version_autogen.go" + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $fileContents, (New-Object System.Text.UTF8Encoding($False))) + + New-Item -ItemType Directory -Path "autogen\winresources\tmp" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\docker" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\dockerd" | Out-Null + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\docker" + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\dockerd" + + # Generate a version in the form major,minor,patch,build + $versionQuad=$DockerVersion -replace "[^0-9.]*" -replace "\.", "," + + # Compile the messages + windmc hack\make\.resources-windows\event_messages.mc -h autogen\winresources\tmp -r autogen\winresources\tmp + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile event message resources" } + + # If you really want to understand this madness below, search the Internet for powershell variables after verbatim arguments... Needed to get double-quotes passed through to the compiler options. + # Generate the .syso files containing all the resources and manifest needed to compile the final docker binaries. Both 32 and 64-bit clients. 
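+    # (A short gloss, based on documented PowerShell behavior: everything after
+    # the "--%" stop-parsing token is handed to windres verbatim, so the \"
+    # escapes reach the compiler intact; the only substitution still applied
+    # after "--%" is cmd-style %VAR% environment expansion, which is why the
+    # two $env:_ag_* variables are set here and cleared again in Finally.)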
+    $env:_ag_dockerVersion=$DockerVersion
+    $env:_ag_gitCommit=$CommitString
+
+    windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 64-bit resources" }
+
+    windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_386.syso -F pe-i386 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 32-bit resources" }
+
+    windres -i hack/make/.resources-windows/dockerd.rc -o autogen/winresources/dockerd/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\"
+    if ($LASTEXITCODE -ne 0) { Throw "Failed to compile daemon resources" }
+}
+Catch [Exception] {
+    # Throw the error onto the caller to display errors. We don't expect this script to be called directly
+    Throw ".go-autogen.ps1 failed with error $_"
+}
+Finally {
+    Remove-Item .\autogen\winresources\tmp -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
+    $env:_ag_dockerVersion=""
+    $env:_ag_gitCommit=""
+}
diff --git a/hack/make/.integration-daemon-setup b/hack/make/.integration-daemon-setup
new file mode 100644
index 0000000..5134e4c
--- /dev/null
+++ b/hack/make/.integration-daemon-setup
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+set -e
+
+bundle .detect-daemon-osarch
+if [ "$DOCKER_ENGINE_GOOS" != "windows" ]; then
+	bundle .ensure-emptyfs
+fi
diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start
new file mode 100644
index 0000000..5c20e97
--- /dev/null
+++ b/hack/make/.integration-daemon-start
@@ -0,0 +1,132 @@
+#!/usr/bin/env bash
+
+# see test-integration-cli for example usage of this script
+
+base="$ABS_DEST/.."
+export PATH="$base/dynbinary-docker:$PATH"
+
+export TEST_CLIENT_BINARY=docker
+export TEST_DOCKER_RUNC_BINARY=docker-runc
+
+# Do not bump this version! Integration tests should no longer rely on the docker cli, they should be
+# API tests instead. For the existing tests the scripts will use a frozen version of the docker cli
+# with a DOCKER_API_VERSION frozen to 1.30, which should ensure that the CI remains green at all times.
+export DOCKER_API_VERSION=1.30
+if [ -n "$DOCKER_CLI_PATH" ]; then
+	export TEST_CLIENT_BINARY=/usr/local/cli/$(basename "$DOCKER_CLI_PATH")
+fi
+
+echo "Using test binary $TEST_CLIENT_BINARY"
+if ! command -v "$TEST_CLIENT_BINARY" &> /dev/null; then
+	echo >&2 "error: missing test client $TEST_CLIENT_BINARY"
+	false
+fi
+
+export DOCKER_CLI_VERSION=$(${TEST_CLIENT_BINARY} --version | awk '{ gsub(",", " "); print $3 }')
+export DOCKER_RCE_RUNC_VERSION=$(${TEST_DOCKER_RUNC_BINARY} --version | awk '{ gsub(",", " "); print $3 }')
+
+# This is a temporary hack for split-binary mode. It can be removed once
+# https://github.com/docker/docker/pull/22134 is merged into docker master
+if [ "$(go env GOOS)" = 'windows' ]; then
+	return
+fi
+
+if [ -z "$DOCKER_TEST_HOST" ]; then
+	if docker version &> /dev/null; then
+		echo >&2 'skipping daemon start, since daemon appears to be already started'
+		return
+	fi
+fi
+
+if !
command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before .integration-daemon-start' + false +fi + +# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers +exec 41>&1 42>&2 + +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + +# example usage: DOCKER_REMAP_ROOT=default +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +# example usage: DOCKER_EXPERIMENTAL=1 +if [ "$DOCKER_EXPERIMENTAL" ]; then + echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! ' + extra_params="$extra_params --experimental" +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Start apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + # reset container variable so apparmor profile is applied to process + # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 + export container="" + ( + set -x + /etc/init.d/apparmor start + ) + fi + + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + ( set -x; exec \ + dockerd --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params \ + &> "$DEST/docker.log" + ) & + # make sure that if the script exits unexpectedly, we stop this daemon we just started + trap 'bundle .integration-daemon-stop' EXIT +else + export DOCKER_HOST="$DOCKER_TEST_HOST" +fi + +# give it a little time to come up so it's "ready" +tries=60 +echo "INFO: Waiting for daemon to start..." +while ! $TEST_CLIENT_BINARY version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + printf "\n" + if [ -z "$DOCKER_HOST" ]; then + echo >&2 "error: daemon failed to start" + echo >&2 " check $DEST/docker.log for details" + else + echo >&2 "error: daemon at $DOCKER_HOST fails to '$TEST_CLIENT_BINARY version':" + $TEST_CLIENT_BINARY version >&2 || true + # Additional Windows CI debugging as this is a common error as of + # January 2016 + if [ "$(go env GOOS)" = 'windows' ]; then + echo >&2 "Container log below:" + echo >&2 "---" + # Important - use the docker on the CI host, not the one built locally + # which is currently in our path. + ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH + echo >&2 "---" + fi + fi + false + fi + printf "." + sleep 2 +done +printf "\n" diff --git a/hack/make/.integration-daemon-stop b/hack/make/.integration-daemon-stop new file mode 100644 index 0000000..f8533a7 --- /dev/null +++ b/hack/make/.integration-daemon-stop @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +if [ ! "$(go env GOOS)" = 'windows' ]; then + trap - EXIT # reset EXIT trap applied in .integration-daemon-start + + for pidFile in $(find "$DEST" -name docker.pid); do + pid=$(set -x; cat "$pidFile") + ( set -x; kill "$pid" ) + if ! 
wait "$pid"; then + echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" + fi + done + + if [ -z "$DOCKER_TEST_HOST" ]; then + # Stop apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + ( + set -x + /etc/init.d/apparmor stop + ) + fi + fi +else + # Note this script is not actionable on Windows to Linux CI. Instead the + # DIND daemon under test is torn down by the Jenkins tear-down script + echo "INFO: Not stopping daemon on Windows CI" +fi diff --git a/hack/make/.integration-test-helpers b/hack/make/.integration-test-helpers new file mode 100644 index 0000000..948337c --- /dev/null +++ b/hack/make/.integration-test-helpers @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +: ${TEST_REPEAT:=0} + +bundle_test_integration_cli() { + TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" + go_test_dir integration-cli $DOCKER_INTEGRATION_TESTS_VERIFIED +} + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want +# to run certain tests on your local host, you should run with command: +# +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# +go_test_dir() { + dir=$1 + precompiled=$2 + testbinary="$ABS_DEST/test.main" + testcover=() + testcoverprofile=() + ( + set -e + mkdir -p "$DEST/coverprofiles" + export DEST="$ABS_DEST" # in a subshell this is safe -- our integration-cli tests need DEST, and "cd" screws it up + if [ -z $precompiled ]; then + ensure_test_dir $1 $testbinary + fi + cd "$dir" + i=0 + while ((++i)); do + test_env "$testbinary" $TESTFLAGS + if [ $i -gt "$TEST_REPEAT" ]; then + break + fi + echo "Repeating test ($i)" + done + ) +} + +ensure_test_dir() { + ( + # make sure a test dir will compile + dir="$1" + out="$2" + echo Building test dir: "$dir" + set -xe + cd "$dir" + go test -c -o "$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" + ) +} + +test_env() { + ( + set -xe + # use "env -i" to tightly control the environment variables that bleed into the tests + env -i \ + DEST="$DEST" \ + DOCKER_CLI_VERSION="$DOCKER_CLI_VERSION" \ + DOCKER_API_VERSION="$DOCKER_API_VERSION" \ + DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \ + DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ + DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ + DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ + DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ + DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ + DOCKER_HOST="$DOCKER_HOST" \ + DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ + DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ + DOCKERFILE="$DOCKERFILE" \ + GOPATH="$GOPATH" \ + GOTRACEBACK=all \ + HOME="$ABS_DEST/fake-HOME" \ + PATH="$PATH" \ + TEMP="$TEMP" \ + TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + TEST_CLIENT_BINARY="$TEST_CLIENT_BINARY" \ + DOCKER_RCE_RUNC_VERSION="$DOCKER_RCE_RUNC_VERSION" \ + "$@" + ) +} diff --git a/hack/make/.resources-windows/common.rc b/hack/make/.resources-windows/common.rc new file mode 100644 index 0000000..000fb35 --- /dev/null +++ b/hack/make/.resources-windows/common.rc @@ -0,0 +1,38 @@ +// Application icon +1 ICON "docker.ico" + +// Windows executable manifest +1 24 /* RT_MANIFEST */ "docker.exe.manifest" + +// Version information +1 VERSIONINFO + +#ifdef DOCKER_VERSION_QUAD 
+FILEVERSION DOCKER_VERSION_QUAD
+PRODUCTVERSION DOCKER_VERSION_QUAD
+#endif
+
+BEGIN
+    BLOCK "StringFileInfo"
+    BEGIN
+        BLOCK "000004B0"
+        BEGIN
+            VALUE "ProductName", DOCKER_NAME
+
+#ifdef DOCKER_VERSION
+            VALUE "FileVersion", DOCKER_VERSION
+            VALUE "ProductVersion", DOCKER_VERSION
+#endif
+
+#ifdef DOCKER_COMMIT
+            VALUE "OriginalFileName", DOCKER_COMMIT
+#endif
+
+        END
+    END
+
+    BLOCK "VarFileInfo"
+    BEGIN
+        VALUE "Translation", 0x0000, 0x04B0
+    END
+END
diff --git a/hack/make/.resources-windows/docker.exe.manifest b/hack/make/.resources-windows/docker.exe.manifest
new file mode 100644
index 0000000..674bc94
--- /dev/null
+++ b/hack/make/.resources-windows/docker.exe.manifest
@@ -0,0 +1,18 @@
[18-line XML application manifest; the markup was stripped in extraction and only the literal text "Docker" survives]
diff --git a/hack/make/.resources-windows/docker.ico b/hack/make/.resources-windows/docker.ico
new file mode 100644
index 0000000000000000000000000000000000000000..c6506ec8dbd8e295d98a084412e9d2c358a6ba39
GIT binary patch
literal 370070
[base85-encoded binary payload of docker.ico omitted]
literal 0
HcmV?d00001

diff --git a/hack/make/.resources-windows/docker.png b/hack/make/.resources-windows/docker.png
new file mode 100644
index 0000000000000000000000000000000000000000..88df0b66dfcf0b298de8cd296b793b9220c4a914
GIT binary patch
literal 658195
[base85-encoded binary payload of docker.png omitted]
zk)Cyl)&?xjzoZ0&U_MR-nMQVSaRsu&{S1XoB)f;~-(>$I(8a&%EkD_W{QO1MLc^QW z_gJa>H(15gJ2Xq^0zD*8(Ru~mMd^6M2Til9Jz4Nzp#J~7QWYGZhy!Be-E+!2mvutQ zCkUv>DsR($ZZaV5fTYYvL9Mv#s&!Zv2`}fCSGtmU|4zEfXH==_;ioxwm_*|wlR0L# zkmb3h2WM`%$5>AJ>MXZ>HL{Ph+_H~}_GN@m-Bw=yk7>^yW&y-YPHSPDVe;tjDY-%o zmKjj)TBYNAM#;4KhbW!1^o+1Wr3&Yio^MKWsZX{s*$A?&$krw6Xv1H=-=w@>v^Qc& z2nc~~Km{F5Hkz!fMgemvE#6(pciZXi8S2VY-nmLI8N`gHY74y9 zCzW!4mh2_H=`7t-VS!Zb45j4hUb8ySwRCw3a6J%?Xb=C8YzZ@?i z`xyzzVfwYnh*m&IG+T(z$;4k&I1}#1?-|{FLt{=_dePGI$s=<9lC3wy#d!KL=OO33_%FYI_=g9!B}!p6ltcQzd#MQDkmVDMteRCODghzUXh?;4T5ow1&hq)L@Ko9NKlTO zX33=j649d{UslfthFa@?c^@DQ2!H?xfPnZ3+@dV*Buk5sbfR)-&$^X%O(GC0`cr8pj^Ji*80}w@JU`UAOHep3D9ct{L~C##Dizd+h;Gt!)y)Nb=ww-=9x^T;&Z*NHrkv#n}o;egb)A(BuGG;MO{`Yv{Q)j2BNMmhmX%lHX`D#tCRJl z^gLTYcugynd?Ard2HjXTVM8FP6G+fbHD3fnzg4;bgC52?^Y#6k4G;gq*a^puCjb>u zD7V{+Rhx~$5L?+Hocj^r{Z@I&X-x39E0rpXX}mhRA7Sy82m+!|EU~p(bblx|45HYY z9>RfuSPAfHRt5c5)(TQ%2xIC#JUiw%;>`6p%?IVCO5CgwC+i$Y0EStYht_Y-WXv%2-4+aF>kpORXq|;c37-t^( z`0in&TH(79Hr#g@1F->GkwH}IUy5|M&@mrlAr_K?H>nZevki6AbHykg8ch;YtJ_MA zsk`JCU08;iRHb zy24DBqWHML4FY0-9M|k5y~$YhNiRcC-7o)RqisiFK)@XcXtQaC6(8#mW2=J$=IcFt z{bFgi#FOs8TdHM%fEd@DT5Z?9&wN^=DBskK%ZOtTFh_u|>)>1H@|_UHGQ+B<{QCMW zrL9Vq7Ig`3t|Nd!bYs?E{8f(KyL*#%fwOruT&7krxv!UTtR8pF;XPT}o7MK$Fn_gz zC1>Zc6ZbRONwP5FC7}Co2X94-SvWX8AKK??RI8@qB`KYp**l010Wq!XPj?<=8R_*+ zovk^S(-)^?9{9y6Idv=as$Pl>t5=>?^nP@8{)N_%*Q8)4GX_1h3rZpj}Q=N)Y9zj$t*j|-=3&FX^$)gwLBKJ3Sy1Q7KrA) zJ$u>4pEt1U*RJUhDwRq$>5VC@dQkNTNVi>0W)uIogqFwyBCb8)qM`fFtk}D&3|JJ3 zJQ_3)r*V(J3{Lp9pMbprfOiCUvM6V3+;o4OVx0PDO(87w`+V&(55(h}B}>_t zi@tau9z2RR?b;O*5AN2!N&xFxEs!6=xR8MEQ-SogC%!09^kW?cE^@h+`T4&9@h|`p z&o2a|sJc)nCe!yY?c3mrcMs4UdUrQ!T74f=r(gm9j_rwT*N&Z!6uE4%L&wm0<*Ij0bcldEF0SHJ@y!rQTy?uTv>XJ2X zy1_{|UmMc2ye|u+nO3|hykY&%MdIOITDkNgd4oB6l`-5>y$FI}P; z(07ZB>B9N*?7itT*oJlMb=_N^d_0@+0d{VlEahEf7&zKbz_9V5q{cdoi|yzC6@A!z zvau76#YI3VuIz{?H$$5zvAMf9u+&t$KTgBD4io`2z|5QDZf4483T~m~`^jcYcIo0p z#;w8JCjkkd*;%Ei^#bEq0=kwjpIs&87Z+dOpV=!dhd7oDVskVBw{L+YXs6oAOsN`d z_kak`f_{mwow*TZ@O!jUkQ-mWhjVx>$*8X{ZDC9%CC9$;CQH)t7(X;OTDO#3YJ(l7 z73Ax;m5pp=_uHFkv^aM>0k>BXyO8mwh_}yTyW-FX=C6dh=`W6mOhj>#647QmD*U!IjITSgb|RB}Oh5fw>#SN#DwJ$VMD|0zMzrbSJa-W~y#gphBTw9lLb4sAyfEd0OZ1PF61a_inmHVc@9< z0(^Q^dX|3YLkW(On-$Zw) zSxkxHEK&m45X;hvnUi>(Wo1^h)O$((_^yk5_eFOwK1w~{4Yv~xUUWWR7gMe*xCxx&1p?w6u0_ZC z`R=tWEwx9{7j1KOClTQ-k=VNB+4Hr^GL7QVdCO_k-SySU+wAL8w^=q?9wpx}d~lq% zIOq#cZntw7pV#i|_aTYLt0jYg4`&g8fH;e6QSr{-t6>=#Nh~MZvgt-e#ZiQt+*`wB zUg{#Qh`@=uN)$3hhFq!MDpx39q88X=IoTP1(xa`|Wq`gUBrPlYv)Ta~7C?(VcuRwS z5MTB}(`j9d^BPB;mn_xH3T3auHjrbmASirqGjs63Om|=ru&$P zAHCSSa@jg{CCO#-TxwlhlgpLcWD1>N+$*UxszrMpLy$K3RnL;tmQEjVw1WM!miNO>HVMURWfod?E? 
zs!WaMI#X-HNIb3=bs28bB!F31w$|H%ePJ}M_h))Ady%F2t6CM)<|}9(tDMg5ezKN0 zE~1;oN&ggdIdOHML}j@$nLJY_m*1n~n@lc0M8|6~UJfgIuUs3-?!EM2hup064f!&y zNC$0fD3l9>=r*h3Gpe!+^$NN$2LdGJ>l-MU1eHX zGPNR(()BYmE^);uj8hN2mmY}C&&f{6%ge2zmGdDI7^?_S>yLL+vRq|-0QpA8LE{$FSruPU_yz2qMrxz1K!DAA(Y&}-SPo$3T@2y-vOEb+A)e9tr9MMcLRX*8N!wEa&6&9uTzBk~)-UI8im zq!4*wkv-c9Y(IbXi5)jHMyX`7j+K0r)hl_cyvup3WZoneCG|;F^3|}&ioR_3jWpXc z;Lag+T~ss$m(Qk>@`pGaQAfVMTbN4Sg$Bz#R6QL5Q95y83`9D0^+-s?x{RQjtGGL2F}6Q)X%iASO@Uwvd}g zpK#HjZ<**s!=2Y{SX#;a6P~_)6R68-wxvPfPj~|Mwm={t4ywc7PMr(P%+vlxkLfVJ zO-DwaR!gg{^K)q%ql{c_9=Y-1(_Ff)n?FU{deH1yS-y-$(&^!@n@LNLBsrgL%d^C% zRe961pC3`HK~9UimEN?vPbFt^H3bONC}rjSePtT8>n1~u+k2jEzwW`U5S?i7mIhh= zp?6(PI_L(Hax@yR8_dhA4Ru)^mvS*YG4Bu%5!E`0h|i6Ty!{ou6?I*eE&d%2UoS7_ z3L&{k-=65S$-}y}4c$|Gl{i`&g>P%#s`d5zmiDuHvG4_)NriyD0)p>>IIyPMJy9c_ z#G}!rwA@n;jMi*ad3kv(IW3Ldp%0CxXJ%4+#(A&B6?`;oKn>Gb$7Zu9$%|TI%2$Tx z)2Ctc?xpI!Bnj~_sY;bPmH7vBf_Rv4@vu{LVL73~sU@O5m7H_ri2zj{Cq!>HO8%`4 zwMxajsB4S=I@+YuTX@vQlkW8$4@1`!AJgEi3@3`PqwB*sz69-5<5MRd^QMQ7?^gO$ zXLlMccgW{DotRrhT>=4n-#VcpB>=YdGAW7TU{coKp=C6ow#H#^IQ|h14Jirl<5DRd zH|uWG#UwWApKJDJPq^@0HE7C|ZrT)A4r+G5SU^CnPGR0Y18KlxqXoX5Sx)H+e=#Y9D;@q9$y z8gqkI7LJ* z98?`jWe)T4na+ksOm|R>h(?42ARr>Fl;W~9WYghZ9V>gUuIQ~4QCpyrvpHgg& z2LUBP00JV3YI>N-*iDD(gqGGEu3uWix8s$(IdLzOjoEdX(W*(^_Y7$fWWiUl%=Xuj zXYQYigT8x7cO&b?Lh?|gS^#gKGfb`K!f{ZlML@o-BLD%hj+ZEWPfa|m&SLTtovQi< zi6TRGW@7Exdu+}jfx5En%sYY;0gWb!c0e3S!m$iIJc|GXM8fqQv-$9%-qn0##X2rx z4chnrxz2vQh<)F!VJ5g=l`5Tt<8AtA$1K6~f(!^k00JTicE=?ev-wEJ@N(*nHT~6& zTd3i<>{%RBE{$1C{QVL;d^^M9Fz_f40hKC?gkuiPl6W0k=L%E@B1skkB9e-*CmSE_ zJ?eEmOM9eq72n!shYsvX#06oKv-8-f9T(Z<)NB`|h9s;epd{h&_FBjk%JEpwVKqIs z3;+RfqiTJ5)8WaTD|x?5AMKFLEG<*~H`8<2=$#kYtqiO;Gi4*#xk{bJyfmNDEVeh; z$W~Z(FIYv0FJTCX`0B*29L?HutV~uu+aFocySiNw2^jc7N;Vs}>ms|Cg=dw3)xZbG z@6sZ`59mW3?+Ka@BraM45D?K;jeQv#y7kDIh_afG9`{pOw?y2&WE~dzTyhqhu=^53 z#9>4&jEMJvdU>6uPXoS5!hu;;7Gm>MZx9eSuKBO_pHTmolDoNWMeo)ch4gy0AR;cj zIr(P+y}WkN0>Du;9ru_EQ$|t}CIA7EaOGK=^_lyQH_9z<8>A6%M6wHUS8Tvk8kR{xQG(`&_*M)xvs#YF`m$ ztHi9_&B|qy_grD8Xw^E50|@ZehQ@0jZL9nS8y24EfEW>lhZ2B*cwoi9z30DiK|acN zAM;guNu~H1x%q6y-`CilYj{bvsm}P=hQ{kMZK1q`wttyHp96#%+XNDUj0r$MjA?k# zF@3N~w*5xtq-x&sw`vEdy`@lezWvo#$8NLlPTbLLg(ZbN3!X=f<_-;rucj`lX*9NR zMezI}18zkC0^(NHX^&Y`PA&gG`i^dC+hTBSs9jf6*!%mhv$P!AZ5O660p8M3 ztM8I%Y@%(CXHXZ_zlA;E3*VFU92ri|n?=8~rju;^GLG|f> zU1i4|y%oG60V)0JL_>3px~G0-O64pX3_l~K>?5zzB_LG+NzhJ}E_6xcocg!|cHheU zxQ4ItxvDoK$&ocb0O=|UVntQu~_QC&z`lzrSrDxp%qA0CY zRYVlIbOk`+_1eS$SIa<*{3A^_hDt zPyeMU5oOQ%tjOj0G?;ON`lbFPyPWI_7SjYPVyq{L5Kbfj0dXQ+ca55h$*Gjwc9+s~ z{#VUg(X6(=+MD;qi@}_|pT%a=7pnffBeA{VRjN$tjyg|Mrhcc7|NcmOiEI%APvijv z%n^Wqn4^Ph77-vpDZBetR=iTC9az;{5%id^N+qz6v`~7RNSIg#c3v2kXXoeFc}HP(B|nb%?lC(0VffF zfH(;(qAg6||NcBt>&U(AI6sA~bvcc^R%LHRDVpJ>a>X}!lc*K)_3=Awl1ThP?tik_rdbY43lV{U2nj$yL|7@Fl%;Wr$Eu!7&FSZ( zkTvsD%IXBFWR**)rtnuOWHeyUln!XX+@vp6@ulp)6elFS)xZa@WwKoQ z%_1R4qu={%?W61<00JOj2LT9(9qI5JlQm;Ye9|S-E1Oy@g z0TBp1k_7<}00F5JfPhH729bXd009sXhyVmcAn-^Q1V8`;q)q?=BJ~!3mK51f)&?0wVPqME*el1VBI_ z0uT^^z#~}@009t?IspiX)N2s=2LTWO0f7iWKm-DhWI+G~KtSpQARtn&LF69DM7j00JNY0)h~LfCz#ei3(33K|59WOdtm!AbkQ*5$V?dd5|BF?0XxIq8}K){U& zKtSBMa#1u8009tiCIJYDGwC935C8!XaAN`x5I3$|6b%GG00f*#00QDnx`-PDKmY{X zm;eOCjVl*L0|5{K0cR3`fH;#b;syZ_00B2900D91%0!3m zz>Nt&K-{=;Q8W+$0T6H|0SJgQ=^}0r009tiV*(HmH?CY14Fo^{1e{3#0^&@%h#Lez z00i8a00hL1D;Grr0T2KIXA*#bIFl~o1_2NN0XHT90deEXMbSV21VF%<1Rx;Jq>H#g z00cn5jR`0T6Iw0uT^4u3Qui1V8`;oJjxz;!L`T z+j;^C+NstDLI@B50l^4BMFc~Sgh2oVKtS3AARyANKja<+KmY^;BLD#r3_TJC0T2KI zX%m2eNW1=!dk_Et5D<(21Vk|ONEie_00g8>00JWI`a|wP00cllFai(|!O$aN5C8!X zkTwAbh_veuxd#Cd00F@WKtKdTkAy)01VBLA1Rx;Nu0P}+1V8`;1S0?e5ez*N1_2NN 
z0cjI}fJnRkkb4jS0T2+300cxZ^hg*4KmY`!O#lKS?fOIRK>!3mKrjLj5W&zRVGsZT z5Rf(j2#B=n54i^c5C8$e2tYstLyv?(00cll+5{jV(yl+`9t1!D1Oy`h0TB#65(WVf z00C(efPhH5{*ZeR00D^-nB6gcvqZCAINMN>!aQ&W0w4eaAYdT@2#AHC;0**o00cmw zFaZciVIDXG0T2KI5U`K{1jIs6@CE`P00JOTm;eN%Fb|x800@8p2v|q}0%9R3cmn|t z009svOaKB>m 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + make manpages + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( + # Add the APT_MIRROR args only if the consuming Dockerfile uses it + # Otherwise this will cause the build to fail + if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then + DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR" + fi + set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" + ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . /usr/src/docker + ENV DOCKER_GITCOMMIT $GITCOMMIT + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ + && ln -snf /usr/src/docker /go/src/github.com/docker/docker + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN cp -aL hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us -I.git + EOF + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) + docker run --rm "$tempImage" bash -c 'cd .. 
&& tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/build-integration-test-binary b/hack/make/build-integration-test-binary new file mode 100644 index 0000000..a842e8c --- /dev/null +++ b/hack/make/build-integration-test-binary @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -e + +rm -rf "$DEST" +DEST="$ABS_DEST/../test-integration-cli" + +source "$SCRIPTDIR/make/.go-autogen" + +if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then + source ${MAKEDIR}/.integration-test-helpers + ensure_test_dir integration-cli "$DEST/test.main" + export DOCKER_INTEGRATION_TESTS_VERIFIED=1 +fi diff --git a/hack/make/build-rpm b/hack/make/build-rpm new file mode 100644 index 0000000..1e89a78 --- /dev/null +++ b/hack/make/build-rpm @@ -0,0 +1,148 @@ +#!/usr/bin/env bash +set -e + +# subshell so that we can export PATH and TZ without breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch" + + # TODO consider using frozen images for the dockercore/builder-rpm tags + + rpmName=docker-engine + rpmVersion="$VERSION" + rpmRelease=1 + + # rpmRelease versioning is as follows + # Docker 1.7.0: version=1.7.0, release=1 + # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 + # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 + # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH + + # if we have a "-rc*" suffix, set appropriate release + if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then + rcVersion=${rpmVersion#*-rc} + rpmVersion=${rpmVersion%-rc*} + rpmRelease="0.${rcVersion}.rc${rcVersion}" + fi + + DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" + fi + + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="${gitDate}.git${gitCommit}" + # gitVersion is now something like '20150128.112847.17e840a' + rpmVersion="${rpmVersion%-dev}" + rpmRelease="0.0.$gitVersion" + fi + + # Replace any other dashes with periods + rpmVersion="${rpmVersion/-/.}" + + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" + rpmDate="$(date +'%a %b %d %Y')" + + # if go-md2man is available, pre-generate the man pages + make manpages + + # Convert the CHANGELOG.md file into RPM changelog format + rm -f contrib/builder/rpm/${PACKAGE_ARCH}/changelog + VERSION_REGEX="^\W\W (.*) \((.*)\)$" + ENTRY_REGEX="^[-+*] (.*)$" + while read -r line || [[ -n "$line" ]]; do + if [ -z "$line" ]; then continue; fi + if [[ "$line" =~ $VERSION_REGEX ]]; then + echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + if [[ "$line" =~ $ENTRY_REGEX ]]; then + echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + done < CHANGELOG.md + + builderDir="contrib/builder/rpm/${PACKAGE_ARCH}" + pkgs=( $(find 
"${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-rpm:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + COPY . /usr/src/${rpmName} + WORKDIR /usr/src/${rpmName} + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN mkdir -p /root/rpmbuild/SOURCES \ + && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros + WORKDIR /root/rpmbuild + RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS + WORKDIR /root/rpmbuild/SPECS + RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName} + RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd + RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy + RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc + RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini + RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar + RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ + ${rpmName}.spec + EOF + # selinux policy referencing systemd things won't work on non-systemd versions + # of centos or rhel, which we don't support anyways + if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then + if [ -d "./contrib/selinux-$version" ]; then + selinuxDir="selinux-${version}" + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + ${rpmName}-selinux.spec + EOF + fi + fi + tempImage="docker-temp/build-rpm:$version" + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f $DEST/$version/Dockerfile.build . 
) + docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" +) 2>&1 | tee -a $DEST/test.log diff --git a/hack/make/clean-apt-repo b/hack/make/clean-apt-repo new file mode 100755 index 0000000..e823cb5 --- /dev/null +++ b/hack/make/clean-apt-repo @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +set -e + +# This script cleans the experimental pool for the apt repo. +# This is useful when there are a lot of old experimental debs and you only want to keep the most recent. +# + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental +: ${DOCKER_ARCHIVE_DIR:=$DEST/archive} +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }') + +# get the latest version +latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine) +latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*}) + +echo "latest docker-engine version: $latest_docker_engine_version" + +# remove all the files that are not that version in experimental +pool_dir=$(dirname "$latest_docker_engine_file") +old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") ) + +echo "${old_pkgs[@]}" + +mkdir -p "$DOCKER_ARCHIVE_DIR" +for old_pkg in "${old_pkgs[@]}"; do + echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR" + mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR" +done + +echo +echo "$pool_dir now has contents:" +ls "$pool_dir" + +# now regenerate release files for experimental +export COMPONENT=experimental +source "${DIR}/update-apt-repo" + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/hack/make/clean-yum-repo b/hack/make/clean-yum-repo new file mode 100755 index 0000000..012689a --- /dev/null +++ b/hack/make/clean-yum-repo @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +# This script cleans the experimental pool for the yum repo. +# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent. 
+# + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental + +suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) ) + +for suite in "${suites[@]}"; do + echo "cleanup in: $suite" + ( set -x; repomanage -k2 --old "$suite" | xargs rm -f ) +done + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/hack/make/cover b/hack/make/cover new file mode 100644 index 0000000..4a37995 --- /dev/null +++ b/hack/make/cover @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -e + +bundle_cover() { + coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) + for p in "${coverprofiles[@]}"; do + echo + ( + set -x + go tool cover -func="$p" + ) + done +} + +bundle_cover 2>&1 | tee "$DEST/report.log" diff --git a/hack/make/cross b/hack/make/cross new file mode 100644 index 0000000..1607ad7 --- /dev/null +++ b/hack/make/cross @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -e + +# if we have our linux/amd64 version compiled, let's symlink it in +if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then + arch=$(go env GOHOSTARCH) + mkdir -p "$DEST/linux/${arch}" + ( + cd "$DEST/linux/${arch}" + ln -sf ../../../binary-daemon/* ./ + ) + echo "Created symlinks:" "$DEST/linux/${arch}/"* +fi + +DOCKER_CROSSPLATFORMS=${DOCKER_CROSSPLATFORMS:-"linux/amd64"} + +for platform in $DOCKER_CROSSPLATFORMS; do + ( + export KEEPDEST=1 + export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + + echo "Cross building: $DEST" + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + source "${MAKEDIR}/binary-daemon" + ) +done diff --git a/hack/make/dynbinary b/hack/make/dynbinary new file mode 100644 index 0000000..981e505 --- /dev/null +++ b/hack/make/dynbinary @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -e + +# This script exists as backwards compatibility for CI +( + + DEST="${DEST}-daemon" + ABS_DEST="${ABS_DEST}-daemon" + . 
hack/make/dynbinary-daemon
+)
diff --git a/hack/make/dynbinary-daemon b/hack/make/dynbinary-daemon
new file mode 100644
index 0000000..d1c0070
--- /dev/null
+++ b/hack/make/dynbinary-daemon
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+set -e
+
+(
+	export IAMSTATIC='false'
+	export LDFLAGS_STATIC_DOCKER=''
+	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
+	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
+	source "${MAKEDIR}/.binary"
+)
diff --git a/hack/make/dynbinary-docker b/hack/make/dynbinary-docker
new file mode 100644
index 0000000..ec07601
--- /dev/null
+++ b/hack/make/dynbinary-docker
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+set -e
+
+[ -z "$KEEPDEST" ] && rm -rf "$DEST"
+
+(
+	source "${MAKEDIR}/.binary-setup"
+	export BINARY_SHORT_NAME="$docker_BINARY_NAME"
+	export GO_PACKAGE='github.com/docker/docker/cmd/docker'
+	export IAMSTATIC='false'
+	export LDFLAGS="$LDFLAGS"
+	export LDFLAGS_STATIC_DOCKER=''
+	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
+	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary
+	source "${MAKEDIR}/.binary"
+	copy_binaries "$DEST" 'hash'
+
+	for file in dockerd docker-containerd docker-containerd-shim docker-containerd-ctr docker-runc docker-proxy; do
+		ln -sf $docker_BINARY_NAME "$DEST/$file"
+	done
+)
diff --git a/hack/make/generate-index-listing b/hack/make/generate-index-listing
new file mode 100755
index 0000000..7094ad7
--- /dev/null
+++ b/hack/make/generate-index-listing
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+set -e
+
+# This script generates index files for the directory structure
+# of the apt and yum repos
+
+: ${DOCKER_RELEASE_DIR:=$DEST}
+APTDIR=$DOCKER_RELEASE_DIR/apt
+YUMDIR=$DOCKER_RELEASE_DIR/yum
+
+if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then
+	echo >&2 'release-rpm or release-deb must be run before generate-index-listing'
+	exit 1
+fi
+
+create_index() {
+	local directory=$1
+	local original=$2
+	local cleaned=${directory#$original}
+
+	# the index file to create
+	local index_file="${directory}/index"
+
+	# cd into dir & touch the index file
+	cd $directory
+	touch $index_file
+
+	# print the html header
+	cat <<-EOF > "$index_file"
+	<!DOCTYPE html>
+	<html>
+	<head><title>Index of ${cleaned}/</title>
+	</head>
+	<body>
+	<h1>Index of ${cleaned}/</h1><hr>
+	<pre><a href="../">../</a>
+	EOF
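+	# (illustrative: for the directory $DOCKER_RELEASE_DIR/apt/repo/pool/main,
+	# ${cleaned} is "/repo/pool/main", so the page reads "Index of /repo/pool/main/"
+	# just like a stock Apache directory listing)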
+
+	# start of content output
+	(
+	# change IFS locally within subshell so the for loop saves line correctly to L var
+	IFS=$'\n';
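+	# (without it, the unquoted $(...) expansion below would also split each
+	# generated row on its internal spaces instead of only on newlines)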
+
+	# pretty sweet, will mimic the normal apache output, skipping "index" and hidden files
+	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g');
+	do
+		# pull the file name out from between the @ markers
+		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
+
+		# replace it with that file's human-readable size
+		F=$(du -bh $F | cut -f1);
+
+		# print the row with the size substituted for the trailing @file@ marker
+		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
+	done;
+	) >> $index_file;
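+	# (illustrative walk-through: a hypothetical pool file "foo.deb" arrives from
+	# the pipeline above as "foo.deb  01-Jan-2017 00:00  @foo.deb@"; the loop body
+	# then swaps the trailing @foo.deb@ marker for its size from du, e.g. "19M")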
+
+	# now output a sorted list of all directories in this dir (maxdepth 1) other than '.', exactly like apache
+	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file
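+	# (the %-43f padding plus the sed that turns the @_@ marker into a trailing
+	# "/" yield rows like "ubuntu-trusty/    01-Jan-2017 00:00  -", matching
+	# Apache's directory rows; the suite name is illustrative)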
+
+	# print the footer html
+	echo "</pre><hr></body></html>
" >> $index_file + +} + +get_dirs() { + local directory=$1 + + for d in `find ${directory} -type d`; do + create_index $d $directory + done +} + +get_dirs $APTDIR +get_dirs $YUMDIR diff --git a/hack/make/install-binary b/hack/make/install-binary new file mode 100755 index 0000000..57aa1a2 --- /dev/null +++ b/hack/make/install-binary @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e +rm -rf "$DEST" + +( + source "${MAKEDIR}/install-binary-daemon" +) diff --git a/hack/make/install-binary-daemon b/hack/make/install-binary-daemon new file mode 100644 index 0000000..12126ff --- /dev/null +++ b/hack/make/install-binary-daemon @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-daemon" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" +) diff --git a/hack/make/install-script b/hack/make/install-script new file mode 100644 index 0000000..558ae52 --- /dev/null +++ b/hack/make/install-script @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +set -e + +# This script modifies the install.sh script for domains and keys other than +# those used by the primary opensource releases. +# +# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint` +# or `GPG_KEYID` as environment variables, or the defaults for open source are used. +# +# The lower-case variables are substituted into install.sh. +# +# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release +# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with +# the correct contents to /root/.gnupg. 
+# +# It outputs the modified `install.sh` file to $DOCKER_RELEASE_DIR (default: $DEST) +# +# Example usage: +# +# docker run \ +# --rm \ +# --privileged \ +# -e "GPG_KEYID=deadbeef" \ +# -e "GNUPGHOME=/root/.gnupg" \ +# -v $HOME/.gnupg:/root/.gnupg \ +# -v $(pwd):/go/src/github.com/docker/docker/bundles \ +# "$IMAGE_DOCKER" \ +# hack/make.sh install-script + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} + +DEFAULT_URL="https://get.docker.com/" +DEFAULT_APT_URL="https://apt.dockerproject.org" +DEFAULT_YUM_URL="https://yum.dockerproject.org" +DEFAULT_GPG_FINGERPRINT="58118E89F3A912897C070ADBF76221572C52609D" + +: ${url:=$DEFAULT_URL} +: ${apt_url:=$DEFAULT_APT_URL} +: ${yum_url:=$DEFAULT_YUM_URL} +if [[ "$GPG_KEYID" == "releasedocker" ]] ; then + : ${gpg_fingerprint:=$DEFAULT_GPG_FINGERPRINT} +fi + +DEST_FILE="$DOCKER_RELEASE_DIR/install.sh" + +bundle_install_script() { + mkdir -p "$DOCKER_RELEASE_DIR" + + if [[ -z "$gpg_fingerprint" ]] ; then + # NOTE: if no key matching key is in /root/.gnupg, this will fail + gpg_fingerprint=$(gpg --with-fingerprint -k "$GPG_KEYID" | grep "Key fingerprint" | awk -F "=" '{print $2};' | tr -d ' ') + fi + + cp hack/install.sh "$DEST_FILE" + sed -i.bak 's#^url=".*"$#url="'"$url"'"#' "$DEST_FILE" + sed -i.bak 's#^apt_url=".*"$#apt_url="'"$apt_url"'"#' "$DEST_FILE" + sed -i.bak 's#^yum_url=".*"$#yum_url="'"$yum_url"'"#' "$DEST_FILE" + sed -i.bak 's#^gpg_fingerprint=".*"$#gpg_fingerprint="'"$gpg_fingerprint"'"#' "$DEST_FILE" + rm "${DEST_FILE}.bak" +} + +bundle_install_script diff --git a/hack/make/release-deb b/hack/make/release-deb new file mode 100755 index 0000000..acf4901 --- /dev/null +++ b/hack/make/release-deb @@ -0,0 +1,163 @@ +#!/usr/bin/env bash +set -e + +# This script creates the apt repos for the .deb files generated by hack/make/build-deb +# +# The following can then be used as apt sources: +# deb http://apt.dockerproject.org/repo $distro-$release $version +# +# For example: +# deb http://apt.dockerproject.org/repo ubuntu-trusty main +# deb http://apt.dockerproject.org/repo ubuntu-trusty testing +# deb http://apt.dockerproject.org/repo debian-wheezy experimental +# deb http://apt.dockerproject.org/repo debian-jessie main +# +# ... and so on and so forth for the builds created by hack/make/build-deb + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# setup the apt repo (if it does not exist) +mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists" + +# supported arches/sections +arches=( amd64 i386 armhf ppc64le s390x ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit) + if [ -n "$exists" ] ; then + components+=( $component ) + fi +done + +# set the component for the version being released +component="main" + +if [[ "$VERSION" == *-rc* ]]; then + component="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + component="experimental" +fi + +# Make sure our component is in the list of components +if [[ ! "${components[*]}" =~ $component ]] ; then + components+=( $component ) +fi + +# create apt-ftparchive file on every run. This is essential to avoid +# using stale versions of the config file that could cause unnecessary +# refreshing of bits for EOL-ed releases. 
+cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" +Dir { + ArchiveDir "${APTDIR}"; + CacheDir "${APTDIR}/db"; +}; + +Default { + Packages::Compress ". gzip bzip2"; + Sources::Compress ". gzip bzip2"; + Contents::Compress ". gzip bzip2"; +}; + +TreeDefault { + BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; + Directory "pool/\$(SECTION)"; + Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; + SrcDirectory "pool/\$(SECTION)"; + Sources "\$(DIST)/\$(SECTION)/source/Sources"; + Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; + FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; +}; +EOF + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + suite="${version//debootstrap-}" + + cat <<-EOF + Tree "dists/${suite}" { + Sections "${components[*]}"; + Architectures "${arches[*]}"; + } + + EOF +done >> "$APTDIR/conf/apt-ftparchive.conf" + +cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" +APT::FTPArchive::Release::Origin "Docker"; +APT::FTPArchive::Release::Components "${components[*]}"; +APT::FTPArchive::Release::Label "Docker APT Repository"; +APT::FTPArchive::Release::Architectures "${arches[*]}"; +EOF + +# release the debs +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" + DEBFILE=( "$dir/docker-engine"*.deb ) + + # add the deb for each component for the distro version into the + # pool (if it is not there already) + mkdir -p "$APTDIR/pool/$component/d/docker-engine/" + for deb in ${DEBFILE[@]}; do + d=$(basename "$deb") + # We do not want to generate a new deb if it has already been + # copied into the APTDIR + if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then + cp "$deb" "$tempdir/" + # if we have a $GPG_PASSPHRASE we may as well + # dpkg-sign before copying the deb into the pool + if [ ! 
-z "$GPG_PASSPHRASE" ]; then + dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \ + -k "$GPG_KEYID" --sign builder "$tempdir/$d" + fi + mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" + fi + done + + rm -rf "$tempdir" + + # build the right directory structure, needed for apt-ftparchive + for arch in "${arches[@]}"; do + for c in "${components[@]}"; do + mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" + done + done + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename}*.deb -o \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Components=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done +done diff --git a/hack/make/release-rpm b/hack/make/release-rpm new file mode 100755 index 0000000..477d15b --- /dev/null +++ b/hack/make/release-rpm @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +set -e + +# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm +# +# The following can then be used as a yum repo: +# http://yum.dockerproject.org/repo/$release/$distro/$distro-version +# +# For example: +# http://yum.dockerproject.org/repo/main/fedora/23 +# http://yum.dockerproject.org/repo/testing/centos/7 +# http://yum.dockerproject.org/repo/experimental/fedora/23 +# http://yum.dockerproject.org/repo/main/centos/7 +# +# ... and so on and so forth for the builds created by hack/make/build-rpm + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo +: ${GPG_KEYID:=releasedocker} + +# get the release +release="main" + +if [[ "$VERSION" == *-rc* ]]; then + release="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + release="experimental" +fi + +# Setup the yum repo +for dir in bundles/$VERSION/build-rpm/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + distro="${version%-*}" + + REPO=$YUMDIR/$release/$distro + + # if the directory does not exist, initialize the yum repo + if [[ ! -d $REPO/$suite/Packages ]]; then + mkdir -p "$REPO/$suite/Packages" + + createrepo --pretty "$REPO/$suite" + fi + + # path to rpms + RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) + + # if we have a $GPG_PASSPHRASE we may as well + # sign the rpms before adding to repo + if [ ! 
-z $GPG_PASSPHRASE ]; then + # export our key to rpm import + gpg --armor --export "$GPG_KEYID" > /tmp/gpg + rpm --import /tmp/gpg + + # sign the rpms + echo "yes" | setsid rpm \ + --define "_gpg_name $GPG_KEYID" \ + --define "_signature gpg" \ + --define "__gpg_check_password_cmd /bin/true" \ + --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ + --resign "${RPMFILE[@]}" + fi + + # copy the rpms to the packages folder + cp "${RPMFILE[@]}" "$REPO/$suite/Packages" + + # update the repo + createrepo --pretty --update "$REPO/$suite" +done diff --git a/hack/make/run b/hack/make/run new file mode 100644 index 0000000..366fea6 --- /dev/null +++ b/hack/make/run @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +set -e +rm -rf "$DEST" + +if ! command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before run' + false +fi + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + + +listen_port=2375 +if [ -n "$DOCKER_PORT" ]; then + IFS=':' read -r -a ports <<< "$DOCKER_PORT" + listen_port="${ports[-1]}" +fi + +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +args="--debug \ + --host tcp://0.0.0.0:${listen_port} --host unix:///var/run/docker.sock \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params" + +echo dockerd $args +exec dockerd $args diff --git a/hack/make/sign-repos b/hack/make/sign-repos new file mode 100755 index 0000000..61dbd7a --- /dev/null +++ b/hack/make/sign-repos @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# This script signs the deliverables from release-deb and release-rpm +# with a designated GPG key. + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +if [ -z "$GPG_PASSPHRASE" ]; then + echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' + exit 1 +fi + +if [ ! -d $APTDIR ] && [ ! 
-d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before sign-repos' + exit 1 +fi + +sign_packages(){ + # sign apt repo metadata + if [ -d $APTDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" + + # sign the repo metadata + for F in $(find $APTDIR -name Release); do + if test "$F" -nt "$F.gpg" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.gpg" "$F" + fi + inRelease="$(dirname "$F")/InRelease" + if test "$F" -nt "$inRelease" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --clearsign \ + --batch --yes \ + --output "$inRelease" "$F" + fi + done + fi + + # sign yum repo metadata + if [ -d $YUMDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" + + # sign the repo metadata + for F in $(find $YUMDIR -name repomd.xml); do + if test "$F" -nt "$F.asc" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.asc" "$F" + fi + done + fi +} + +sign_packages diff --git a/hack/make/test-deb-install b/hack/make/test-deb-install new file mode 100755 index 0000000..bc8e408 --- /dev/null +++ b/hack/make/test-deb-install @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +: ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} + +if [[ ! -d "${DEB_DIR}" ]]; then + echo "you must first run `make deb` or hack/make/build-deb" + exit 1 +fi + +test_deb_install(){ + # test for each Dockerfile in contrib/builder + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + local dir=$(basename "$dir") + + if [[ ! 
-d "${DEB_DIR}/${dir}" ]]; then + echo "No deb found for ${dir}" + exit 1 + fi + + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + cat <<-EOF > "${script}" + #!/bin/bash + set -e + set -x + + apt-get update && apt-get install -y apparmor + + dpkg -i /root/debs/*.deb || true + + apt-get install -yf + + /etc/init.d/apparmor start + + # this will do everything _except_ load the profile into the kernel + ( + cd /etc/apparmor.d + /sbin/apparmor_parser --skip-kernel-load docker-engine + ) + EOF + + chmod +x "${script}" + + echo "testing deb install for ${from}" + docker run --rm -i --privileged \ + -v ${DEB_DIR}/${dir}:/root/debs \ + -v ${script}:/install.sh \ + ${from} /install.sh + + rm -f ${script} + done +} + +( + bundle .integration-daemon-start + test_deb_install + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-docker-py b/hack/make/test-docker-py new file mode 100644 index 0000000..8739df2 --- /dev/null +++ b/hack/make/test-docker-py @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + dockerPy="$DEST/docker-py" + git clone https://github.com/zozo123/docker-py.git "$dockerPy" + + # exporting PYTHONPATH to import "docker" from our local docker-py + test_env PYTHONPATH="$dockerPy" py.test --junitxml="$DEST/results.xml" "$dockerPy/tests/integration" + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-install-script b/hack/make/test-install-script new file mode 100755 index 0000000..aec1287 --- /dev/null +++ b/hack/make/test-install-script @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +test_install_script(){ + # these are equivalent to main, testing, experimental components + # in the repos, but its the url that will do the conversion + components=( experimental test get ) + + for component in "${components[@]}"; do + # change url to specific component for testing + local test_url=https://${component}.docker.com + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" + + chmod +x "${script}" + + # test for each Dockerfile in contrib/builder + for dir in contrib/builder/*/*/; do + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + + echo "running install.sh for ${component} with ${from}" + docker run --rm -i -v ${script}:/install.sh ${from} /install.sh + done + + rm -f ${script} + done +} + +test_install_script diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli new file mode 100755 index 0000000..c8137f4 --- /dev/null +++ b/hack/make/test-integration-cli @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + bundle .integration-daemon-setup + + bundle_test_integration_cli + + bundle .integration-daemon-stop + + if [ "$(go env GOOS)" != 'windows' ] + then + leftovers=$(ps -ax -o pid,cmd | awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration-cli/ { print $1 }') + if [ -n "$leftovers" ] + then + ps aux + kill -9 $leftovers 2> /dev/null + echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" 
+ exit 1 + fi + fi + +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-integration-shell b/hack/make/test-integration-shell new file mode 100644 index 0000000..2201f5e --- /dev/null +++ b/hack/make/test-integration-shell @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +bundle .integration-daemon-start +bundle .integration-daemon-setup + +export ABS_DEST +bash +e diff --git a/hack/make/test-old-apt-repo b/hack/make/test-old-apt-repo new file mode 100755 index 0000000..e92b20e --- /dev/null +++ b/hack/make/test-old-apt-repo @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -e + +versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) + +install() { + local version=$1 + local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) + local dockerfile="${tmpdir}/Dockerfile" + cat <<-EOF > "$dockerfile" + FROM debian:jessie + ENV VERSION ${version} + RUN apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + --no-install-recommends + RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list + RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + RUN apt-get update && apt-get install -y \ + lxc-docker-\${VERSION} + EOF + + docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir +} + +for v in "${versions[@]}"; do + install "$v" +done diff --git a/hack/make/test-unit b/hack/make/test-unit new file mode 100644 index 0000000..41dc29b --- /dev/null +++ b/hack/make/test-unit @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +set -e + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + TESTFLAGS+=" -test.timeout=${TIMEOUT}" + INCBUILD="-i" + count=0 + for flag in "${BUILDFLAGS[@]}"; do + if [ "${flag}" == ${INCBUILD} ]; then + unset BUILDFLAGS[${count}] + break + fi + count=$[ ${count} + 1 ] + done + + date + if [ -z "$TESTDIRS" ]; then + TEST_PATH=./... 
+ else + TEST_PATH=./${TESTDIRS} + fi + + source "${MAKEDIR}/.go-autogen" + + if [ "$(go env GOHOSTOS)" = 'solaris' ]; then + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/daemon/graphdriver \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli \ + | grep -v github.com/docker/docker/daemon/cluster) + else + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli \ + | grep -v github.com/docker/docker/daemon/cluster) + fi + + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS github.com/docker/docker/pkg/term -test.root +} + +bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/tgz b/hack/make/tgz new file mode 100644 index 0000000..1fd37b6 --- /dev/null +++ b/hack/make/tgz @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +echo "tgz is deprecated" diff --git a/hack/make/ubuntu b/hack/make/ubuntu new file mode 100644 index 0000000..ad3f1d7 --- /dev/null +++ b/hack/make/ubuntu @@ -0,0 +1,190 @@ +#!/usr/bin/env bash + +PKGVERSION="${VERSION//-/'~'}" +# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + GIT_UNIX="$(git log -1 --pretty='%at')" + GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" + GIT_COMMIT="$(git log -1 --pretty='%h')" + GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" + # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' + PKGVERSION="$PKGVERSION~$GIT_VERSION" +fi + +# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false +# true + +# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="https://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. 
+bundle_ubuntu() { + DIR="$ABS_DEST/build" + + # Include our udev rules + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" + + # Include our init scripts + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" + + # Include contributed completions + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" + + # Include man pages + make manpages + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" + gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" + + # Generate postinst/prerm/postrm scripts + cat > "$DEST/postinst" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/prerm" <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/postrm" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. 
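+# (systemd only re-reads unit files on "systemctl daemon-reload"; without it,
+# later service operations could still see the just-removed docker unit)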
+if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + + ( + # switch directories so we create *.deb in the right folder + cd "$DEST" + + # create lxc-docker-VERSION package + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + --after-install "$ABS_DEST/postinst" \ + --before-remove "$ABS_DEST/prerm" \ + --after-remove "$ABS_DEST/postrm" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --deb-suggests apparmor \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . + # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version "$PKGVERSION" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" +} + +bundle_ubuntu diff --git a/hack/make/update-apt-repo b/hack/make/update-apt-repo new file mode 100755 index 0000000..3a80c94 --- /dev/null +++ b/hack/make/update-apt-repo @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +set -e + +# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. +# This script is a "fix all" for any sort of problems that might have occurred with +# the Release or Package files in the repo. +# It should only be used in the rare case of extreme emergencies to regenerate +# Release and Package files for the apt repo. +# +# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running +# this script. 
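+#
+# Typical invocation (hypothetical release directory):
+#   DOCKER_RELEASE_DIR=/volumes/repo hack/make.sh update-apt-repo sign-repos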
+
+: ${DOCKER_RELEASE_DIR:=$DEST}
+APTDIR=$DOCKER_RELEASE_DIR/apt/repo
+
+# supported arches/sections
+arches=( amd64 i386 )
+
+# Preserve existing components but don't add any non-existing ones
+for component in main testing experimental ; do
+	if ls "$APTDIR"/dists/*/"$component" >/dev/null 2>&1 ; then
+		components+=( $component )
+	fi
+done
+
+dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) )
+
+# override the detected components if $COMPONENT is set
+if [ "$COMPONENT" ]; then
+	components=( $COMPONENT )
+fi
+
+# release the debs
+for dist in "${dists[@]}"; do
+	version="$(basename "$dist")"
+	for component in "${components[@]}"; do
+		codename="${version//debootstrap-}"
+
+		# update the filelist for this codename/component
+		find "$APTDIR/pool/$component" \
+			-name "*~${codename#*-}*.deb" > "$APTDIR/dists/$codename/$component/filelist"
+	done
+done
+
+# run the apt-ftparchive commands so we can have pinning
+apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf"
+
+for dist in "${dists[@]}"; do
+	version=$(basename "$dist")
+	for component in "${components[@]}"; do
+		codename="${version//debootstrap-}"
+
+		apt-ftparchive \
+			-o "APT::FTPArchive::Release::Codename=$codename" \
+			-o "APT::FTPArchive::Release::Suite=$codename" \
+			-c "$APTDIR/conf/docker-engine-release.conf" \
+			release \
+			"$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release"
+
+		for arch in "${arches[@]}"; do
+			apt-ftparchive \
+				-o "APT::FTPArchive::Release::Codename=$codename" \
+				-o "APT::FTPArchive::Release::Suite=$codename" \
+				-o "APT::FTPArchive::Release::Component=$component" \
+				-o "APT::FTPArchive::Release::Architecture=$arch" \
+				-c "$APTDIR/conf/docker-engine-release.conf" \
+				release \
+				"$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release"
+		done
+	done
+done
diff --git a/hack/make/win b/hack/make/win
new file mode 100644
index 0000000..bc6d510
--- /dev/null
+++ b/hack/make/win
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+set -e
+
+# explicit list of os/arch combos that support being a daemon
+declare -A daemonSupporting
+daemonSupporting=(
+	[linux/amd64]=1
+	[windows/amd64]=1
+)
+platform="windows/amd64"
+export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
+mkdir -p "$DEST"
+ABS_DEST="$(cd "$DEST" && pwd -P)"
+export GOOS=${platform%/*}
+export GOARCH=${platform##*/}
+if [ -z "${daemonSupporting[$platform]}" ]; then
+	export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
+	export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
+fi
+source "${MAKEDIR}/binary"
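The `win` bundle above produces a client-only binary by stripping the `daemon` build tag from BUILDFLAGS. As a rough illustration of what that tag controls, a Go file gated like the following is compiled only when `daemon` is passed via `-tags` (a minimal sketch; the file contents and the `startDaemon` name are hypothetical, not part of this patch):

```go
// +build daemon

package main

// startDaemon is a hypothetical daemon-only entry point: when the "daemon"
// build tag is absent (as on client-only platforms such as windows/amd64
// here), this entire file is excluded from compilation, so no daemon code
// ends up in the binary.
func startDaemon() error {
	// daemon startup would live here
	return nil
}
```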
diff --git a/hack/release.sh b/hack/release.sh
new file mode 100755
index 0000000..fbbcde5
--- /dev/null
+++ b/hack/release.sh
@@ -0,0 +1,325 @@
+#!/usr/bin/env bash
+set -e
+
+# This script looks for bundles built by make.sh, and releases them on a
+# public S3 bucket.
+#
+# Bundles should be available for the VERSION string passed as argument.
+#
+# The correct way to call this script is inside a container built by the
+# official Dockerfile at the root of the Docker source code. The Dockerfile,
+# make.sh and release.sh should all be from the same source code revision.
+
+set -o pipefail
+
+# Print a usage message and exit.
+usage() {
+	cat >&2 <<'EOF'
+To run, I need:
+- to be in a container generated by the Dockerfile at the top of the Docker
+  repository;
+- to be provided with the location of an S3 bucket and path, in
+  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
+- to be provided with AWS credentials for this S3 bucket, in environment
+  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
+- a generous amount of good will and nice manners.
+The canonical way to run me is to run the image produced by the Dockerfile, e.g.:
+
+docker run -e AWS_S3_BUCKET=test.docker.com \
+           -e AWS_ACCESS_KEY_ID \
+           -e AWS_SECRET_ACCESS_KEY \
+           -e AWS_DEFAULT_REGION \
+           -it --privileged \
+           docker ./hack/release.sh
+EOF
+	exit 1
+}
+
+[ "$AWS_S3_BUCKET" ] || usage
+[ "$AWS_ACCESS_KEY_ID" ] || usage
+[ "$AWS_SECRET_ACCESS_KEY" ] || usage
+[ -d /go/src/github.com/docker/docker ] || usage
+cd /go/src/github.com/docker/docker
+[ -x hack/make.sh ] || usage
+
+export AWS_DEFAULT_REGION
+: ${AWS_DEFAULT_REGION:=us-west-1}
+
+AWS_CLI=${AWS_CLI:-'aws'}
+
+RELEASE_BUNDLES=(
+	binary
+	cross
+	tgz
+)
+
+if [ "$1" != '--release-regardless-of-test-failure' ]; then
+	RELEASE_BUNDLES=(
+		test-unit
+		"${RELEASE_BUNDLES[@]}"
+		test-integration-cli
+	)
+fi
+
+VERSION=$(< VERSION)
+BUCKET=$AWS_S3_BUCKET
+BUCKET_PATH=$BUCKET
+[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
+
+if command -v git &> /dev/null && git rev-parse &> /dev/null; then
+	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
+		echo "You cannot run the release script on a repo with uncommitted changes"
+		usage
+	fi
+fi
+
+# These are the two keys we've used to sign the debs
+#	release (get.docker.com)
+#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
+#	test (test.docker.com)
+#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
+
+setup_s3() {
+	echo "Setting up S3"
+	# Try creating the bucket. Ignore errors (it might already exist).
+	$AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true
+	# Check access to the bucket.
+	$AWS_CLI s3 ls "s3://$BUCKET" >/dev/null
+	# Make the bucket accessible through website endpoints.
+	$AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET"
+}
+
+# write_to_s3 uploads the contents of standard input to the specified S3 url.
+write_to_s3() {
+	DEST=$1
+	F=`mktemp`
+	cat > "$F"
+	$AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
+	rm -f "$F"
+}
+
+s3_url() {
+	case "$BUCKET" in
+		get.docker.com|test.docker.com|experimental.docker.com)
+			echo "https://$BUCKET_PATH"
+			;;
+		*)
+			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
+			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
+				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
+			else
+				echo "$BASE_URL"
+			fi
+			;;
+	esac
+}
+
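For clarity, here is the same bucket-to-URL logic as `s3_url` expressed in Go (a hedged sketch, not part of the release tooling; the function name and sample inputs are invented for illustration):

```go
package main

import "fmt"

// s3URL mirrors the shell s3_url function above: the well-known release
// buckets are served over HTTPS, anything else through the regional S3
// website endpoint that setup_s3 enables.
func s3URL(bucket, s3BucketPath, region string) string {
	bucketPath := bucket
	if s3BucketPath != "" {
		bucketPath += "/" + s3BucketPath
	}
	switch bucket {
	case "get.docker.com", "test.docker.com", "experimental.docker.com":
		return "https://" + bucketPath
	}
	base := fmt.Sprintf("http://%s.s3-website-%s.amazonaws.com", bucket, region)
	if s3BucketPath != "" {
		return base + "/" + s3BucketPath
	}
	return base
}

func main() {
	// hypothetical bucket and path, matching the defaults above
	fmt.Println(s3URL("my-test-bucket", "prerelease", "us-west-1"))
	// http://my-test-bucket.s3-website-us-west-1.amazonaws.com/prerelease
}
```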
+build_all() {
+	echo "Building release"
+	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
+		echo >&2
+		echo >&2 'The build or tests appear to have failed.'
+		echo >&2
+		echo >&2 'You, as the release maintainer, now have a couple options:'
+		echo >&2 '- delay release and fix issues'
+		echo >&2 '- delay release and fix issues'
+		echo >&2 '- did we mention how important this is? issues need fixing :)'
+		echo >&2
+		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
+		echo >&2 '  really knows all the hairy problems at hand with the current release'
+		echo >&2 '  issues) may bypass this checking by running this script again with the'
+		echo >&2 '  single argument of "--release-regardless-of-test-failure", which will skip'
+		echo >&2 '  running the test suite, and will only build the binaries and packages. Please'
+		echo >&2 '  avoid using this if at all possible.'
+		echo >&2
+		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
+		echo >&2 '  should be used. If there are release issues, we should always err on the'
+		echo >&2 '  side of caution.'
+		echo >&2
+		exit 1
+	fi
+}
+
+upload_release_build() {
+	src="$1"
+	dst="$2"
+	latest="$3"
+
+	echo
+	echo "Uploading $src"
+	echo "  to $dst"
+	echo
+	$AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst"
+	if [ "$latest" ]; then
+		echo
+		echo "Copying to $latest"
+		echo
+		$AWS_CLI s3 cp --acl public-read "$dst" "$latest"
+	fi
+
+	# get hash files too (see hash_files() in hack/make.sh)
+	for hashAlgo in md5 sha256; do
+		if [ -e "$src.$hashAlgo" ]; then
+			echo
+			echo "Uploading $src.$hashAlgo"
+			echo "  to $dst.$hashAlgo"
+			echo
+			$AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
+			if [ "$latest" ]; then
+				echo
+				echo "Copying to $latest.$hashAlgo"
+				echo
+				$AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
+			fi
+		fi
+	done
+}
+
+release_build() {
+	echo "Releasing binaries"
+	GOOS=$1
+	GOARCH=$2
+
+	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
+	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
+	binary=docker-$VERSION
+	zipExt=".tgz"
+	binaryExt=""
+	tgz=$binary$zipExt
+
+	latestBase=
+	if [ -z "$NOLATEST" ]; then
+		latestBase=docker-latest
+	fi
+
+	# we need to map our GOOS and GOARCH to uname values
+	# see https://en.wikipedia.org/wiki/Uname
+	# i.e., GOOS=linux -> "uname -s"=Linux
+
+	s3Os=$GOOS
+	case "$s3Os" in
+		darwin)
+			s3Os=Darwin
+			;;
+		freebsd)
+			s3Os=FreeBSD
+			;;
+		linux)
+			s3Os=Linux
+			;;
+		solaris)
+			echo skipping solaris release
+			return 0
+			;;
+		windows)
+			# this is windows, so use the .zip and .exe extensions for the files.
+			s3Os=Windows
+			zipExt=".zip"
+			binaryExt=".exe"
+			tgz=$binary$zipExt
+			binary+=$binaryExt
+			;;
+		*)
+			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
+			exit 1
+			;;
+	esac
+
+	s3Arch=$GOARCH
+	case "$s3Arch" in
+		amd64)
+			s3Arch=x86_64
+			;;
+		386)
+			s3Arch=i386
+			;;
+		arm)
+			s3Arch=armel
+			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
+			;;
+		*)
+			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
+			exit 1
+			;;
+	esac
+
+	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
+	# latest=
+	latestTgz=
+	if [ "$latestBase" ]; then
+		# commented out since we aren't uploading binaries right now.
+		# latest="$s3Dir/$latestBase$binaryExt"
+		# we don't include the $binaryExt because we don't want docker.exe.zip
+		latestTgz="$s3Dir/$latestBase$zipExt"
+	fi
+
+	if [ ! -f "$tgzDir/$tgz" ]; then
+		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
+		exit 1
+	fi
+	# disable binary uploads for now. Only providing tgz downloads
+	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
+	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
+}
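The GOOS/GOARCH-to-uname translation that `release_build` performs with case statements is effectively a lookup table. A minimal Go sketch of the same mapping (the helper name is invented, and only the combinations shown above are covered):

```go
package main

import "fmt"

// unameValues translates GOOS/GOARCH into the values that "uname -s" and
// "uname -m" would report, mirroring the case statements in release_build.
func unameValues(goos, goarch string) (string, string, bool) {
	oses := map[string]string{
		"darwin":  "Darwin",
		"freebsd": "FreeBSD",
		"linux":   "Linux",
		"windows": "Windows",
	}
	arches := map[string]string{
		"amd64": "x86_64",
		"386":   "i386",
		"arm":   "armel",
	}
	s, okOS := oses[goos]
	m, okArch := arches[goarch]
	return s, m, okOS && okArch
}

func main() {
	s, m, ok := unameValues("linux", "amd64")
	fmt.Println(s, m, ok) // Linux x86_64 true
}
```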
+
+# Upload binaries and tgz files to S3
+release_binaries() {
+	[ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || {
+		echo >&2 './hack/make.sh must be run before release_binaries'
+		exit 1
+	}
+
+	for d in bundles/$VERSION/cross/*/*; do
+		GOARCH="$(basename "$d")"
+		GOOS="$(basename "$(dirname "$d")")"
+		release_build "$GOOS" "$GOARCH"
+	done
+
+	# TODO create redirect from builds/*/i686 to builds/*/i386
+
+	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
+EOF
+}
diff --git a/hack/validate/changelog-date-descending b/hack/validate/changelog-date-descending
new file mode 100755
--- /dev/null
+++ b/hack/validate/changelog-date-descending
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+changelogFile=${1:-CHANGELOG.md}
+
+if [ ! -r "$changelogFile" ]; then
+	echo "Unable to read file $changelogFile" >&2
+	exit 1
+fi
+
+grep -e '^## ' "$changelogFile" | awk '{print$3}' | sort -c -r || exit 2
+
+echo "Congratulations! Changelog $changelogFile dates are in descending order."
diff --git a/hack/validate/changelog-well-formed b/hack/validate/changelog-well-formed
new file mode 100755
index 0000000..6c7ce1a
--- /dev/null
+++ b/hack/validate/changelog-well-formed
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+changelogFile=${1:-CHANGELOG.md}
+
+if [ ! -r "$changelogFile" ]; then
+	echo "Unable to read file $changelogFile" >&2
+	exit 1
+fi
+
+changelogWellFormed=1
+
+# e.g. "## 1.12.3 (2016-10-26)"
+VER_LINE_REGEX='^## [0-9]+\.[0-9]+\.[0-9]+(-ce)? \([0-9]+-[0-9]+-[0-9]+\)$'
+while read -r line; do
+	if ! [[ "$line" =~ $VER_LINE_REGEX ]]; then
+		echo "Malformed changelog $changelogFile line \"$line\"" >&2
+		changelogWellFormed=0
+	fi
+done < <(grep '^## ' "$changelogFile")
+
+if [[ "$changelogWellFormed" == "1" ]]; then
+	echo "Congratulations! Changelog $changelogFile is well-formed."
+else
+	exit 2
+fi
diff --git a/hack/validate/dco b/hack/validate/dco
new file mode 100755
index 0000000..f391001
--- /dev/null
+++ b/hack/validate/dco
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
+dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
+#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"
+
+: ${adds:=0}
+: ${dels:=0}
+
+# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash"
+githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'
+
+# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
+dcoPrefix='Signed-off-by:'
+dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$"
+
+check_dco() {
+	grep -qE "$dcoRegex"
+}
+
+if [ $adds -eq 0 -a $dels -eq 0 ]; then
+	echo '0 adds, 0 deletions; nothing to validate! :)'
+else
+	commits=( $(validate_log --format='format:%H%n') )
+	badCommits=()
+	for commit in "${commits[@]}"; do
+		if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then
+			# no content (ie, Merge commit, etc)
+			continue
+		fi
+		if ! git log -1 --format='format:%B' "$commit" | check_dco; then
+			badCommits+=( "$commit" )
+		fi
+	done
+	if [ ${#badCommits[@]} -eq 0 ]; then
+		echo "Congratulations! All commits are properly signed with the DCO!"
+	else
+		{
+			echo "These commits do not have a proper '$dcoPrefix' marker:"
+			for commit in "${badCommits[@]}"; do
+				echo " - $commit"
+			done
+			echo
+			echo 'Please amend each commit to include a properly formatted DCO marker.'
+			echo
+			echo 'Visit the following URL for information about the Docker DCO:'
+			echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work'
+			echo
+		} >&2
+		false
+	fi
+fi
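The DCO pattern above is a plain extended regular expression, so it can be reused outside the shell harness. A minimal Go sketch (the sample commit message and names are invented; `(?m)` makes `^`/`$` match per line so a whole commit message can be scanned):

```go
package main

import (
	"fmt"
	"regexp"
)

// dcoRegex reproduces the hack/validate/dco pattern above.
var dcoRegex = regexp.MustCompile(
	`(?m)^(Docker-DCO-1.1-)?Signed-off-by: ([^<]+) <([^<>@]+@[^<>]+)>` +
		`( \(github: ([a-zA-Z0-9][a-zA-Z0-9-]+)\))?$`)

func main() {
	// hypothetical commit message for illustration
	msg := "Fix frobnication\n\nSigned-off-by: Jane Doe <jane@example.com> (github: janedoe)"
	fmt.Println(dcoRegex.MatchString(msg)) // true
}
```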
diff --git a/hack/validate/default b/hack/validate/default
new file mode 100755
index 0000000..e243f43
--- /dev/null
+++ b/hack/validate/default
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Run default validation, exclude vendor because it's slow
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+. $SCRIPTDIR/dco
+. $SCRIPTDIR/default-seccomp
+. $SCRIPTDIR/gofmt
+. $SCRIPTDIR/lint
+. $SCRIPTDIR/pkg-imports
+. $SCRIPTDIR/swagger
+. $SCRIPTDIR/swagger-gen
+. $SCRIPTDIR/test-imports
+. $SCRIPTDIR/toml
+. $SCRIPTDIR/vet
+. $SCRIPTDIR/changelog-well-formed
+. $SCRIPTDIR/changelog-date-descending
diff --git a/hack/validate/default-seccomp b/hack/validate/default-seccomp
new file mode 100755
index 0000000..24cbf00
--- /dev/null
+++ b/hack/validate/default-seccomp
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) )
+unset IFS
+
+if [ ${#files[@]} -gt 0 ]; then
+	# We run 'go generate' and see if we have a diff afterwards
+	go generate ./profiles/seccomp/ >/dev/null
+	# Let's see if the working directory is clean
+	diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)"
+	if [ "$diffs" ]; then
+		{
+			echo 'The result of go generate ./profiles/seccomp/ differs'
+			echo
+			echo "$diffs"
+			echo
+			echo 'Please re-run go generate ./profiles/seccomp/'
+			echo
+		} >&2
+		false
+	else
+		echo 'Congratulations! Seccomp profile generation is done correctly.'
+	fi
+fi
diff --git a/hack/validate/gofmt b/hack/validate/gofmt
new file mode 100755
index 0000000..38027a9
--- /dev/null
+++ b/hack/validate/gofmt
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' |
+	grep -v '^vendor/' |
+	grep -v '\.pb\.go$' || true) )
+unset IFS
+
+badFiles=()
+for f in "${files[@]}"; do
+	# we use "git show" here to validate that what's committed is formatted
+	if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then
+		badFiles+=( "$f" )
+	fi
+done
+
+if [ ${#badFiles[@]} -eq 0 ]; then
+	echo 'Congratulations! All Go source files are properly formatted.'
+else
+	{
+		echo "These files are not properly gofmt'd:"
+		for f in "${badFiles[@]}"; do
+			echo " - $f"
+		done
+		echo
+		echo 'Please reformat the above files using "gofmt -s -w" and commit the result.'
+		echo
+	} >&2
+	false
+fi
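Note that the gofmt check above runs with `-s`, which applies simplifications beyond plain formatting. A small illustrative snippet (not taken from this tree) of what `-s` rewrites:

```go
package main

// Before gofmt -s: the inner element types are spelled out redundantly.
var verbose = [][]int{[]int{1, 2}, []int{3, 4}}

// After gofmt -s: redundant composite-literal types are dropped.
var simplified = [][]int{{1, 2}, {3, 4}}

func main() {
	_ = verbose
	_ = simplified
}
```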
diff --git a/hack/validate/lint b/hack/validate/lint
new file mode 100755
index 0000000..341490a
--- /dev/null
+++ b/hack/validate/lint
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '\.pb\.go$' || true) )
+unset IFS
+
+errors=()
+for f in "${files[@]}"; do
+	failedLint=$(golint "$f")
+	if [ "$failedLint" ]; then
+		errors+=( "$failedLint" )
+	fi
+done
+
+if [ ${#errors[@]} -eq 0 ]; then
+	echo 'Congratulations! All Go source files have been linted.'
+else
+	{
+		echo "Errors from golint:"
+		for err in "${errors[@]}"; do
+			echo "$err"
+		done
+		echo
+		echo 'Please fix the above errors. You can test via "golint" and commit the result.'
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/validate/pkg-imports b/hack/validate/pkg-imports
new file mode 100755
index 0000000..a9aab64
--- /dev/null
+++ b/hack/validate/pkg-imports
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+set -e
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) )
+unset IFS
+
+badFiles=()
+for f in "${files[@]}"; do
+	IFS=$'\n'
+	badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true) )
+	unset IFS
+
+	for import in "${badImports[@]}"; do
+		badFiles+=( "$f imports $import" )
+	done
+done
+
+if [ ${#badFiles[@]} -eq 0 ]; then
+	echo 'Congratulations! "./pkg/..." is safely isolated from internal code.'
+else
+	{
+		echo 'These files import internal code (either directly or indirectly):'
+		for f in "${badFiles[@]}"; do
+			echo " - $f"
+		done
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/validate/swagger b/hack/validate/swagger
new file mode 100755
index 0000000..0b3c271
--- /dev/null
+++ b/hack/validate/swagger
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -e
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true) )
+unset IFS
+
+if [ ${#files[@]} -gt 0 ]; then
+	yamllint -c "${SCRIPTDIR}/.swagger-yamllint" api/swagger.yaml
+	swagger validate api/swagger.yaml
+fi
diff --git a/hack/validate/swagger-gen b/hack/validate/swagger-gen
new file mode 100755
index 0000000..451ea3f
--- /dev/null
+++ b/hack/validate/swagger-gen
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true) )
+unset IFS
+
+if [ ${#files[@]} -gt 0 ]; then
+	${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null
+	# Let's see if the working directory is clean
+	diffs="$(git status --porcelain -- api/types/ 2>/dev/null)"
+	if [ "$diffs" ]; then
+		{
+			echo 'The result of hack/generate-swagger-api.sh differs'
+			echo
+			echo "$diffs"
+			echo
+			echo 'Please update api/swagger.yaml with any api changes, then '
+			echo 'run `hack/generate-swagger-api.sh`.'
+		} >&2
+		false
+	else
+		echo 'Congratulations! All api changes are done the right way.'
+	fi
+else
+	echo 'No api/types/ or api/swagger.yaml changes in diff.'
+fi
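The pkg-imports check above leans on the template output of `go list`. Roughly the same check can be written directly in Go (a sketch assuming `go list` is on PATH and this tree sits on the GOPATH; the package passed in `main` is only an example):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// badImports lists the transitive deps of a package and flags any
// docker-internal import that is neither under pkg/ nor vendored,
// mirroring the grep pipeline in hack/validate/pkg-imports.
func badImports(pkg string) ([]string, error) {
	out, err := exec.Command("go", "list", "-e", "-f", `{{ join .Deps "\n" }}`, pkg).Output()
	if err != nil {
		return nil, err
	}
	var bad []string
	for _, dep := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		if strings.HasPrefix(dep, "github.com/docker/docker") &&
			!strings.HasPrefix(dep, "github.com/docker/docker/pkg/") &&
			!strings.HasPrefix(dep, "github.com/docker/docker/vendor") {
			bad = append(bad, dep)
		}
	}
	return bad, nil
}

func main() {
	bad, err := badImports("github.com/docker/docker/pkg/archive")
	if err != nil {
		fmt.Println("go list failed:", err)
		return
	}
	fmt.Println(bad) // empty if the package is properly isolated
}
```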
diff --git a/hack/validate/test-imports b/hack/validate/test-imports
new file mode 100755
index 0000000..0e836a3
--- /dev/null
+++ b/hack/validate/test-imports
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Make sure we're not using Go's built-in testing package any more in integration-cli
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) )
+unset IFS
+
+badFiles=()
+for f in "${files[@]}"; do
+	# skip check_test.go since it *does* use the testing package
+	if [ "$f" = "integration-cli/check_test.go" ]; then
+		continue
+	fi
+
+	# we use "git show" here to validate that what's committed doesn't contain golang built-in testing
+	if git show "$VALIDATE_HEAD:$f" | grep -q 'testing\.T'; then
+		if [ "$(echo $f | grep '_test')" ]; then
+			# only _test files are forbidden from using testing.T; non-test helpers may mention it
+			badFiles+=( "$f" )
+		fi
+	fi
+done
+
+if [ ${#badFiles[@]} -eq 0 ]; then
+	echo 'Congratulations! No testing.T found.'
+else
+	{
+		echo "These files use the wrong testing infrastructure:"
+		for f in "${badFiles[@]}"; do
+			echo " - $f"
+		done
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/validate/toml b/hack/validate/toml
new file mode 100755
index 0000000..d5b2ce1
--- /dev/null
+++ b/hack/validate/toml
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) )
+unset IFS
+
+badFiles=()
+for f in "${files[@]}"; do
+	# we use "git show" here to validate that what's committed has valid toml syntax
+	if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then
+		badFiles+=( "$f" )
+	fi
+done
+
+if [ ${#badFiles[@]} -eq 0 ]; then
+	echo 'Congratulations! All toml source files changed here have valid syntax.'
+else
+	{
+		echo "These files are not valid toml:"
+		for f in "${badFiles[@]}"; do
+			echo " - $f"
+		done
+		echo
+		echo 'Please reformat the above files as valid toml'
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/validate/vendor b/hack/validate/vendor
new file mode 100755
index 0000000..69160a9
--- /dev/null
+++ b/hack/validate/vendor
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+validate_vendor_diff(){
+	IFS=$'\n'
+	files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) )
+	unset IFS
+
+	if [ ${#files[@]} -gt 0 ]; then
+		# We run vndr and see if we have a diff afterwards
+		vndr
+		# Let's see if the working directory is clean
+		diffs="$(git status --porcelain -- vendor 2>/dev/null)"
+		if [ "$diffs" ]; then
+			{
+				echo 'The result of vndr differs'
+				echo
+				echo "$diffs"
+				echo
+				echo 'Please vendor your package with github.com/LK4D4/vndr.'
+				echo
+			} >&2
+			false
+		else
+			echo 'Congratulations! All vendoring changes are done the right way.'
+		fi
+	else
+		echo 'No vendor changes in diff.'
+	fi
+}
+
+# 1. make sure all the vendored packages are used
+# 2. make sure all the packages contain license information (just a warning, because it can cause false positives)
+validate_vendor_used() {
+	pkgs=$(mawk '/^[a-zA-Z0-9]/ { print $1 }' < vendor.conf)
+	for f in $pkgs; do
+		if ls -d vendor/$f > /dev/null 2>&1; then
+			found=$(find vendor/$f -iregex '.*LICENSE.*' -or -iregex '.*COPYRIGHT.*' -or -iregex '.*COPYING.*' | wc -l)
+			if [ $found -eq 0 ]; then
+				echo "WARNING: could not find copyright information for $f"
+			fi
+		else
+			echo "WARNING: $f is vendored but unused"
+		fi
+	done
+}
+
+validate_vendor_diff
+validate_vendor_used
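The `find -iregex` license scan in `validate_vendor_used` translates naturally to a directory walk. A minimal Go sketch of the same idea (the vendored path in `main` is only an example):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// hasLicense reports whether a vendored package directory contains a
// LICENSE, COPYRIGHT or COPYING file, like the find invocation above.
func hasLicense(dir string) (bool, error) {
	found := false
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		name := strings.ToUpper(info.Name())
		if strings.Contains(name, "LICENSE") ||
			strings.Contains(name, "COPYRIGHT") ||
			strings.Contains(name, "COPYING") {
			found = true
		}
		return nil
	})
	return found, err
}

func main() {
	ok, err := hasLicense("vendor/github.com/pkg/errors")
	fmt.Println(ok, err)
}
```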
diff --git a/hack/validate/vet b/hack/validate/vet
new file mode 100755
index 0000000..95dc7a7
--- /dev/null
+++ b/hack/validate/vet
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+source "${SCRIPTDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) )
+unset IFS
+
+errors=()
+for f in "${files[@]}"; do
+	failedVet=$(go vet "$f")
+	if [ "$failedVet" ]; then
+		errors+=( "$failedVet" )
+	fi
+done
+
+
+if [ ${#errors[@]} -eq 0 ]; then
+	echo 'Congratulations! All Go source files have been vetted.'
+else
+	{
+		echo "Errors from go vet:"
+		for err in "${errors[@]}"; do
+			echo " - $err"
+		done
+		echo
+		echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/vendor.sh b/hack/vendor.sh
new file mode 100755
index 0000000..0bba30f
--- /dev/null
+++ b/hack/vendor.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+# This file is just a wrapper around the vndr (github.com/LK4D4/vndr) tool.
+# To update dependencies, change the `vendor.conf` file in the root of the
+# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md
+# for vndr usage.
+
+set -e
+
+if ! hash vndr; then
+	echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH"
+	exit 1
+fi
+
+vndr -whitelist 'VERSION' "$@"
diff --git a/image/cache/cache.go b/image/cache/cache.go
new file mode 100644
index 0000000..e074beb
--- /dev/null
+++ b/image/cache/cache.go
@@ -0,0 +1,253 @@
+package cache
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/pkg/errors"
+)
+
+// NewLocal returns a local image cache, based on the parent chain
+func NewLocal(store image.Store) *LocalImageCache {
+	return &LocalImageCache{
+		store: store,
+	}
+}
+
+// LocalImageCache is a cache based on the parent chain.
+type LocalImageCache struct {
+	store image.Store
+}
+
+// GetCache returns the image id found in the cache
+func (lic *LocalImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) {
+	return getImageIDAndError(getLocalCachedImage(lic.store, image.ID(imgID), config))
+}
+
+// New returns an image cache, based on history objects
+func New(store image.Store) *ImageCache {
+	return &ImageCache{
+		store:           store,
+		localImageCache: NewLocal(store),
+	}
+}
+
+// ImageCache is a cache based on history objects. It requires an initial set of images.
+type ImageCache struct { + sources []*image.Image + store image.Store + localImageCache *LocalImageCache +} + +// Populate adds an image to the cache (to be queried later) +func (ic *ImageCache) Populate(image *image.Image) { + ic.sources = append(ic.sources, image) +} + +// GetCache returns the image id found in the cache +func (ic *ImageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { + imgID, err := ic.localImageCache.GetCache(parentID, cfg) + if err != nil { + return "", err + } + if imgID != "" { + for _, s := range ic.sources { + if ic.isParent(s.ID(), image.ID(imgID)) { + return imgID, nil + } + } + } + + var parent *image.Image + lenHistory := 0 + if parentID != "" { + parent, err = ic.store.Get(image.ID(parentID)) + if err != nil { + return "", errors.Wrapf(err, "unable to find image %v", parentID) + } + lenHistory = len(parent.History) + } + + for _, target := range ic.sources { + if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { + continue + } + + if len(target.History)-1 == lenHistory { // last + if parent != nil { + if err := ic.store.SetParent(target.ID(), parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return target.ID().String(), nil + } + + imgID, err := ic.restoreCachedImage(parent, target, cfg) + if err != nil { + return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + } + + ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm + return imgID.String(), nil + } + + return "", nil +} + +func (ic *ImageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { + var history []image.History + rootFS := image.NewRootFS() + lenHistory := 0 + if parent != nil { + history = parent.History + rootFS = parent.RootFS + lenHistory = len(parent.History) + } + history = append(history, target.History[lenHistory]) + if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" { + rootFS.Append(layer) + } + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: cfg, + Architecture: target.Architecture, + OS: target.OS, + Author: target.Author, + Created: history[len(history)-1].Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: target.OSFeatures, + OSVersion: target.OSVersion, + }) + if err != nil { + return "", errors.Wrap(err, "failed to marshal image config") + } + + imgID, err := ic.store.Create(config) + if err != nil { + return "", errors.Wrap(err, "failed to create cache image") + } + + if parent != nil { + if err := ic.store.SetParent(imgID, parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return imgID, nil +} + +func (ic *ImageCache) isParent(imgID, parentID image.ID) bool { + nextParent, err := ic.store.GetParent(imgID) + if err != nil { + return false + } + if nextParent == parentID { + return true + } + return ic.isParent(nextParent, parentID) +} + +func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { + layerIndex := 0 + for i, h := range image.History { + if i == index { + if h.EmptyLayer { + return "" + } + break + } + if !h.EmptyLayer { + layerIndex++ + } + } + return image.RootFS.DiffIDs[layerIndex] // validate? 
+}
+
+func isValidConfig(cfg *containertypes.Config, h image.History) bool {
+	// todo: make this format better than join that loses data
+	return strings.Join(cfg.Cmd, " ") == h.CreatedBy
+}
+
+func isValidParent(img, parent *image.Image) bool {
+	if len(img.History) == 0 {
+		return false
+	}
+	if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 {
+		return true
+	}
+	if len(parent.History) >= len(img.History) {
+		return false
+	}
+	if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) {
+		return false
+	}
+
+	for i, h := range parent.History {
+		if !reflect.DeepEqual(h, img.History[i]) {
+			return false
+		}
+	}
+	for i, d := range parent.RootFS.DiffIDs {
+		if d != img.RootFS.DiffIDs[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func getImageIDAndError(img *image.Image, err error) (string, error) {
+	if img == nil || err != nil {
+		return "", err
+	}
+	return img.ID().String(), nil
+}
+
+// getLocalCachedImage returns the most recently created image that is a child
+// of the image with imgID and that had the same config when it was
+// created. nil is returned if a child cannot be found. An error is
+// returned if the parent image cannot be found.
+func getLocalCachedImage(imageStore image.Store, imgID image.ID, config *containertypes.Config) (*image.Image, error) {
+	// Loop on the children of the given image and check the config
+	getMatch := func(siblings []image.ID) (*image.Image, error) {
+		var match *image.Image
+		for _, id := range siblings {
+			img, err := imageStore.Get(id)
+			if err != nil {
+				return nil, fmt.Errorf("unable to find image %q", id)
+			}
+
+			if compare(&img.ContainerConfig, config) {
+				// check for the most up to date match
+				if match == nil || match.Created.Before(img.Created) {
+					match = img
+				}
+			}
+		}
+		return match, nil
+	}
+
+	// In this case, this is `FROM scratch`, which isn't an actual image.
+	if imgID == "" {
+		images := imageStore.Map()
+		var siblings []image.ID
+		for id, img := range images {
+			if img.Parent == imgID {
+				siblings = append(siblings, id)
+			}
+		}
+		return getMatch(siblings)
+	}
+
+	// find match from child images
+	siblings := imageStore.Children(imgID)
+	return getMatch(siblings)
+}
diff --git a/image/cache/compare.go b/image/cache/compare.go
new file mode 100644
index 0000000..9237932
--- /dev/null
+++ b/image/cache/compare.go
@@ -0,0 +1,63 @@
+package cache
+
+import (
+	"github.com/docker/docker/api/types/container"
+)
+
+// compare compares two Config structs.
+// The "Image" and "Hostname" fields are deliberately not compared.
+// If OpenStdin is set on either config, the configs are always considered different.
+func compare(a, b *container.Config) bool {
+	if a == nil || b == nil ||
+		a.OpenStdin || b.OpenStdin {
+		return false
+	}
+	if a.AttachStdout != b.AttachStdout ||
+		a.AttachStderr != b.AttachStderr ||
+		a.User != b.User ||
+		a.OpenStdin != b.OpenStdin ||
+		a.Tty != b.Tty {
+		return false
+	}
+
+	if len(a.Cmd) != len(b.Cmd) ||
+		len(a.Env) != len(b.Env) ||
+		len(a.Labels) != len(b.Labels) ||
+		len(a.ExposedPorts) != len(b.ExposedPorts) ||
+		len(a.Entrypoint) != len(b.Entrypoint) ||
+		len(a.Volumes) != len(b.Volumes) {
+		return false
+	}
+
+	for i := 0; i < len(a.Cmd); i++ {
+		if a.Cmd[i] != b.Cmd[i] {
+			return false
+		}
+	}
+	for i := 0; i < len(a.Env); i++ {
+		if a.Env[i] != b.Env[i] {
+			return false
+		}
+	}
+	for k, v := range a.Labels {
+		if v != b.Labels[k] {
+			return false
+		}
+	}
+	for k := range a.ExposedPorts {
+		if _, exists := b.ExposedPorts[k]; !exists {
+			return false
+		}
+	}
+
+	for i := 0; i < len(a.Entrypoint); i++ {
+		if a.Entrypoint[i] != b.Entrypoint[i] {
+			return false
+		}
+	}
+	for key := range a.Volumes {
+		if _, exists := b.Volumes[key]; !exists {
+			return false
+		}
+	}
+	return true
+}
diff --git a/image/cache/compare_test.go b/image/cache/compare_test.go
new file mode 100644
index 0000000..7cc0589
--- /dev/null
+++ b/image/cache/compare_test.go
@@ -0,0 +1,126 @@
+package cache
+
+import (
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/go-connections/nat"
+)
+
+// Just to make life easier
+func newPortNoError(proto, port string) nat.Port {
+	p, _ := nat.NewPort(proto, port)
+	return p
+}
+
+func TestCompare(t *testing.T) {
+	ports1 := make(nat.PortSet)
+	ports1[newPortNoError("tcp", "1111")] = struct{}{}
+	ports1[newPortNoError("tcp", "2222")] = struct{}{}
+	ports2 := make(nat.PortSet)
+	ports2[newPortNoError("tcp", "3333")] = struct{}{}
+	ports2[newPortNoError("tcp", "4444")] = struct{}{}
+	ports3 := make(nat.PortSet)
+	ports3[newPortNoError("tcp", "1111")] = struct{}{}
+	ports3[newPortNoError("tcp", "2222")] = struct{}{}
+	ports3[newPortNoError("tcp", "5555")] = struct{}{}
+	volumes1 := make(map[string]struct{})
+	volumes1["/test1"] = struct{}{}
+	volumes2 := make(map[string]struct{})
+	volumes2["/test2"] = struct{}{}
+	volumes3 := make(map[string]struct{})
+	volumes3["/test1"] = struct{}{}
+	volumes3["/test3"] = struct{}{}
+	envs1 := []string{"ENV1=value1", "ENV2=value2"}
+	envs2 := []string{"ENV1=value1", "ENV3=value3"}
+	entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"}
+	entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"}
+	entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"}
+	cmd1 := strslice.StrSlice{"/bin/sh", "-c"}
+	cmd2 := strslice.StrSlice{"/bin/sh", "-d"}
+	cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"}
+	labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"}
+	labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"}
+	labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"}
+
+	sameConfigs := map[*container.Config]*container.Config{
+		// Empty config
+		&container.Config{}: {},
+		// Does not compare hostname, domainname & image
+		&container.Config{
+			Hostname:   "host1",
+			Domainname: "domain1",
+			Image:      "image1",
+			User:       "user",
+		}: {
+			Hostname:   "host2",
+			Domainname: "domain2",
+			Image:      "image2",
+			User:       "user",
+		},
+		// only OpenStdin
+		&container.Config{OpenStdin: false}: {OpenStdin: false},
+		// only env
+		&container.Config{Env: envs1}: {Env: envs1},
+		// only cmd
+		&container.Config{Cmd: cmd1}: {Cmd: cmd1},
+		// only labels
+		&container.Config{Labels: labels1}: {Labels: labels1},
+		// only exposedPorts
+		&container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1},
+		// only entrypoints
+		&container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1},
+		// only volumes
+		&container.Config{Volumes: volumes1}: {Volumes: volumes1},
+	}
+	differentConfigs := map[*container.Config]*container.Config{
+		nil: nil,
+		&container.Config{
+			Hostname:   "host1",
+			Domainname: "domain1",
+			Image:      "image1",
+			User:       "user1",
+		}: {
+			Hostname:   "host1",
+			Domainname: "domain1",
+			Image:      "image1",
+			User:       "user2",
+		},
+		// only OpenStdin
+		&container.Config{OpenStdin: false}: {OpenStdin: true},
+		&container.Config{OpenStdin: true}: {OpenStdin: false},
+		// only env
+		&container.Config{Env: envs1}: {Env: envs2},
+		// only cmd
+		&container.Config{Cmd: cmd1}: {Cmd: cmd2},
+		// not the same number of parts
+		&container.Config{Cmd: cmd1}: {Cmd: cmd3},
+		// only labels
+		&container.Config{Labels: labels1}: {Labels: labels2},
+		// not the same number of labels
+		&container.Config{Labels: labels1}: {Labels: labels3},
+		// only exposedPorts
+		&container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2},
+		// not the same number of ports
+		&container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3},
+		// only entrypoints
+		&container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2},
+		// not the same number of parts
+		&container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3},
+		// only volumes
+		&container.Config{Volumes: volumes1}: {Volumes: volumes2},
+		// not the same number of volumes
+		&container.Config{Volumes: volumes1}: {Volumes: volumes3},
+	}
+	for config1, config2 := range sameConfigs {
+		if !compare(config1, config2) {
+			t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2)
+		}
+	}
+	for config1, config2 := range differentConfigs {
+		if compare(config1, config2) {
+			t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2)
+		}
+	}
+}
diff --git a/image/fs.go b/image/fs.go
new file mode 100644
index 0000000..10f6dab
--- /dev/null
+++ b/image/fs.go
@@ -0,0 +1,178 @@
+package image
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// DigestWalkFunc is the type of function called by StoreBackend.Walk
+type DigestWalkFunc func(id digest.Digest) error
+
+// StoreBackend provides an interface for image.Store persistence
+type StoreBackend interface {
+	Walk(f DigestWalkFunc) error
+	Get(id digest.Digest) ([]byte, error)
+	Set(data []byte) (digest.Digest, error)
+	Delete(id digest.Digest) error
+	SetMetadata(id digest.Digest, key string, data []byte) error
+	GetMetadata(id digest.Digest, key string) ([]byte, error)
+	DeleteMetadata(id digest.Digest, key string) error
+}
+
+// fs implements StoreBackend using the filesystem.
+type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. +func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", errors.Wrap(err, "failed to write digest data") + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + if err := os.Remove(s.contentFile(dgst)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. 
+func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/image/fs_test.go b/image/fs_test.go new file mode 100644 index 0000000..5f2437c --- /dev/null +++ b/image/fs_test.go @@ -0,0 +1,275 @@ +package image + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/testutil" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" +) + +func defaultFSStoreBackend(t *testing.T) (StoreBackend, func()) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + assert.NoError(t, err) + + fsBackend, err := NewFSStoreBackend(tmpdir) + assert.NoError(t, err) + + return fsBackend, func() { os.RemoveAll(tmpdir) } +} + +func TestFSGetInvalidData(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foobar")) + assert.NoError(t, err) + + dgst := digest.Digest(id) + + err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600) + assert.NoError(t, err) + + _, err = store.Get(id) + testutil.ErrorContains(t, err, "failed to verify") +} + +func TestFSInvalidSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id := digest.FromBytes([]byte("foobar")) + err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + assert.NoError(t, err) + + _, err = store.Set([]byte("foobar")) + testutil.ErrorContains(t, err, "failed to write digest data") +} + +func TestFSInvalidRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + assert.NoError(t, err) + defer os.RemoveAll(tmpdir) + + tcases := []struct { + root, invalidFile string + }{ + {"root", "root"}, + {"root", "root/content"}, + {"root", "root/metadata"}, + } + + for _, tc := range tcases { + root := filepath.Join(tmpdir, tc.root) + filePath := filepath.Join(tmpdir, tc.invalidFile) + err := os.MkdirAll(filepath.Dir(filePath), 0700) + assert.NoError(t, err) + + f, err := os.Create(filePath) + assert.NoError(t, err) + f.Close() + + _, err = NewFSStoreBackend(root) + testutil.ErrorContains(t, err, "failed to create storage backend") + + os.RemoveAll(root) + } + +} + +func TestFSMetadataGetSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + id2, err := store.Set([]byte("bar")) + assert.NoError(t, err) + + tcases := []struct { + id digest.Digest + key string + value []byte + }{ + {id, "tkey", []byte("tval1")}, + {id, "tkey2", []byte("tval2")}, + {id2, "tkey", []byte("tval3")}, + } + + for _, tc := range tcases { + err = store.SetMetadata(tc.id, tc.key, tc.value) + assert.NoError(t, err) + + actual, err := store.GetMetadata(tc.id, tc.key) + assert.NoError(t, err) + + if bytes.Compare(actual, tc.value) != 0 { + t.Fatalf("Metadata expected %q, got %q", tc.value, actual) + } + } + + 
_, err = store.GetMetadata(id2, "tkey2") + testutil.ErrorContains(t, err, "failed to read metadata") + + id3 := digest.FromBytes([]byte("baz")) + err = store.SetMetadata(id3, "tkey", []byte("tval")) + testutil.ErrorContains(t, err, "failed to get digest") + + _, err = store.GetMetadata(id3, "tkey") + testutil.ErrorContains(t, err, "failed to get digest") +} + +func TestFSInvalidWalker(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + fooID, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, "sha256/foobar"), []byte("foobar"), 0600) + assert.NoError(t, err) + + n := 0 + err = store.Walk(func(id digest.Digest) error { + assert.Equal(t, fooID, id) + n++ + return nil + }) + assert.NoError(t, err) + assert.Equal(t, 1, n) +} + +func TestFSGetSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + type tcase struct { + input []byte + expected digest.Digest + } + tcases := []tcase{ + {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, + } + + randomInput := make([]byte, 8*1024) + _, err := rand.Read(randomInput) + assert.NoError(t, err) + + // skipping use of digest pkg because it is used by the implementation + h := sha256.New() + _, err = h.Write(randomInput) + assert.NoError(t, err) + + tcases = append(tcases, tcase{ + input: randomInput, + expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), + }) + + for _, tc := range tcases { + id, err := store.Set([]byte(tc.input)) + assert.NoError(t, err) + assert.Equal(t, tc.expected, id) + } + + for _, tc := range tcases { + data, err := store.Get(tc.expected) + assert.NoError(t, err) + if bytes.Compare(data, tc.input) != 0 { + t.Fatalf("expected data %q, got %q", tc.input, data) + } + } +} + +func TestFSGetUnsetKey(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { + _, err := store.Get(key) + testutil.ErrorContains(t, err, "failed to get digest") + } +} + +func TestFSGetEmptyData(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + testutil.ErrorContains(t, err, "invalid empty data") + } +} + +func TestFSDelete(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + id2, err := store.Set([]byte("bar")) + assert.NoError(t, err) + + err = store.Delete(id) + assert.NoError(t, err) + + _, err = store.Get(id) + testutil.ErrorContains(t, err, "failed to get digest") + + _, err = store.Get(id2) + assert.NoError(t, err) + + err = store.Delete(id2) + assert.NoError(t, err) + + _, err = store.Get(id2) + testutil.ErrorContains(t, err, "failed to get digest") +} + +func TestFSWalker(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + id2, err := store.Set([]byte("bar")) + assert.NoError(t, err) + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + tcases[id2] = struct{}{} + n := 0 + err = store.Walk(func(id digest.Digest) error { + delete(tcases, id) + n++ + return nil + }) + assert.NoError(t, err) + assert.Equal(t, 2, n) + assert.Len(t, tcases, 0) +} + +func 
TestFSWalkerStopOnError(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.NoError(t, err) + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + err = store.Walk(func(id digest.Digest) error { + return errors.New("what") + }) + testutil.ErrorContains(t, err, "what") +} diff --git a/image/image.go b/image/image.go new file mode 100644 index 0000000..ab95d93 --- /dev/null +++ b/image/image.go @@ -0,0 +1,220 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. +type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. 
+func (img *Image) RunConfig() *container.Config {
+	return img.Config
+}
+
+// Platform returns the image's operating system. If not populated, defaults to the host runtime OS.
+func (img *Image) Platform() string {
+	os := img.OS
+	if os == "" {
+		os = runtime.GOOS
+	}
+	return os
+}
+
+// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
+// that JSON that's been manipulated by a push/pull cycle with a legacy
+// registry won't end up with a different key order.
+func (img *Image) MarshalJSON() ([]byte, error) {
+	type MarshalImage Image
+
+	pass1, err := json.Marshal(MarshalImage(*img))
+	if err != nil {
+		return nil, err
+	}
+
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(pass1, &c); err != nil {
+		return nil, err
+	}
+	return json.Marshal(c)
+}
+
+// ChildConfig is the configuration to apply to an Image to create a new
+// child image. Other properties of the image are copied from the parent.
+type ChildConfig struct {
+	ContainerID     string
+	Author          string
+	Comment         string
+	DiffID          layer.DiffID
+	ContainerConfig *container.Config
+	Config          *container.Config
+}
+
+// NewChildImage creates a new Image as a child of this image.
+func NewChildImage(img *Image, child ChildConfig, platform string) *Image {
+	isEmptyLayer := layer.IsEmpty(child.DiffID)
+	rootFS := img.RootFS
+	if rootFS == nil {
+		rootFS = NewRootFS()
+	}
+	if !isEmptyLayer {
+		rootFS.Append(child.DiffID)
+	}
+	imgHistory := NewHistory(
+		child.Author,
+		child.Comment,
+		strings.Join(child.ContainerConfig.Cmd, " "),
+		isEmptyLayer)
+
+	return &Image{
+		V1Image: V1Image{
+			DockerVersion:   dockerversion.Version,
+			Config:          child.Config,
+			Architecture:    runtime.GOARCH,
+			OS:              platform,
+			Container:       child.ContainerID,
+			ContainerConfig: *child.ContainerConfig,
+			Author:          child.Author,
+			Created:         imgHistory.Created,
+		},
+		RootFS:     rootFS,
+		History:    append(img.History, imgHistory),
+		OSFeatures: img.OSFeatures,
+		OSVersion:  img.OSVersion,
+	}
+}
+
+// History stores build commands that were used to create an image
+type History struct {
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// CreatedBy keeps the Dockerfile command used while building the image
+	CreatedBy string `json:"created_by,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// EmptyLayer is set to true if this history item did not generate a
+	// layer. Otherwise, the history item is associated with the next
+	// layer in the RootFS section.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// NewHistory creates a new history struct from arguments, and sets the created
+// time to the current time in UTC
+func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History {
+	return History{
+		Author:     author,
+		Created:    time.Now().UTC(),
+		CreatedBy:  createdBy,
+		Comment:    comment,
+		EmptyLayer: isEmptyLayer,
+	}
+}
+
+// Exporter provides an interface for loading and saving images
+type Exporter interface {
+	Load(io.ReadCloser, io.Writer, bool) error
+	// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
+	Save([]string, io.Writer) error
+}
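The canonical JSON produced by MarshalJSON above is what makes image IDs stable: an image's ID is simply the digest of that configuration JSON. A minimal sketch of the content-addressing (computeID is a hypothetical helper, not part of this file):

```go
package image

import "github.com/opencontainers/go-digest"

// computeID is a hypothetical helper: an image's ID is the SHA256
// digest of its configuration JSON, which is what makes images
// content-addressable.
func computeID(configJSON []byte) ID {
	return IDFromDigest(digest.FromBytes(configJSON))
}
```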
+
+// NewFromJSON creates an Image configuration from JSON.
+func NewFromJSON(src []byte) (*Image, error) {
+	img := &Image{}
+
+	if err := json.Unmarshal(src, img); err != nil {
+		return nil, err
+	}
+	if img.RootFS == nil {
+		return nil, errors.New("invalid image JSON, no RootFS key")
+	}
+
+	img.rawJSON = src
+
+	return img, nil
+}
diff --git a/image/image_test.go b/image/image_test.go
new file mode 100644
index 0000000..1455947
--- /dev/null
+++ b/image/image_test.go
@@ -0,0 +1,53 @@
+package image
+
+import (
+	"encoding/json"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+const sampleImageJSON = `{
+	"architecture": "amd64",
+	"os": "linux",
+	"config": {},
+	"rootfs": {
+		"type": "layers",
+		"diff_ids": []
+	}
+}`
+
+func TestNewFromJSON(t *testing.T) {
+	img, err := NewFromJSON([]byte(sampleImageJSON))
+	require.NoError(t, err)
+	assert.Equal(t, sampleImageJSON, string(img.RawJSON()))
+}
+
+func TestNewFromJSONWithInvalidJSON(t *testing.T) {
+	_, err := NewFromJSON([]byte("{}"))
+	assert.EqualError(t, err, "invalid image JSON, no RootFS key")
+}
+
+func TestMarshalKeyOrder(t *testing.T) {
+	b, err := json.Marshal(&Image{
+		V1Image: V1Image{
+			Comment:      "a",
+			Author:       "b",
+			Architecture: "c",
+		},
+	})
+	assert.NoError(t, err)
+
+	expectedOrder := []string{"architecture", "author", "comment"}
+	var indexes []int
+	for _, k := range expectedOrder {
+		indexes = append(indexes, strings.Index(string(b), k))
+	}
+
+	if !sort.IntsAreSorted(indexes) {
+		t.Fatal("invalid key order in JSON: ", string(b))
+	}
+}
diff --git a/image/rootfs.go b/image/rootfs.go
new file mode 100644
index 0000000..7b24e3e
--- /dev/null
+++ b/image/rootfs.go
@@ -0,0 +1,44 @@
+package image
+
+import (
+	"runtime"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/layer"
+)
+
+// TypeLayers is used for RootFS.Type for filesystems organized into layers.
+const TypeLayers = "layers"
+
+// typeLayersWithBase is an older format used by Windows up to v1.12. We
+// explicitly handle this as an error case to ensure that a daemon which still
+// has an older image like this on disk can still start, even though the
+// image itself is not usable. See https://github.com/docker/docker/pull/25806.
+const typeLayersWithBase = "layers+base"
+
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type    string         `json:"type"`
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// NewRootFS returns an empty RootFS struct
+func NewRootFS() *RootFS {
+	return &RootFS{Type: TypeLayers}
+}
+
+// Append appends a new diffID to the rootfs
+func (r *RootFS) Append(id layer.DiffID) {
+	r.DiffIDs = append(r.DiffIDs, id)
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+func (r *RootFS) ChainID() layer.ChainID {
+	if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
+		logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
+		return ""
+	}
+	return layer.CreateChainID(r.DiffIDs)
+}
diff --git a/image/spec/v1.1.md b/image/spec/v1.1.md
new file mode 100644
index 0000000..ce761f1
--- /dev/null
+++ b/image/spec/v1.1.md
@@ -0,0 +1,637 @@
+# Docker Image Specification v1.1.0
+
+An *Image* is an ordered collection of root filesystem changes and the
+corresponding execution parameters for use within a container runtime.
This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.10. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
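To make the DiffID and ChainID definitions above concrete, here is a minimal, self-contained Go sketch; `diffID` and `chainID` are illustrative helper names, not part of the specification:

```
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// diffID hashes an *uncompressed* layer tar stream, as described above.
func diffID(tar io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, tar); err != nil {
		return "", err
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
}

// chainID applies the formula above: the bottom layer's ChainID is its
// DiffID, and ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)).
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+d)))
	}
	return id
}

func main() {
	f, err := os.Open("layer.tar") // an uncompressed layer archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	d, err := diffID(f)
	if err != nil {
		panic(err)
	}
	// For a single layer, the ChainID equals the DiffID.
	fmt.Println(d, chainID([]string{d}))
}
```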
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
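Since an ImageID is nothing more than the SHA256 digest of the raw configuration bytes, it can be recomputed by hand; a small sketch (the config literal is an arbitrary example):

```
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	config := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers","diff_ids":[]}}`)
	// The ImageID is the digest of the raw config bytes, so any change to
	// the config (including the referenced layer DiffIDs) changes the ID.
	fmt.Printf("sha256:%x\n", sha256.Sum256(config))
}
```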
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 128 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
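The tag and repository-name rules above are straightforward to check mechanically. The regular expression below is derived from the prose in this section rather than taken from a reference implementation, so treat it as a sketch:

```
package main

import (
	"fmt"
	"regexp"
)

// A tag is drawn from [a-zA-Z0-9_.-], must not start with "." or "-",
// and is at most 128 characters long.
var tagRegexp = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$`)

func main() {
	for _, tag := range []string{"latest", "3.1.4", ".hidden", "-bad"} {
		fmt.Println(tag, tagRegexp.MatchString(tag))
	}
}
```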
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run
+ on. Possible values include:
+
+    - 386
+    - amd64
+    - arm
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on.
+ Possible values include:
+
+    - darwin
+    - freebsd
+    - linux
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a
+ container using the image. This field can be null, in
+ which case any execution parameters should be specified at creation of
+ the container.
+
+ #### Container RunConfig Field Descriptions
+
+ User string
+
+ The username or UID which the process in the container should
+ run as. This acts as a default value to use when the value is
+ not specified when creating a container.
+
+ All of the following are valid:
+
+    - user
+    - uid
+    - user:group
+    - uid:gid
+    - uid:group
+    - user:gid
+
+ If group/gid is not specified, the default group and
+ supplementary groups of the given user/uid in /etc/passwd
+ from the container are applied.
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type map[string]struct{} and is
+ represented in JSON as an object mapping its keys to an empty
+ object. Here is an example:
+
+ ```
+ {
+     "8080": {},
+     "53/udp": {},
+     "2356/tcp": {}
+ }
+ ```
+
+ Its keys can be in the format of:
+
+    - "port/tcp"
+    - "port/udp"
+    - "port"
+
+ with the default protocol being "tcp" if not specified. These values
+ act as defaults and are merged with any specified when creating
+ a container.
+
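The map[string]struct{} serialization described above is easy to reproduce; note that Go's encoding/json writes map keys in sorted order, so the output is deterministic:

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A direct serialization of map[string]struct{}, as the spec describes.
	ports := map[string]struct{}{
		"8080":     {},
		"53/udp":   {},
		"2356/tcp": {},
	}
	b, _ := json.Marshal(ports)
	fmt.Println(string(b)) // {"2356/tcp":{},"53/udp":{},"8080":{}}
}
```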
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in
+ a container running this image. This JSON structure value is
+ unusual because it is a direct JSON serialization of the Go
+ type map[string]struct{} and is represented in
+ JSON as an object mapping its keys to an empty object. Here is
+ an example:
+
+ ```
+ {
+     "/var/my-app-data/": {},
+     "/etc/some-config.d/": {}
+ }
+ ```
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the
+ image. This makes the image config hash depend on the filesystem hash.
+ rootfs has two subkeys:
+
+    - type is usually set to layers.
+    - diff_ids is an array of layer content hashes (DiffIDs), in order
+      from bottom-most to top-most.
+
+ Here is an example rootfs section:
+
+ ```
+ "rootfs": {
+   "diff_ids": [
+     "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+     "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+     "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+   ],
+   "type": "layers"
+ }
+ ```
+
+
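For reference, the rootfs section decodes directly into the RootFS struct added in image/rootfs.go earlier in this patch; the sketch below uses a simplified mirror type (plain strings in place of layer.DiffID) so that it stands alone:

```
package main

import (
	"encoding/json"
	"fmt"
)

// rootFS mirrors image.RootFS from this patch, with layer.DiffID
// simplified to string so the example is self-contained.
type rootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids,omitempty"`
}

func main() {
	doc := []byte(`{"type": "layers", "diff_ids": [
	  "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"
	]}`)
	var r rootFS
	if err := json.Unmarshal(doc, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Type, len(r.DiffIDs)) // layers 1
}
```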
+ history struct +
+
+ history is an array of objects describing the history of
+ each layer. The array is ordered from bottom-most layer to top-most
+ layer. The object has the following fields:
+
+    - created: Creation time, expressed as an ISO-8601 formatted
+      combined date and time
+    - author: The author of the build point
+    - created_by: The command which created the layer
+    - comment: A custom message set when creating the layer
+    - empty_layer: This field is used to mark if the history
+      item created a filesystem diff. It is set to true if this history
+      item doesn't correspond to an actual layer in the rootfs section
+      (for example, a command like ENV which results in no change to the
+      filesystem).
+
+ Here is an example history section:
+
+ ```
+ "history": [
+   {
+     "created": "2015-10-31T22:22:54.690851953Z",
+     "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+   },
+   {
+     "created": "2015-10-31T22:22:55.613815829Z",
+     "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+     "empty_layer": true
+   }
+ ]
+ ```
+
+
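These history entries decode into the History struct defined in image/image.go earlier in this patch, and empty_layer is what lets a consumer line history items up against rootfs.diff_ids. The sketch below (with a simplified mirror type and abbreviated created_by values) shows the sanity check that image/store.go performs on Create: the number of non-empty history items may not exceed the number of DiffIDs:

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// history mirrors image.History from this patch.
type history struct {
	Created    time.Time `json:"created"`
	CreatedBy  string    `json:"created_by,omitempty"`
	EmptyLayer bool      `json:"empty_layer,omitempty"`
}

func main() {
	// created_by values are abbreviated here for readability.
	doc := []byte(`[
	  {"created": "2015-10-31T22:22:54.690851953Z", "created_by": "ADD ..."},
	  {"created": "2015-10-31T22:22:55.613815829Z", "created_by": "CMD ...", "empty_layer": true}
	]`)
	var hs []history
	if err := json.Unmarshal(doc, &hs); err != nil {
		panic(err)
	}
	nonEmpty := 0
	for _, h := range hs {
		if !h.EmptyLayer {
			nonEmpty++
		}
	}
	// For a valid image, nonEmpty must not exceed len(rootfs.diff_ids).
	fmt.Println("layers referenced by history:", nonEmpty)
}
```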
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. 
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/image/spec/v1.2.md b/image/spec/v1.2.md new file mode 100644 index 0000000..789680c --- /dev/null +++ b/image/spec/v1.2.md @@ -0,0 +1,696 @@ +# Docker Image Specification v1.2.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. 
+ +This version of the image specification was adopted starting in Docker 1.12. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 128 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run
+ on. Possible values include:
+
+    - 386
+    - amd64
+    - arm
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on.
+ Possible values include:
+
+    - darwin
+    - freebsd
+    - linux
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a
+ container using the image. This field can be null, in
+ which case any execution parameters should be specified at creation of
+ the container.
+
+ #### Container RunConfig Field Descriptions
+
+ User string
+
+ The username or UID which the process in the container should
+ run as. This acts as a default value to use when the value is
+ not specified when creating a container.
+
+ All of the following are valid:
+
+    - user
+    - uid
+    - user:group
+    - uid:gid
+    - uid:group
+    - user:gid
+
+ If group/gid is not specified, the default group and
+ supplementary groups of the given user/uid in /etc/passwd
+ from the container are applied.
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type map[string]struct{} and is
+ represented in JSON as an object mapping its keys to an empty
+ object. Here is an example:
+
+ ```
+ {
+     "8080": {},
+     "53/udp": {},
+     "2356/tcp": {}
+ }
+ ```
+
+ Its keys can be in the format of:
+
+    - "port/tcp"
+    - "port/udp"
+    - "port"
+
+ with the default protocol being "tcp" if not specified. These values
+ act as defaults and are merged with any specified when creating
+ a container.
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Healthcheck struct +
+
+ A test to perform to determine whether the container is healthy.
+ Here is an example:
+
+ ```
+ {
+   "Test": [
+       "CMD-SHELL",
+       "/usr/bin/check-health localhost"
+   ],
+   "Interval": 30000000000,
+   "Timeout": 10000000000,
+   "Retries": 3
+ }
+ ```
+ The object has the following fields. +
+
+ Test array of strings +
+
+ The test to perform to check that the container is healthy.
+ The options are:
+
+    - [] : inherit healthcheck from base image
+    - ["NONE"] : disable healthcheck
+    - ["CMD", arg1, arg2, ...] : exec arguments directly
+    - ["CMD-SHELL", command] : run command with system's default shell
+ + The test command should exit with a status of 0 if the container is healthy, + or with 1 if it is unhealthy. +
+
+ Interval integer +
+
+ Number of nanoseconds to wait between probe attempts. +
+
+ Timeout integer +
+
+ Number of nanoseconds to wait before considering the check to have hung. +
+
+ Retries integer +
+
+ The number of consecutive failures needed to consider a container as unhealthy. +
+
+ + In each case, the field can be omitted to indicate that the + value should be inherited from the base layer. + + These values act as defaults and are merged with any specified + when creating a container. +
+
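Interval and Timeout are plain nanosecond counts, which is exactly the underlying unit of Go's time.Duration, so a mirror struct marshals to the example above verbatim; healthConfig below is a hypothetical stand-in, not the daemon's own type:

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// healthConfig is a hypothetical mirror of the Healthcheck object above.
// time.Duration is an int64 nanosecond count, so it marshals to exactly
// the integer values shown in the example.
type healthConfig struct {
	Test     []string      `json:"Test,omitempty"`
	Interval time.Duration `json:"Interval,omitempty"`
	Timeout  time.Duration `json:"Timeout,omitempty"`
	Retries  int           `json:"Retries,omitempty"`
}

func main() {
	b, _ := json.Marshal(healthConfig{
		Test:     []string{"CMD-SHELL", "/usr/bin/check-health localhost"},
		Interval: 30 * time.Second, // 30000000000 ns
		Timeout:  10 * time.Second, // 10000000000 ns
		Retries:  3,
	})
	fmt.Println(string(b))
}
```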
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in
+ a container running this image. This JSON structure value is
+ unusual because it is a direct JSON serialization of the Go
+ type map[string]struct{} and is represented in
+ JSON as an object mapping its keys to an empty object. Here is
+ an example:
+
+ ```
+ {
+     "/var/my-app-data/": {},
+     "/etc/some-config.d/": {}
+ }
+ ```
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the
+ image. This makes the image config hash depend on the filesystem hash.
+ rootfs has two subkeys:
+
+    - type is usually set to layers.
+    - diff_ids is an array of layer content hashes (DiffIDs), in order
+      from bottom-most to top-most.
+
+ Here is an example rootfs section:
+
+ ```
+ "rootfs": {
+   "diff_ids": [
+     "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+     "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+     "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+   ],
+   "type": "layers"
+ }
+ ```
+
+
+ history struct +
+
+ history is an array of objects describing the history of
+ each layer. The array is ordered from bottom-most layer to top-most
+ layer. The object has the following fields:
+
+    - created: Creation time, expressed as an ISO-8601 formatted
+      combined date and time
+    - author: The author of the build point
+    - created_by: The command which created the layer
+    - comment: A custom message set when creating the layer
+    - empty_layer: This field is used to mark if the history
+      item created a filesystem diff. It is set to true if this history
+      item doesn't correspond to an actual layer in the rootfs section
+      (for example, a command like ENV which results in no change to the
+      filesystem).
+
+ Here is an example history section:
+
+ ```
+ "history": [
+   {
+     "created": "2015-10-31T22:22:54.690851953Z",
+     "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+   },
+   {
+     "created": "2015-10-31T22:22:55.613815829Z",
+     "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+     "empty_layer": true
+   }
+ ]
+ ```
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. 
+├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. + +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/image/spec/v1.md b/image/spec/v1.md new file mode 100644 index 0000000..fce3a06 --- /dev/null +++ b/image/spec/v1.md @@ -0,0 +1,573 @@ +# Docker Image Specification v1.0.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. 
+ +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Image layer is a general
+ term which may be used to refer to one or both of the following:
+
+    1. The metadata for the layer, described in the JSON format.
+    2. The filesystem changes described by a layer.
+ + To refer to the former you may use the term Layer JSON or + Layer Metadata. To refer to the latter you may use the term + Image Filesystem Changeset or Image Diff. +
+
+ Image JSON +
+
+ Each layer has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Image ID +
+
+ Each layer is given an ID upon its creation. It is + represented as a hexadecimal encoding of 256 bits, e.g., + a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Image IDs should be sufficiently random so as to be globally unique. + 32 bytes read from /dev/urandom is sufficient for all + practical purposes. Alternatively, an image ID may be derived as a + cryptographic hash of image contents as the result is considered + indistinguishable from random. The choice is left up to implementors. +
+
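A sketch of the random-ID option described above, using Go's crypto/rand (which reads from the operating system's CSPRNG, e.g. /dev/urandom on Linux) to produce a 256-bit hex identifier:

```
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func main() {
	id := make([]byte, 32) // 256 bits
	if _, err := rand.Read(id); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(id))
}
```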
+ Image Parent +
+
+ Most layer metadata structs contain a parent field which + refers to the Image from which another directly descends. An image + contains a separate JSON metadata file and set of changes relative to + the filesystem of its parent image. Image Ancestor and + Image Descendant are also common terms. +
+
+ Image Checksum +
+
+ Layer metadata structs contain a cryptographic hash of the contents of + the layer's filesystem changeset. Though the set of changes exists as a + simple Tar archive, two archives with identical filenames and content + will have different SHA digests if the last-access or last-modified + times of any entries differ. For this reason, image checksums are + generated using the TarSum algorithm which produces a cryptographic + hash of file contents and selected headers only. Details of this + algorithm are described in the separate TarSum specification. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. An image name suffix (the name component after :) is + often referred to as a tag as well, though it strictly refers to the + full name of an image. Acceptable values for a tag suffix are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-Z0-9], punctuation + characters [._-], and MUST NOT contain a : + character. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. Acceptable values for repository name are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-Z0-9], and punctuation + characters [._-], however it MAY contain additional + / and : characters for organizational + purposes, with the last : character being interpreted + dividing the repository component of the name from the tag suffix + component. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9", + "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024", + "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b", + "created": "2014-10-13T21:19:18.674353812Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "Size": 271828, + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + } +} +``` + +### Image JSON Field Descriptions + +
+
+ id string +
+
+ Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies + the image. +
+
+ parent string +
+
+ ID of the parent image. If there is no parent image then this field + should be omitted. A collection of images may share many of the same + ancestor layers. This organizational structure is strictly a tree with + any one layer having either no parent or a single parent and zero or + more descendant layers. Cycles are not allowed and implementations + should be careful to avoid creating them or iterating through a cycle + indefinitely. +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run
+ on. Possible values include:
+
+    - 386
+    - amd64
+    - arm
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on.
+ Possible values include:
+
+    - darwin
+    - freebsd
+    - linux
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ checksum string +
+
+ Image Checksum of the filesystem changeset associated with the image + layer. +
+
+ Size integer +
+
+ The size in bytes of the filesystem changeset associated with the image + layer. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a
+ container using the image. This field can be null, in
+ which case any execution parameters should be specified at creation of
+ the container.
+
+ #### Container RunConfig Field Descriptions
+
+ User string
+
+ The username or UID which the process in the container should
+ run as. This acts as a default value to use when the value is
+ not specified when creating a container.
+
+ All of the following are valid:
+
+    - user
+    - uid
+    - user:group
+    - uid:gid
+    - uid:group
+    - user:gid
+
+ If group/gid is not specified, the default group and
+ supplementary groups of the given user/uid in /etc/passwd
+ from the container are applied.
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image.
+ This JSON structure value is unusual because it is a direct
+ JSON serialization of the Go type map[string]struct{} and is
+ represented in JSON as an object mapping its keys to an empty
+ object. Here is an example:
+
+ ```
+ {
+     "8080": {},
+     "53/udp": {},
+     "2356/tcp": {}
+ }
+ ```
+
+ Its keys can be in the format of:
+
+    - "port/tcp"
+    - "port/udp"
+    - "port"
+
+ with the default protocol being "tcp" if not specified. These values
+ act as defaults and are merged with any specified when creating
+ a container.
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in
+ a container running this image. This JSON structure value is
+ unusual because it is a direct JSON serialization of the Go
+ type map[string]struct{} and is represented in
+ JSON as an object mapping its keys to an empty object. Here is
+ an example:
+
+ ```
+ {
+     "/var/my-app-data/": {},
+     "/etc/some-config.d/": {}
+ }
+ ```
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory named with the +ID of the image being created. Here is the initial empty directory structure +for the changeset for an image with ID `c3167915dc9d` ([real IDs are much +longer](#id_desc), but this example use a truncated one here for brevity. +Implementations need not name the rootfs directory in this way but it may be +convenient for keeping record of a large number of image layers.): + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +The TarSum checksum for the archive file is then computed and placed in the +JSON metadata along with the execution parameters. + +To make changes to the filesystem of this container image, create a new +directory named with a new ID, such as `f60c56784b83`, and initialize it with +a snapshot of the parent image's root filesystem, so that the directory is +identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem +can make this very efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going to add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. 
+ +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - all image layer JSON files + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c +│   ├── VERSION +│   ├── json +│   └── layer.tar +└── repositories +``` + +There are one or more directories named with the ID for each layer in a full +image. Each of these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The JSON metadata for an image layer + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +And the `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. + +## Loading an Image Filesystem Changeset + +Unpacking a bundle of image layer JSON files and their corresponding filesystem +changesets can be done using a series of steps: + +1. Follow the parent IDs of image layers to find the root ancestor (an image +with no parent ID specified). +2. For every image layer, in order from root ancestor and descending down, +extract the contents of that layer's filesystem changeset archive into a +directory which will be used as the root of a container filesystem. + + - Extract all contents of each archive. + - Walk the directory tree once more, removing any files with the prefix + `.wh.` and the corresponding file or directory named without this prefix. + + +## Implementations + +This specification is an admittedly imperfect description of an +imperfectly-understood problem. The Docker project is, in turn, an attempt to +implement this specification. Our goal and our execution toward it will evolve +over time, but our primary concern in this specification and in our +implementation is compatibility and interoperability. 
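The loading procedure above ends with a whiteout pass over the assembled root filesystem. Below is a minimal sketch of that pass, assuming the changeset archives have already been extracted in order into `rootfs`; applyWhiteouts is an illustrative helper, not part of the specification:

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// applyWhiteouts walks an extracted root filesystem and, for every file
// named ".wh.<name>", removes both the marker file and the sibling
// file or directory <name>, as described in the loading steps above.
func applyWhiteouts(root string) error {
	var markers []string
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if strings.HasPrefix(info.Name(), ".wh.") {
			markers = append(markers, path)
		}
		return nil
	})
	if err != nil {
		return err
	}
	// Delete after the walk so entries are never removed out from under it.
	for _, m := range markers {
		target := filepath.Join(filepath.Dir(m), strings.TrimPrefix(filepath.Base(m), ".wh."))
		if err := os.RemoveAll(target); err != nil {
			return err
		}
		if err := os.Remove(m); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := applyWhiteouts("rootfs"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```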
diff --git a/image/store.go b/image/store.go new file mode 100644 index 0000000..3ab5487 --- /dev/null +++ b/image/store.go @@ -0,0 +1,367 @@ +package image + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digestset" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + GetTarSeekStream(id ID) (ioutils.ReadSeekCloser, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + SetLastUpdated(id ID) error + GetLastUpdated(id ID) (time.Time, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.RWMutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digestset.Set + platform string +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, platform string, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digestset.NewSet(), + platform: platform, + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // TODO @jhowardmsft - LCOW Support. This will need revisiting. + // Integrity check - ensure we are creating something for the correct platform + if system.LCOWSupported() { + if strings.ToLower(img.Platform()) != strings.ToLower(is.platform) { + return "", fmt.Errorf("cannot create entry for platform %q in image store for platform %q", img.Platform(), is.platform) + } + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", errors.Wrapf(err, "failed to get layer %s", layerID) + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digestset.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +// GetTarSeekStream returns a concatenation of Tar streams +func (is *store) GetTarSeekStream(id ID) (ioutils.ReadSeekCloser, error) { + img, err := is.Get(id) + if err != nil { + return nil, err + } + + var result ioutils.ReadSeekCloser + + for i := range img.RootFS.DiffIDs { + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + + l, err := is.ls.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + + arch, err := l.TarSeekStream() + if err != nil { + return nil, err + } + + stream := ioutils.NewReadSeekCloserWrapper(arch, func() error { + _, err := is.ls.Release(l) + return err + }) + + if result == nil { + result = stream + } else { + result, err = ioutils.ConcatReadSeekClosers(result, stream) + if err != nil { + return nil, err + } + } + } + + return result, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + 
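+		// The image already has a parent: detach it from that parent's children set first.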
delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + +func (is *store) Children(id ID) []ID { + is.RLock() + defer is.RUnlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.RLock() + defer is.RUnlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/image/store_test.go b/image/store_test.go new file mode 100644 index 0000000..fc6d461 --- /dev/null +++ b/image/store_test.go @@ -0,0 +1,178 @@ +package image + +import ( + "runtime" + "testing" + + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/testutil" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" +) + +func TestRestore(t *testing.T) { + fs, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + _, err = fs.Set([]byte(`invalid`)) + assert.NoError(t, err) + + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + assert.NoError(t, err) + + err = fs.SetMetadata(id2, "parent", []byte(id1)) + assert.NoError(t, err) + + is, err := NewImageStore(fs, runtime.GOOS, &mockLayerGetReleaser{}) + assert.NoError(t, err) + + assert.Len(t, is.Map(), 2) + + img1, err := is.Get(ID(id1)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), img1.computedID) + assert.Equal(t, string(id1), img1.computedID.String()) + + img2, err := is.Get(ID(id2)) + assert.NoError(t, err) + assert.Equal(t, "abc", img1.Comment) + assert.Equal(t, "def", img2.Comment) + + p, err := is.GetParent(ID(id1)) + testutil.ErrorContains(t, err, "failed to read metadata") + + p, err = is.GetParent(ID(id2)) + assert.NoError(t, err) + assert.Equal(t, ID(id1), p) + + children := is.Children(ID(id1)) + assert.Len(t, children, 1) + assert.Equal(t, ID(id2), children[0]) + assert.Len(t, is.Heads(), 1) + + sid1, err := is.Search(string(id1)[:10]) + assert.NoError(t, err) + assert.Equal(t, ID(id1), sid1) + + sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) + assert.NoError(t, err) + assert.Equal(t, 
ID(id1), sid1) + + invalidPattern := digest.Digest(id1).Hex()[1:6] + _, err = is.Search(invalidPattern) + testutil.ErrorContains(t, err, "No such image") +} + +func TestAddDelete(t *testing.T) { + is, cleanup := defaultImageStore(t) + defer cleanup() + + id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + assert.NoError(t, err) + assert.Equal(t, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1) + + img, err := is.Get(id1) + assert.NoError(t, err) + assert.Equal(t, "abc", img.Comment) + + id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + assert.NoError(t, err) + + err = is.SetParent(id2, id1) + assert.NoError(t, err) + + pid1, err := is.GetParent(id2) + assert.NoError(t, err) + assert.Equal(t, pid1, id1) + + _, err = is.Delete(id1) + assert.NoError(t, err) + + _, err = is.Get(id1) + testutil.ErrorContains(t, err, "failed to get digest") + + _, err = is.Get(id2) + assert.NoError(t, err) + + _, err = is.GetParent(id2) + testutil.ErrorContains(t, err, "failed to read metadata") +} + +func TestSearchAfterDelete(t *testing.T) { + is, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + id1, err := is.Search(string(id)[:15]) + assert.NoError(t, err) + assert.Equal(t, id1, id) + + _, err = is.Delete(id) + assert.NoError(t, err) + + _, err = is.Search(string(id)[:15]) + testutil.ErrorContains(t, err, "No such image") +} + +func TestParentReset(t *testing.T) { + is, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + assert.NoError(t, is.SetParent(id, id2)) + assert.Len(t, is.Children(id2), 1) + + assert.NoError(t, is.SetParent(id, id3)) + assert.Len(t, is.Children(id2), 0) + assert.Len(t, is.Children(id3), 1) +} + +func defaultImageStore(t *testing.T) (Store, func()) { + fsBackend, cleanup := defaultFSStoreBackend(t) + + store, err := NewImageStore(fsBackend, runtime.GOOS, &mockLayerGetReleaser{}) + assert.NoError(t, err) + + return store, cleanup +} + +func TestGetAndSetLastUpdated(t *testing.T) { + store, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := store.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + assert.NoError(t, err) + + updated, err := store.GetLastUpdated(id) + assert.NoError(t, err) + assert.Equal(t, updated.IsZero(), true) + + assert.NoError(t, store.SetLastUpdated(id)) + + updated, err = store.GetLastUpdated(id) + assert.NoError(t, err) + assert.Equal(t, updated.IsZero(), false) +} + +type mockLayerGetReleaser struct{} + +func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} diff --git a/image/tarexport/load.go b/image/tarexport/load.go new file mode 100644 index 0000000..af8cefc --- /dev/null +++ b/image/tarexport/load.go @@ -0,0 +1,431 @@ +package tarexport + +import ( + "encoding/json" + "errors" + 
"fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var progressOutput progress.Output + if !quiet { + progressOutput = streamformatter.NewJSONProgressOutput(outStream, false) + } + outStream = streamformatter.NewStdoutWriter(outStream) + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return err + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + var imageIDsStr string + var imageRefCount int + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual) + } + + // On Windows, validate the platform, defaulting to windows if not present. 
+ platform := layer.Platform(img.OS) + if runtime.GOOS == "windows" { + if platform == "" { + platform = "windows" + } + if (platform != "windows") && (platform != "linux") { + return fmt.Errorf("configuration for this image has an unsupported platform: %s", platform) + } + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), platform, m.LayerSources[diffID], progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) + + imageRefCount = 0 + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNormalizedNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", reference.FamiliarString(ref)))) + imageRefCount++ + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + if imageRefCount == 0 { + outStream.Write([]byte(imageIDsStr)) + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, platform layer.Platform, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list. On Linux, this equates to a regular os.Open. 
+ rawTar, err := system.OpenSequential(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + var r io.Reader + if progressOutput != nil { + fileInfo, err := rawTar.Stat() + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + } else { + r = rawTar + } + + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if ds, ok := l.ls.(layer.DescribableStore); ok { + return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), platform, foreignSrc) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID(), platform) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", reference.FamiliarString(ref), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support legacy loading of images") + } + + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + return err + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct { + OS string + Parent string + } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, 
sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, "", distribution.Descriptor{}, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} + +func checkCompatibleOS(os string) error { + // TODO @jhowardmsft LCOW - revisit for simultaneous platforms + platform := runtime.GOOS + if system.LCOWSupported() { + platform = "linux" + } + // always compatible if the OS matches; also match an empty OS + if os == platform || os == "" { + return nil + } + // for compatibility, only fail if the image or runtime OS is Windows + if os == "windows" || platform == "windows" { + return fmt.Errorf("cannot load %s image on %s", os, platform) + } + return nil +} diff --git a/image/tarexport/save.go b/image/tarexport/save.go new file mode 100644 index 0000000..d304a54 --- /dev/null +++ b/image/tarexport/save.go @@ -0,0 +1,409 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string + image *image.Image + layerRef layer.Layer +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} + diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates +} + +func (l *tarexporter) Save(names 
[]string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + // Release all the image top layer references + defer l.releaseLayerReferences(images) + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor. +// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later. +func (l *tarexporter) parseNames(names []string) (desc map[image.ID]*imageDescriptor, rErr error) { + imgDescr := make(map[image.ID]*imageDescriptor) + defer func() { + if rErr != nil { + l.releaseLayerReferences(imgDescr) + } + }() + + addAssoc := func(id image.ID, ref reference.Named) error { + if _, ok := imgDescr[id]; !ok { + descr := &imageDescriptor{} + if err := l.takeLayerReference(id, descr); err != nil { + return err + } + imgDescr[id] = descr + } + + if ref != nil { + if _, ok := ref.(reference.Canonical); ok { + return nil + } + tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged) + if !ok { + return nil + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return nil + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + return nil + } + + for _, name := range names { + ref, err := reference.ParseAnyReference(name) + if err != nil { + return nil, err + } + namedRef, ok := ref.(reference.Named) + if !ok { + // Check if digest ID reference + if digested, ok := ref.(reference.Digested); ok { + id := image.IDFromDigest(digested.Digest()) + if err := addAssoc(id, nil); err != nil { + return nil, err + } + continue + } + return nil, errors.Errorf("invalid reference: %v", name) + } + + if reference.FamiliarName(namedRef) == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } + continue + } + if reference.IsNameOnly(namedRef) { + assocs := l.rs.ReferencesByName(namedRef) + for _, assoc := range assocs { + if err := addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref); err != nil { + return nil, err + } + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } + } + continue + } + id, err := l.rs.Get(namedRef) + if err != nil { + return nil, err + } + if err := addAssoc(image.IDFromDigest(id), namedRef); err != nil { + return nil, err + } + + } + return imgDescr, nil +} + +// takeLayerReference will take/Get the image top layer reference +func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + imgDescr.image = img + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + return nil + } + layer, err := l.ls.Get(topLayerID) + if err != nil { + return err + } + imgDescr.layerRef = layer + return nil +} + +// releaseLayerReferences will release all the image top layer references +func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error { + for _, descr := range imgDescr { + if descr.layerRef != nil { + l.ls.Release(descr.layerRef) + } + } + return nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + s.diffIDPaths = make(map[layer.DiffID]string) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err 
!= nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + foreignSrcs, err := s.saveImage(id) + if err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + familiarName := reference.FamiliarName(ref) + if _, ok := reposLegacy[familiarName]; !ok { + reposLegacy[familiarName] = make(map[string]string) + } + reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, reference.FamiliarString(ref)) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: id.Digest().Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + LayerSources: foreignSrcs, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil { + rf.Close() + return err + } + + rf.Close() + + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(f).Encode(manifest); err != nil { + f.Close() + return err + } + + f.Close() + + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + _, err = io.Copy(outStream, fs) + return err +} + +func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { + img := s.images[id].image + if len(img.RootFS.DiffIDs) == 0 { + return nil, fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + var foreignSrcs map[layer.DiffID]distribution.Descriptor + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{ + // This is for backward compatibility used for + // pre v1.9 docker. 
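+			// A fixed zero timestamp keeps the generated v1 layer IDs deterministic across saves.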
+ Created: time.Unix(0, 0), + } + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return nil, err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) + if err != nil { + return nil, err + } + layers = append(layers, v1Img.ID) + parent = v1ID + if src.Digest != "" { + if foreignSrcs == nil { + foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) + } + foreignSrcs[img.RootFS.DiffIDs[i]] = src + } + } + + configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return nil, err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return nil, err + } + + s.images[id].layers = layers + return foreignSrcs, nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return distribution.Descriptor{}, nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return distribution.Descriptor{}, err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return distribution.Descriptor{}, err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return distribution.Descriptor{}, err + } + + // serialize filesystem + layerPath := filepath.Join(outDir, legacyLayerFileName) + l, err := s.ls.Get(id) + if err != nil { + return distribution.Descriptor{}, err + } + defer layer.ReleaseAndLog(s.ls, l) + + if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { + relPath, err := filepath.Rel(outDir, oldPath) + if err != nil { + return distribution.Descriptor{}, err + } + if err := os.Symlink(relPath, layerPath); err != nil { + return distribution.Descriptor{}, errors.Wrap(err, "error creating symlink while saving layer") + } + } else { + // Use system.CreateSequential rather than os.Create. This ensures sequential + // file access on Windows to avoid eating into MM standby list. + // On Linux, this equates to a regular os.Create. + tarFile, err := system.CreateSequential(layerPath) + if err != nil { + return distribution.Descriptor{}, err + } + defer tarFile.Close() + + arch, err := l.TarStream() + if err != nil { + return distribution.Descriptor{}, err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return distribution.Descriptor{}, err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? 
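+			// The empty entry resolves to outDir itself, so the directory gets the same times as its files.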
+ if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return distribution.Descriptor{}, err + } + } + + s.diffIDPaths[l.DiffID()] = layerPath + } + s.savedLayers[legacyImg.ID] = struct{}{} + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + return src, nil +} diff --git a/image/tarexport/tarexport.go b/image/tarexport/tarexport.go new file mode 100644 index 0000000..f7fab74 --- /dev/null +++ b/image/tarexport/tarexport.go @@ -0,0 +1,47 @@ +package tarexport + +import ( + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + refstore "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` + LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs refstore.Store + loggerImgEvent LogImageEvent +} + +// LogImageEvent defines interface for event generation related to image tar(load and save) operations +type LogImageEvent interface { + //LogImageEvent generates an event related to an image operation + LogImageEvent(imageID, refName, action string) +} + +// NewTarExporter returns new Exporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs refstore.Store, loggerImgEvent LogImageEvent) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + loggerImgEvent: loggerImgEvent, + } +} diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go new file mode 100644 index 0000000..0e8a23c --- /dev/null +++ b/image/v1/imagev1.go @@ -0,0 +1,150 @@ +package v1 + +import ( + "encoding/json" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" +) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. 
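+// The returned ID is the digest of the marshaled v1 config with layer_id and parent folded in.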
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. 
+func ValidateID(id string) error { + return stringid.ValidateID(id) +} diff --git a/image/v1/imagev1_test.go b/image/v1/imagev1_test.go new file mode 100644 index 0000000..936c55e --- /dev/null +++ b/image/v1/imagev1_test.go @@ -0,0 +1,55 @@ +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/docker/docker/image" +) + +func TestMakeV1ConfigFromConfig(t *testing.T) { + img := &image.Image{ + V1Image: image.V1Image{ + ID: "v2id", + Parent: "v2parent", + OS: "os", + }, + OSVersion: "osversion", + RootFS: &image.RootFS{ + Type: "layers", + }, + } + v2js, err := json.Marshal(img) + if err != nil { + t.Fatal(err) + } + + // Convert the image back in order to get RawJSON() support. + img, err = image.NewFromJSON(v2js) + if err != nil { + t.Fatal(err) + } + + js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) + if err != nil { + t.Fatal(err) + } + + newimg := &image.Image{} + err = json.Unmarshal(js, newimg) + if err != nil { + t.Fatal(err) + } + + if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { + t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) + } + + if newimg.RootFS != nil { + t.Error("rootfs should have been removed") + } + + if newimg.V1Image.OS != "os" { + t.Error("os should have been preserved") + } +} diff --git a/integration-cli/benchmark_test.go b/integration-cli/benchmark_test.go new file mode 100644 index 0000000..ae0f67f --- /dev/null +++ b/integration-cli/benchmark_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "sync" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) { + maxConcurrency := runtime.GOMAXPROCS(0) + numIterations := c.N + outerGroup := &sync.WaitGroup{} + outerGroup.Add(maxConcurrency) + chErr := make(chan error, numIterations*2*maxConcurrency) + + for i := 0; i < maxConcurrency; i++ { + go func() { + defer outerGroup.Done() + innerGroup := &sync.WaitGroup{} + innerGroup.Add(2) + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + args := []string{"run", "-d", defaultSleepImage} + args = append(args, sleepCommandForDaemonPlatform()...) + out, _, err := dockerCmdWithError(args...) 
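+				// Failures are funneled into chErr; the benchmark asserts on them once all goroutines finish.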
+ if err != nil { + chErr <- fmt.Errorf(out) + return + } + + id := strings.TrimSpace(out) + tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id) + if err != nil { + chErr <- err + return + } + defer os.RemoveAll(tmpDir) + out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("start", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + // don't do an rm -f here since it can potentially ignore errors from the graphdriver + out, _, err = dockerCmdWithError("rm", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + out, _, err := dockerCmdWithError("ps") + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + innerGroup.Wait() + }() + } + + outerGroup.Wait() + close(chErr) + + for err := range chErr { + c.Assert(err, checker.IsNil) + } +} diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go new file mode 100644 index 0000000..be8f054 --- /dev/null +++ b/integration-cli/check_test.go @@ -0,0 +1,432 @@ +package main + +import ( + "fmt" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "syscall" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/environment" + "github.com/docker/docker/integration-cli/registry" + "github.com/docker/docker/pkg/reexec" + "github.com/go-check/check" +) + +const ( + // the private registry to use for tests + privateRegistryURL = "127.0.0.1:5000" + + // path to containerd's ctr binary + ctrBinary = "docker-containerd-ctr" + + // the docker daemon binary to use + dockerdBinary = "dockerd" +) + +var ( + testEnv *environment.Execution + + // the docker client binary to use + dockerBinary = "" +) + +func init() { + var err error + + reexec.Init() // This is required for external graphdriver tests + + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func TestMain(m *testing.M) { + dockerBinary = testEnv.DockerBinary() + + if testEnv.LocalDaemon() { + fmt.Println("INFO: Testing against a local daemon") + } else { + fmt.Println("INFO: Testing against a remote daemon") + } + exitCode := m.Run() + os.Exit(exitCode) +} + +func Test(t *testing.T) { + cli.EnsureTestEnvIsLoaded(t) + fakestorage.EnsureTestEnvIsLoaded(t) + cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) + } + images := strings.Split(strings.TrimSpace(string(out)), "\n") + testEnv.ProtectImage(t, images...) 
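+	// Frozen helper images are only ensured for Linux daemons.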
+ if testEnv.DaemonPlatform() == "linux" { + ensureFrozenImagesLinux(t) + } + check.TestingT(t) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { +} + +func (s *DockerSuite) OnTimeout(c *check.C) { + if testEnv.DaemonPID() > 0 && testEnv.LocalDaemon() { + daemon.SignalDaemonDump(testEnv.DaemonPID()) + } +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + testEnv.Clean(c, dockerBinary) +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerRegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting) + s.reg = setupRegistry(c, false, "", "") + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerSchema1RegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSchema1RegistrySuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting, NotArm64) + s.reg = setupRegistry(c, true, "", "") + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthHtpasswdSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthHtpasswdSuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting) + s.reg = setupRegistry(c, false, "htpasswd", "") + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthTokenSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthTokenSuite struct { + ds *DockerSuite + reg *registry.V2 + d *daemon.Daemon +} + +func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, registry.Hosting) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func (s 
*DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) { + if s == nil { + c.Fatal("registry suite isn't initialized") + } + s.reg = setupRegistry(c, false, "token", tokenURL) +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *daemon.Daemon +} + +func (s *DockerDaemonSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + if s.d != nil { + s.d.Stop(c) + } + s.ds.TearDownTest(c) +} + +func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { + filepath.Walk(daemon.SockRoot, func(path string, fi os.FileInfo, err error) error { + if err != nil { + // ignore errors here + // not cleaning up sockets is not really an error + return nil + } + if fi.Mode() == os.ModeSocket { + syscall.Unlink(path) + } + return nil + }) + os.RemoveAll(daemon.SockRoot) +} + +const defaultSwarmPort = 2477 + +func init() { +} + +type DockerSwarmSuite struct { + server *httptest.Server + ds *DockerSuite + daemons []*daemon.Swarm + daemonsLock sync.Mutex // protect access to daemons + portIndex int +} + +func (s *DockerSwarmSuite) OnTimeout(c *check.C) { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + d.DumpStackAndQuit() + } +} + +func (s *DockerSwarmSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) +} + +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm { + d := &daemon.Swarm{ + Daemon: daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }), + Port: defaultSwarmPort + s.portIndex, + } + d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port) + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts + d.StartWithBusybox(c, args...) + + if joinSwarm == true { + if len(s.daemons) > 0 { + tokens := s.daemons[0].JoinTokens(c) + token := tokens.Worker + if manager { + token = tokens.Manager + } + c.Assert(d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{s.daemons[0].ListenAddr}, + JoinToken: token, + }), check.IsNil) + } else { + c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) + } + } + + s.portIndex++ + s.daemonsLock.Lock() + s.daemons = append(s.daemons, d) + s.daemonsLock.Unlock() + + return d +} + +func (s *DockerSwarmSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.daemonsLock.Lock() + for _, d := range s.daemons { + if d != nil { + d.Stop(c) + // FIXME(vdemeester) should be handled by SwarmDaemon ? 
+ // raft state file is quite big (64MB) so remove it after every test + walDir := filepath.Join(d.Root, "swarm/raft/wal") + if err := os.RemoveAll(walDir); err != nil { + c.Logf("error removing %v: %v", walDir, err) + } + + d.CleanupExecRoot(c) + } + } + s.daemons = nil + s.daemonsLock.Unlock() + + s.portIndex = 0 + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerTrustSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerTrustSuite struct { + ds *DockerSuite + reg *registry.V2 + not *testNotary +} + +func (s *DockerTrustSuite) OnTimeout(c *check.C) { + s.ds.OnTimeout(c) +} + +func (s *DockerTrustSuite) SetUpTest(c *check.C) { + testRequires(c, registry.Hosting, NotaryServerHosting) + s.reg = setupRegistry(c, false, "", "") + s.not = setupNotary(c) +} + +func (s *DockerTrustSuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.not != nil { + s.not.Close() + } + + // Remove trusted keys and metadata after test + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + s.ds.TearDownTest(c) +} + +func init() { +} + +type DockerTrustedSwarmSuite struct { + swarmSuite DockerSwarmSuite + trustSuite DockerTrustSuite + reg *registry.V2 + not *testNotary +} + +func (s *DockerTrustedSwarmSuite) SetUpTest(c *check.C) { + s.swarmSuite.SetUpTest(c) + s.trustSuite.SetUpTest(c) +} + +func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { + s.trustSuite.TearDownTest(c) + s.swarmSuite.TearDownTest(c) +} + +func (s *DockerTrustedSwarmSuite) OnTimeout(c *check.C) { + s.swarmSuite.OnTimeout(c) +} diff --git a/integration-cli/checker/checker.go b/integration-cli/checker/checker.go new file mode 100644 index 0000000..d1b703a --- /dev/null +++ b/integration-cli/checker/checker.go @@ -0,0 +1,46 @@ +// Package checker provides Docker specific implementations of the go-check.Checker interface. +package checker + +import ( + "github.com/go-check/check" + "github.com/vdemeester/shakers" +) + +// As a commodity, we bring all check.Checker variables into the current namespace to avoid having +// to think about check.X versus checker.X. 
+var ( + DeepEquals = check.DeepEquals + ErrorMatches = check.ErrorMatches + FitsTypeOf = check.FitsTypeOf + HasLen = check.HasLen + Implements = check.Implements + IsNil = check.IsNil + Matches = check.Matches + Not = check.Not + NotNil = check.NotNil + PanicMatches = check.PanicMatches + Panics = check.Panics + + Contains = shakers.Contains + ContainsAny = shakers.ContainsAny + Count = shakers.Count + Equals = shakers.Equals + EqualFold = shakers.EqualFold + False = shakers.False + GreaterOrEqualThan = shakers.GreaterOrEqualThan + GreaterThan = shakers.GreaterThan + HasPrefix = shakers.HasPrefix + HasSuffix = shakers.HasSuffix + Index = shakers.Index + IndexAny = shakers.IndexAny + IsAfter = shakers.IsAfter + IsBefore = shakers.IsBefore + IsBetween = shakers.IsBetween + IsLower = shakers.IsLower + IsUpper = shakers.IsUpper + LessOrEqualThan = shakers.LessOrEqualThan + LessThan = shakers.LessThan + TimeEquals = shakers.TimeEquals + True = shakers.True + TimeIgnore = shakers.TimeIgnore +) diff --git a/integration-cli/cli/build/build.go b/integration-cli/cli/build/build.go new file mode 100644 index 0000000..8ffaa35 --- /dev/null +++ b/integration-cli/cli/build/build.go @@ -0,0 +1,82 @@ +package build + +import ( + "io" + "strings" + + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + icmd "github.com/docker/docker/pkg/testutil/cmd" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// WithStdinContext sets the build context from the standard input with the specified reader +func WithStdinContext(closer io.ReadCloser) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = closer + return func() { + // FIXME(vdemeester) we should not ignore the error here… + closer.Close() + } + } +} + +// WithDockerfile creates / returns a CmdOperator to set the Dockerfile for a build operation +func WithDockerfile(dockerfile string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = strings.NewReader(dockerfile) + return nil + } +} + +// WithoutCache makes the build ignore cache +func WithoutCache(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "--no-cache") + return nil +} + +// WithContextPath sets the build context path +func WithContextPath(path string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, path) + return nil + } +} + +// WithExternalBuildContext use the specified context as build context +func WithExternalBuildContext(ctx *fakecontext.Fake) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return nil + } +} + +// WithBuildContext sets up the build context +func WithBuildContext(t testingT, contextOperators ...func(*fakecontext.Fake) error) func(*icmd.Cmd) func() { + // FIXME(vdemeester) de-duplicate that + ctx := fakecontext.New(t, "", contextOperators...) 
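+	// Cleanup of the generated context is deferred to the closer returned below (see closeBuildContext).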
+ return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return closeBuildContext(t, ctx) + } +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*fakecontext.Fake) error { + return fakecontext.WithFile(name, content) +} + +func closeBuildContext(t testingT, ctx *fakecontext.Fake) func() { + return func() { + if err := ctx.Close(); err != nil { + t.Fatal(err) + } + } +} diff --git a/integration-cli/cli/build/fakecontext/context.go b/integration-cli/cli/build/fakecontext/context.go new file mode 100644 index 0000000..8ecf4e3 --- /dev/null +++ b/integration-cli/cli/build/fakecontext/context.go @@ -0,0 +1,124 @@ +package fakecontext + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// New creates a fake build context +func New(t testingT, dir string, modifiers ...func(*Fake) error) *Fake { + fakeContext := &Fake{Dir: dir} + if dir == "" { + if err := newDir(fakeContext); err != nil { + t.Fatal(err) + } + } + + for _, modifier := range modifiers { + if err := modifier(fakeContext); err != nil { + t.Fatal(err) + } + } + + return fakeContext +} + +func newDir(fake *Fake) error { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return err + } + if err := os.Chmod(tmp, 0755); err != nil { + return err + } + fake.Dir = tmp + return nil +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*Fake) error { + return func(ctx *Fake) error { + return ctx.Add(name, content) + } +} + +// WithDockerfile adds the specified content as Dockerfile in the build context +func WithDockerfile(content string) func(*Fake) error { + return WithFile("Dockerfile", content) +} + +// WithFiles adds the specified files in the build context, content is a string +func WithFiles(files map[string]string) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, content); err != nil { + return err + } + } + return nil + } +} + +// WithBinaryFiles adds the specified files in the build context, content is binary +func WithBinaryFiles(files map[string]*bytes.Buffer) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, string(content.Bytes())); err != nil { + return err + } + } + return nil + } +} + +// Fake creates directories that can be used as a build context +type Fake struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *Fake) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *Fake) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + dirpath := filepath.Dir(fp) + if dirpath != "." { + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *Fake) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *Fake) Close() error { + return os.RemoveAll(f.Dir) +} + +// AsTarReader returns a ReadCloser with the contents of Dir as a tar archive. 
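+// The caller is responsible for closing the returned reader.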
+func (f *Fake) AsTarReader(t testingT) io.ReadCloser { + reader, err := archive.TarWithOptions(f.Dir, &archive.TarOptions{}) + if err != nil { + t.Fatalf("Failed to create tar from %s: %s", f.Dir, err) + } + return reader +} diff --git a/integration-cli/cli/build/fakegit/fakegit.go b/integration-cli/cli/build/fakegit/fakegit.go new file mode 100644 index 0000000..74faffd --- /dev/null +++ b/integration-cli/cli/build/fakegit/fakegit.go @@ -0,0 +1,125 @@ +package fakegit + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" +) + +type testingT interface { + logT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +// FakeGit is a fake git server +type FakeGit struct { + root string + server gitServer + RepoURL string +} + +// Close closes the server, implements Closer interface +func (g *FakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +// New create a fake git server that can be used for git related tests +func New(c testingT, name string, files map[string]string, enforceLocalServer bool) *FakeGit { + ctx := fakecontext.New(c, "", fakecontext.WithFiles(files)) + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + c.Fatalf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + c.Fatal(err) + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + c.Fatalf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + c.Fatalf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + c.Fatal(err) + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + + var server gitServer + if !enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server = fakestorage.New(c, root) + } else { + // always 
start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &FakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + } +} diff --git a/integration-cli/cli/build/fakestorage/fixtures.go b/integration-cli/cli/build/fakestorage/fixtures.go new file mode 100644 index 0000000..f6a63dc --- /dev/null +++ b/integration-cli/cli/build/fakestorage/fixtures.go @@ -0,0 +1,67 @@ +package fakestorage + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + "github.com/docker/docker/integration-cli/cli" +) + +var ensureHTTPServerOnce sync.Once + +func ensureHTTPServerImage(t testingT) { + var doIt bool + ensureHTTPServerOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + + defer testEnv.ProtectImage(t, "httpserver:latest") + + tmp, err := ioutil.TempDir("", "docker-http-server-test") + if err != nil { + t.Fatalf("could not build http server: %v", err) + } + defer os.RemoveAll(tmp) + + goos := testEnv.DaemonPlatform() + if goos == "" { + goos = "linux" + } + goarch := os.Getenv("DOCKER_ENGINE_GOARCH") + if goarch == "" { + goarch = "amd64" + } + + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + t.Fatalf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) + var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %s", string(out)) + } + + cpCmd, lookErr := exec.LookPath("cp") + if lookErr != nil { + t.Fatalf("could not build http server: %v", lookErr) + } + if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %v", string(out)) + } + + cli.DockerCmd(t, "build", "-q", "-t", "httpserver", tmp) +} diff --git a/integration-cli/cli/build/fakestorage/storage.go b/integration-cli/cli/build/fakestorage/storage.go new file mode 100644 index 0000000..49f47e4 --- /dev/null +++ b/integration-cli/cli/build/fakestorage/storage.go @@ -0,0 +1,176 @@ +package fakestorage + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "sync" + + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/environment" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stringutils" +) + +var ( + testEnv *environment.Execution + onlyOnce sync.Once +) + +// EnsureTestEnvIsLoaded make sure the test environment is loaded for this package +func EnsureTestEnvIsLoaded(t testingT) { + var doIt bool + var err error + onlyOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + testEnv, err = environment.New() + if err != nil { + t.Fatalf("error loading testenv : %v", err) + } +} + +type testingT interface { + logT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Fake is a static file server. It might be running locally or remotely +// on test host. 
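+//
+// Two implementations follow: localFileStorage serves the context through an
+// httptest server, while remoteFileServer runs a containerized server on the
+// test daemon. A hypothetical call site (illustration only):
+//
+//   srv := fakestorage.New(t, "", fakecontext.WithFile("index.html", "hello"))
+//   defer srv.Close()
+//   fileURL := srv.URL() + "/index.html"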
+type Fake interface {
+	Close() error
+	URL() string
+	CtxDir() string
+}
+
+// New returns a static file server that will be used as a build context.
+func New(t testingT, dir string, modifiers ...func(*fakecontext.Fake) error) Fake {
+	ctx := fakecontext.New(t, dir, modifiers...)
+	if testEnv.LocalDaemon() {
+		return newLocalFakeStorage(t, ctx)
+	}
+	return newRemoteFileServer(t, ctx)
+}
+
+// localFileStorage is a file storage on the running machine
+type localFileStorage struct {
+	*fakecontext.Fake
+	*httptest.Server
+}
+
+func (s *localFileStorage) URL() string {
+	return s.Server.URL
+}
+
+func (s *localFileStorage) CtxDir() string {
+	return s.Fake.Dir
+}
+
+func (s *localFileStorage) Close() error {
+	defer s.Server.Close()
+	return s.Fake.Close()
+}
+
+func newLocalFakeStorage(t testingT, ctx *fakecontext.Fake) *localFileStorage {
+	handler := http.FileServer(http.Dir(ctx.Dir))
+	server := httptest.NewServer(handler)
+	return &localFileStorage{
+		Fake:   ctx,
+		Server: server,
+	}
+}
+
+// remoteFileServer is a containerized static file server started on the remote
+// testing machine to be used in URL-accepting docker build functionality.
+type remoteFileServer struct {
+	host      string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712
+	container string
+	image     string
+	ctx       *fakecontext.Fake
+}
+
+func (f *remoteFileServer) URL() string {
+	u := url.URL{
+		Scheme: "http",
+		Host:   f.host}
+	return u.String()
+}
+
+func (f *remoteFileServer) CtxDir() string {
+	return f.ctx.Dir
+}
+
+func (f *remoteFileServer) Close() error {
+	defer func() {
+		if f.ctx != nil {
+			f.ctx.Close()
+		}
+		if f.image != "" {
+			if err := cli.Docker(cli.Args("rmi", "-f", f.image)).Error; err != nil {
+				fmt.Fprintf(os.Stderr, "Error closing remote file server: %v\n", err)
+			}
+		}
+	}()
+	if f.container == "" {
+		return nil
+	}
+	return cli.Docker(cli.Args("rm", "-fv", f.container)).Error
+}
+
+func newRemoteFileServer(t testingT, ctx *fakecontext.Fake) *remoteFileServer {
+	var (
+		image     = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
+		container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
+	)
+
+	ensureHTTPServerImage(t)
+
+	// Build the image
+	if err := ctx.Add("Dockerfile", `FROM httpserver
+COPY . /static`); err != nil {
+		t.Fatal(err)
+	}
+	cli.BuildCmd(t, image, build.WithoutCache, build.WithExternalBuildContext(ctx))
+
+	// Start the container
+	cli.DockerCmd(t, "run", "-d", "-P", "--name", container, image)
+
+	// Find out the system assigned port
+	out := cli.DockerCmd(t, "port", container, "80/tcp").Combined()
+	fileserverHostPort := strings.Trim(out, "\n")
+	_, port, err := net.SplitHostPort(fileserverHostPort)
+	if err != nil {
+		t.Fatalf("unable to parse file server host:port: %v", err)
+	}
+
+	dockerHostURL, err := url.Parse(request.DaemonHost())
+	if err != nil {
+		t.Fatalf("unable to parse daemon host URL: %v", err)
+	}
+
+	host, _, err := net.SplitHostPort(dockerHostURL.Host)
+	if err != nil {
+		t.Fatalf("unable to parse docker daemon host:port: %v", err)
+	}
+
+	return &remoteFileServer{
+		container: container,
+		image:     image,
+		host:      fmt.Sprintf("%s:%s", host, port),
+		ctx:       ctx}
+}
diff --git a/integration-cli/cli/cli.go b/integration-cli/cli/cli.go
new file mode 100644
index 0000000..d835521
--- /dev/null
+++ b/integration-cli/cli/cli.go
@@ -0,0 +1,231 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/integration-cli/environment"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/pkg/errors"
+)
+
+var (
+	testEnv  *environment.Execution
+	onlyOnce sync.Once
+)
+
+// EnsureTestEnvIsLoaded makes sure the test environment is loaded for this package
+func EnsureTestEnvIsLoaded(t testingT) {
+	var doIt bool
+	var err error
+	onlyOnce.Do(func() {
+		doIt = true
+	})
+
+	if !doIt {
+		return
+	}
+	testEnv, err = environment.New()
+	if err != nil {
+		t.Fatalf("error loading testenv: %v", err)
+	}
+}
+
+// CmdOperator defines functions that can modify a command
+type CmdOperator func(*icmd.Cmd) func()
+
+type testingT interface {
+	Fatal(args ...interface{})
+	Fatalf(string, ...interface{})
+}
+
+// DockerCmd executes the specified docker command and expects success
+func DockerCmd(t testingT, args ...string) *icmd.Result {
+	return Docker(Args(args...)).Assert(t, icmd.Success)
+}
+
+// BuildCmd executes the specified docker build command and expects success
+func BuildCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result {
+	return Docker(Build(name), cmdOperators...).Assert(t, icmd.Success)
+}
+
+// InspectCmd executes the specified docker inspect command and expects success
+func InspectCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result {
+	return Docker(Inspect(name), cmdOperators...).Assert(t, icmd.Success)
+}
+
+// WaitRun will wait for the specified container to be running, maximum 5 seconds.
+func WaitRun(t testingT, name string, cmdOperators ...CmdOperator) {
+	WaitForInspectResult(t, name, "{{.State.Running}}", "true", 5*time.Second, cmdOperators...)
+}
+
+// WaitExited will wait for the specified container to reach the exited state,
+// subject to a maximum time limit supplied by the caller
+func WaitExited(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) {
+	WaitForInspectResult(t, name, "{{.State.Status}}", "exited", timeout, cmdOperators...)
+}
+
+// WaitRestart will wait for the specified container to restart once
+func WaitRestart(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) {
+	WaitForInspectResult(t, name, "{{.RestartCount}}", "1", timeout, cmdOperators...)
+}
+
+// WaitForInspectResult waits for the specified expression to equal the specified expected string in the given time.
+func WaitForInspectResult(t testingT, name, expr, expected string, timeout time.Duration, cmdOperators ...CmdOperator) {
+	after := time.After(timeout)
+
+	args := []string{"inspect", "-f", expr, name}
+	for {
+		result := Docker(Args(args...), cmdOperators...)
+		if result.Error != nil {
+			if !strings.Contains(strings.ToLower(result.Stderr()), "no such") {
+				t.Fatalf("error executing docker inspect: %s\n%s",
+					result.Stderr(), result.Stdout())
+			}
+			select {
+			case <-after:
+				t.Fatal(result.Error)
+			default:
+				time.Sleep(10 * time.Millisecond)
+				continue
+			}
+		}
+
+		out := strings.TrimSpace(result.Stdout())
+		if out == expected {
+			break
+		}
+
+		select {
+		case <-after:
+			t.Fatalf("condition %q == %q not true in time (%v)", out, expected, timeout)
+		default:
+		}
+
+		time.Sleep(100 * time.Millisecond)
+	}
+}
+
+// Docker executes the specified docker command
+func Docker(cmd icmd.Cmd, cmdOperators ...CmdOperator) *icmd.Result {
+	for _, op := range cmdOperators {
+		deferFn := op(&cmd)
+		if deferFn != nil {
+			defer deferFn()
+		}
+	}
+	appendDocker(&cmd)
+	if err := validateArgs(cmd.Command...); err != nil {
+		return &icmd.Result{
+			Error: err,
+		}
+	}
+	return icmd.RunCmd(cmd)
+}
+
+// validateArgs is a checker to ensure tests are not running commands which are
+// not supported on certain platforms. Specifically on Windows this is 'busybox top'.
+func validateArgs(args ...string) error {
+	if testEnv.DaemonPlatform() != "windows" {
+		return nil
+	}
+	foundBusybox := -1
+	for key, value := range args {
+		if strings.ToLower(value) == "busybox" {
+			foundBusybox = key
+		}
+		if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") {
+			return errors.New("cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()")
+		}
+	}
+	return nil
+}
+
+// Build executes the specified docker build command
+func Build(name string) icmd.Cmd {
+	return icmd.Command("build", "-t", name)
+}
+
+// Inspect executes the specified docker inspect command
+func Inspect(name string) icmd.Cmd {
+	return icmd.Command("inspect", name)
+}
+
+// Format sets the specified format with the --format flag
+func Format(format string) func(*icmd.Cmd) func() {
+	return func(cmd *icmd.Cmd) func() {
+		cmd.Command = append(
+			[]string{cmd.Command[0]},
+			append([]string{"--format", fmt.Sprintf("{{%s}}", format)}, cmd.Command[1:]...)...,
+		)
+		return nil
+	}
+}
+
+func appendDocker(cmd *icmd.Cmd) {
+	cmd.Command = append([]string{testEnv.DockerBinary()}, cmd.Command...)
+}
+
+// Args builds an icmd.Cmd struct from the specified arguments
+func Args(args ...string) icmd.Cmd {
+	switch len(args) {
+	case 0:
+		return icmd.Cmd{}
+	case 1:
+		return icmd.Command(args[0])
+	default:
+		return icmd.Command(args[0], args[1:]...)
+	}
+}
+
+// Daemon points the command at the specified daemon
+func Daemon(d *daemon.Daemon) func(*icmd.Cmd) func() {
+	return func(cmd *icmd.Cmd) func() {
+		cmd.Command = append([]string{"--host", d.Sock()}, cmd.Command...)
+ return nil + } +} + +// WithTimeout sets the timeout for the command to run +func WithTimeout(timeout time.Duration) func(cmd *icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Timeout = timeout + return nil + } +} + +// WithEnvironmentVariables sets the specified environment variables for the command to run +func WithEnvironmentVariables(envs ...string) func(cmd *icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Env = envs + return nil + } +} + +// WithFlags sets the specified flags for the command to run +func WithFlags(flags ...string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, flags...) + return nil + } +} + +// InDir sets the folder in which the command should be executed +func InDir(path string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = path + return nil + } +} + +// WithStdout sets the standard output writer of the command +func WithStdout(writer io.Writer) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Stdout = writer + return nil + } +} diff --git a/integration-cli/daemon/daemon.go b/integration-cli/daemon/daemon.go new file mode 100644 index 0000000..8b086c9 --- /dev/null +++ b/integration-cli/daemon/daemon.go @@ -0,0 +1,815 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" + "github.com/pkg/errors" +) + +type testingT interface { + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// SockRoot holds the path of the default docker integration daemon socket +var SockRoot = filepath.Join(os.TempDir(), "docker-integration") + +var errDaemonNotStarted = errors.New("daemon not started") + +// Daemon represents a Docker daemon for the testing framework. +type Daemon struct { + GlobalFlags []string + Root string + Folder string + Wait chan error + UseDefaultHost bool + UseDefaultTLSHost bool + + id string + logFile *os.File + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + userlandProxy bool + execRoot string + experimental bool + dockerBinary string + dockerdBinary string + log logT +} + +// Config holds docker daemon integration configuration +type Config struct { + Experimental bool +} + +type clientConfig struct { + transport *http.Transport + scheme string + addr string +} + +// New returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. +// The daemon will not automatically start. 
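+//
+// A hypothetical call site (illustration only; assumes the harness exports
+// DEST and provides the binaries):
+//
+//   d := daemon.New(t, "docker", "dockerd", daemon.Config{})
+//   d.Start(t)
+//   defer d.Stop(t)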
+func New(t testingT, dockerBinary string, dockerdBinary string, config Config) *Daemon {
+	dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
+	if dest == "" {
+		dest = os.Getenv("DEST")
+	}
+	if dest == "" {
+		t.Fatalf("Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
+	}
+
+	if err := os.MkdirAll(SockRoot, 0700); err != nil {
+		t.Fatalf("could not create daemon socket root: %v", err)
+	}
+
+	id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
+	dir := filepath.Join(dest, id)
+	daemonFolder, err := filepath.Abs(dir)
+	if err != nil {
+		t.Fatalf("Could not make %q an absolute path", dir)
+	}
+	daemonRoot := filepath.Join(daemonFolder, "root")
+
+	if err := os.MkdirAll(daemonRoot, 0755); err != nil {
+		t.Fatalf("Could not create daemon root %q", daemonRoot)
+	}
+
+	userlandProxy := true
+	if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
+		// only override the default when the value parses cleanly
+		if val, err := strconv.ParseBool(env); err == nil {
+			userlandProxy = val
+		}
+	}
+
+	return &Daemon{
+		id:            id,
+		Folder:        daemonFolder,
+		Root:          daemonRoot,
+		storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
+		userlandProxy: userlandProxy,
+		execRoot:      filepath.Join(os.TempDir(), "docker-execroot", id),
+		dockerBinary:  dockerBinary,
+		dockerdBinary: dockerdBinary,
+		experimental:  config.Experimental,
+		log:           t,
+	}
+}
+
+// RootDir returns the root directory of the daemon.
+func (d *Daemon) RootDir() string {
+	return d.Root
+}
+
+// ID returns the generated id of the daemon
+func (d *Daemon) ID() string {
+	return d.id
+}
+
+// StorageDriver returns the configured storage driver of the daemon
+func (d *Daemon) StorageDriver() string {
+	return d.storageDriver
+}
+
+// CleanupExecRoot cleans the daemon exec root (network namespaces, ...)
+func (d *Daemon) CleanupExecRoot(c *check.C) {
+	cleanupExecRoot(c, d.execRoot)
+}
+
+func (d *Daemon) getClientConfig() (*clientConfig, error) {
+	var (
+		transport *http.Transport
+		scheme    string
+		addr      string
+		proto     string
+	)
+	if d.UseDefaultTLSHost {
+		option := &tlsconfig.Options{
+			CAFile:   "fixtures/https/ca.pem",
+			CertFile: "fixtures/https/client-cert.pem",
+			KeyFile:  "fixtures/https/client-key.pem",
+		}
+		tlsConfig, err := tlsconfig.Client(*option)
+		if err != nil {
+			return nil, err
+		}
+		transport = &http.Transport{
+			TLSClientConfig: tlsConfig,
+		}
+		addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
+		scheme = "https"
+		proto = "tcp"
+	} else if d.UseDefaultHost {
+		addr = opts.DefaultUnixSocket
+		proto = "unix"
+		scheme = "http"
+		transport = &http.Transport{}
+	} else {
+		addr = d.sockPath()
+		proto = "unix"
+		scheme = "http"
+		transport = &http.Transport{}
+	}
+
+	if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
+		return nil, err
+	}
+	transport.DisableKeepAlives = true
+
+	return &clientConfig{
+		transport: transport,
+		scheme:    scheme,
+		addr:      addr,
+	}, nil
+}
+
+// Start starts the daemon and returns once it is ready to receive requests.
+func (d *Daemon) Start(t testingT, args ...string) {
+	if err := d.StartWithError(args...); err != nil {
+		t.Fatalf("Error starting daemon with arguments: %v", args)
+	}
+}
+
+// StartWithError starts the daemon and returns once it is ready to receive requests.
+// It returns an error in case it couldn't start.
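+//
+// Sketch of the error-checking variant of Start (illustration only):
+//
+//   if err := d.StartWithError("--iptables=false"); err != nil {
+//       t.Fatal(err)
+//   }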
+func (d *Daemon) StartWithError(args ...string) error { + logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder) + } + + return d.StartWithLogFile(logFile, args...) +} + +// StartWithLogFile will start the daemon and attach its streams to a given file. +func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { + dockerdBinary, err := exec.LookPath(d.dockerdBinary) + if err != nil { + return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) + } + args := append(d.GlobalFlags, + "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", + "--data-root", d.Root, + "--exec-root", d.execRoot, + "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + if d.experimental { + args = append(args, "--experimental", "--init") + } + if !(d.UseDefaultHost || d.UseDefaultTLSHost) { + args = append(args, []string{"--host", d.Sock()}...) + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + args = append(args, []string{"--userns-remap", root}...) + } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundLog := false + foundSd := false + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + args = append(args, "--debug") + } + if d.storageDriver != "" && !foundSd { + args = append(args, "--storage-driver", d.storageDriver) + } + + args = append(args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, args...) 
+ d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") + d.cmd.Stdout = out + d.cmd.Stderr = out + d.logFile = out + + if err := d.cmd.Start(); err != nil { + return errors.Errorf("[%s] could not start daemon container: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.log.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.Wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.log.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return errors.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return errors.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + clientConfig, err := d.getClientConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/_ping", nil) + if err != nil { + return errors.Wrapf(err, "[%s] could not create new request", d.id) + } + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + resp, err := client.Do(req) + if err != nil { + continue + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) + } + d.log.Logf("[%s] daemon started\n", d.id) + d.Root, err = d.queryRootDir() + if err != nil { + return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) + } + return nil + case <-d.Wait: + return errors.Errorf("[%s] Daemon exited during startup", d.id) + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(t testingT, arg ...string) { + d.Start(t, arg...) + if err := d.LoadBusybox(); err != nil { + t.Fatalf("Error loading busybox image to current daemon: %s\n%v", d.id, err) + } +} + +// Kill will send a SIGKILL to the daemon +func (d *Daemon) Kill() error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + if err := d.cmd.Process.Kill(); err != nil { + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil { + return err + } + + return nil +} + +// Pid returns the pid of the daemon +func (d *Daemon) Pid() int { + return d.cmd.Process.Pid +} + +// Interrupt stops the daemon by sending it an Interrupt signal +func (d *Daemon) Interrupt() error { + return d.Signal(os.Interrupt) +} + +// Signal sends the specified signal to the daemon if running +func (d *Daemon) Signal(signal os.Signal) error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + return d.cmd.Process.Signal(signal) +} + +// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its +// stack to its log file and exit +// This is used primarily for gathering debug information on test timeout +func (d *Daemon) DumpStackAndQuit() { + if d.cmd == nil || d.cmd.Process == nil { + return + } + SignalDaemonDump(d.cmd.Process.Pid) +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it times out, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. 
+// If an error occurs while stopping the daemon, the test will fail.
+func (d *Daemon) Stop(t testingT) {
+	err := d.StopWithError()
+	if err != nil {
+		if err != errDaemonNotStarted {
+			t.Fatalf("Error while stopping the daemon %s: %v", d.id, err)
+		} else {
+			t.Logf("Daemon %s is not started", d.id)
+		}
+	}
+}
+
+// StopWithError will send a SIGINT every second and wait for the daemon to stop.
+// If it times out, a SIGKILL is sent.
+// StopWithError will not delete the daemon directory. If a purged daemon is needed,
+// instantiate a new one with NewDaemon.
+func (d *Daemon) StopWithError() error {
+	if d.cmd == nil || d.Wait == nil {
+		return errDaemonNotStarted
+	}
+
+	defer func() {
+		d.logFile.Close()
+		d.cmd = nil
+	}()
+
+	i := 1
+	tick := time.Tick(time.Second)
+
+	if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+		if strings.Contains(err.Error(), "os: process already finished") {
+			return errDaemonNotStarted
+		}
+		return errors.Errorf("could not send signal: %v", err)
+	}
+out1:
+	for {
+		select {
+		case err := <-d.Wait:
+			return err
+		case <-time.After(20 * time.Second):
+			// allow time for stopping jobs and running onShutdown hooks
+			d.log.Logf("[%s] daemon still running after 20 seconds of SIGINT; retrying", d.id)
+			break out1
+		}
+	}
+
+out2:
+	for {
+		select {
+		case err := <-d.Wait:
+			return err
+		case <-tick:
+			i++
+			if i > 5 {
+				d.log.Logf("tried to interrupt daemon %d times, now trying to kill it", i)
+				break out2
+			}
+			d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
+			if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+				return errors.Errorf("could not send signal: %v", err)
+			}
+		}
+	}
+
+	if err := d.cmd.Process.Kill(); err != nil {
+		d.log.Logf("Could not kill daemon: %v", err)
+		return err
+	}
+
+	if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Restart will restart the daemon by first stopping it and then starting it.
+// If an error occurs while starting the daemon, the test will fail.
+func (d *Daemon) Restart(t testingT, args ...string) {
+	d.Stop(t)
+	d.handleUserns()
+	d.Start(t, args...)
+}
+
+// RestartWithError will restart the daemon by first stopping it and then starting it.
+func (d *Daemon) RestartWithError(arg ...string) error {
+	if err := d.StopWithError(); err != nil {
+		return err
+	}
+	d.handleUserns()
+	return d.StartWithError(arg...)
+}
+
+func (d *Daemon) handleUserns() {
+	// in the case of tests running a user namespace-enabled daemon, we have resolved
+	// d.Root to be the actual final path of the graph dir after the "uid.gid" of
+	// remapped root is added--we need to subtract it from the path before calling
+	// start or else we will continue making subdirectories rather than truly restarting
+	// with the same location/root:
+	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
+		d.Root = filepath.Dir(d.Root)
+	}
+}
+
+// LoadBusybox will load the stored busybox into a newly started daemon
+func (d *Daemon) LoadBusybox() error {
+	bb := filepath.Join(d.Folder, "busybox.tar")
+	if _, err := os.Stat(bb); err != nil {
+		if !os.IsNotExist(err) {
+			return errors.Errorf("unexpected error on busybox.tar stat: %v", err)
+		}
+		// saving busybox image from main daemon
+		if out, err := exec.Command(d.dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil {
+			imagesOut, _ := exec.Command(d.dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput()
+			return errors.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut)))
+		}
+	}
+	// loading busybox image to this daemon
+	if out, err := d.Cmd("load", "--input", bb); err != nil {
+		return errors.Errorf("could not load busybox image: %s", out)
+	}
+	if err := os.Remove(bb); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *Daemon) queryRootDir() (string, error) {
+	// update daemon root by asking /info endpoint (to support user
+	// namespaced daemon with root remapped uid.gid directory)
+	clientConfig, err := d.getClientConfig()
+	if err != nil {
+		return "", err
+	}
+
+	client := &http.Client{
+		Transport: clientConfig.transport,
+	}
+
+	req, err := http.NewRequest("GET", "/info", nil)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.URL.Host = clientConfig.addr
+	req.URL.Scheme = clientConfig.scheme
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
+		return resp.Body.Close()
+	})
+
+	type Info struct {
+		DockerRootDir string
+	}
+	var b []byte
+	var i Info
+	b, err = testutil.ReadBody(body)
+	if err == nil && resp.StatusCode == http.StatusOK {
+		// read the docker root dir
+		if err = json.Unmarshal(b, &i); err == nil {
+			return i.DockerRootDir, nil
+		}
+	}
+	return "", err
+}
+
+// Sock returns the socket path of the daemon
+func (d *Daemon) Sock() string {
+	return "unix://" + d.sockPath()
+}
+
+func (d *Daemon) sockPath() string {
+	return filepath.Join(SockRoot, d.id+".sock")
+}
+
+// WaitRun waits up to 10 seconds for a container to be running
+func (d *Daemon) WaitRun(contID string) error {
+	args := []string{"--host", d.Sock()}
+	return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...)
+} + +// GetBaseDeviceSize returns the base device size of the daemon +func (d *Daemon) GetBaseDeviceSize(c *check.C) int64 { + infoCmdOutput, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(d.dockerBinary, "-H", d.Sock(), "info"), + exec.Command("grep", "Base Device Size"), + ) + c.Assert(err, checker.IsNil) + basesizeSlice := strings.Split(infoCmdOutput, ":") + basesize := strings.Trim(basesizeSlice[1], " ") + basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + c.Assert(err, checker.IsNil) + basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) + return basesizeBytes +} + +// Cmd executes a docker CLI command against this daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(args ...string) (string, error) { + result := icmd.RunCmd(d.Command(args...)) + return result.Combined(), result.Error +} + +// Command creates a docker CLI command against this daemon, to be executed later. +// Example: d.Command("version") creates a command to run "docker -H unix://path/to/unix.sock version" +func (d *Daemon) Command(args ...string) icmd.Cmd { + return icmd.Command(d.dockerBinary, d.PrependHostArg(args)...) +} + +// PrependHostArg prepend the specified arguments by the daemon host flags +func (d *Daemon) PrependHostArg(args []string) []string { + for _, arg := range args { + if arg == "--host" || arg == "-H" { + return args + } + } + return append([]string{"--host", d.Sock()}, args...) +} + +// SockRequest executes a socket request on a daemon and returns statuscode and output. +func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := testutil.ReadBody(body) + return res.StatusCode, b, err +} + +// SockRequestRaw executes a socket request on a daemon and returns an http +// response and a reader for the output data. 
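+//
+// A hypothetical raw call (illustration only):
+//
+//   res, body, err := d.SockRequestRaw("GET", "/info", nil, "application/json")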
+// Deprecated: use request package instead +func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return request.SockRequestRaw(method, endpoint, data, ct, d.Sock()) +} + +// LogFileName returns the path the daemon's log file +func (d *Daemon) LogFileName() string { + return d.logFile.Name() +} + +// GetIDByName returns the ID of an object (container, volume, …) given its name +func (d *Daemon) GetIDByName(name string) (string, error) { + return d.inspectFieldWithError(name, "Id") +} + +// ActiveContainers returns the list of ids of the currently running containers +func (d *Daemon) ActiveContainers() (ids []string) { + // FIXME(vdemeester) shouldn't ignore the error + out, _ := d.Cmd("ps", "-q") + for _, id := range strings.Split(out, "\n") { + if id = strings.TrimSpace(id); id != "" { + ids = append(ids, id) + } + } + return +} + +// ReadLogFile returns the content of the daemon log file +func (d *Daemon) ReadLogFile() ([]byte, error) { + return ioutil.ReadFile(d.logFile.Name()) +} + +// InspectField returns the field filter by 'filter' +func (d *Daemon) InspectField(name, filter string) (string, error) { + return d.inspectFilter(name, filter) +} + +func (d *Daemon) inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + out, err := d.Cmd("inspect", "-f", format, name) + if err != nil { + return "", errors.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { + return d.inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +// FindContainerIP returns the ip of the specified container +func (d *Daemon) FindContainerIP(id string) (string, error) { + out, err := d.Cmd("inspect", "--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'", id) + if err != nil { + return "", err + } + return strings.Trim(out, " \r\n'"), nil +} + +// BuildImageWithOut builds an image with the specified dockerfile and options and returns the output +func (d *Daemon) BuildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { + buildCmd := BuildImageCmdWithHost(d.dockerBinary, name, dockerfile, d.Sock(), useCache, buildFlags...) 
+	result := icmd.RunCmd(icmd.Cmd{
+		Command: buildCmd.Args,
+		Env:     buildCmd.Env,
+		Dir:     buildCmd.Dir,
+		Stdin:   buildCmd.Stdin,
+		Stdout:  buildCmd.Stdout,
+	})
+	return result.Combined(), result.ExitCode, result.Error
+}
+
+// CheckActiveContainerCount returns the number of active containers
+// FIXME(vdemeester) should re-use ActiveContainers in some way
+func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
+	out, err := d.Cmd("ps", "-q")
+	c.Assert(err, checker.IsNil)
+	if len(strings.TrimSpace(out)) == 0 {
+		return 0, nil
+	}
+	return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
+}
+
+// ReloadConfig asks the daemon to reload its configuration
+func (d *Daemon) ReloadConfig() error {
+	if d.cmd == nil || d.cmd.Process == nil {
+		return errors.New("daemon is not running")
+	}
+
+	errCh := make(chan error)
+	started := make(chan struct{})
+	go func() {
+		_, body, err := request.DoOnHost(d.Sock(), "/events", request.Method(http.MethodGet))
+		close(started)
+		if err != nil {
+			errCh <- err
+		}
+		defer body.Close()
+		dec := json.NewDecoder(body)
+		for {
+			var e events.Message
+			if err := dec.Decode(&e); err != nil {
+				errCh <- err
+				return
+			}
+			if e.Type != events.DaemonEventType {
+				continue
+			}
+			if e.Action != "reload" {
+				continue
+			}
+			close(errCh) // notify that we are done
+			return
+		}
+	}()
+
+	<-started
+	if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
+		return errors.Errorf("error signaling daemon reload: %v", err)
+	}
+	select {
+	case err := <-errCh:
+		if err != nil {
+			return errors.Errorf("error waiting for daemon reload event: %v", err)
+		}
+	case <-time.After(30 * time.Second):
+		return errors.New("timeout waiting for daemon reload event")
+	}
+	return nil
+}
+
+// WaitInspectWithArgs waits for the specified expression to equal the specified expected string in the given time.
+// Deprecated: use cli.WaitCmd instead
+func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error {
+	after := time.After(timeout)
+
+	args := append(arg, "inspect", "-f", expr, name)
+	for {
+		result := icmd.RunCommand(dockerBinary, args...)
+		if result.Error != nil {
+			if !strings.Contains(strings.ToLower(result.Stderr()), "no such") {
+				return errors.Errorf("error executing docker inspect: %s\n%s",
+					result.Stderr(), result.Stdout())
+			}
+			select {
+			case <-after:
+				return result.Error
+			default:
+				time.Sleep(10 * time.Millisecond)
+				continue
+			}
+		}
+
+		out := strings.TrimSpace(result.Stdout())
+		if out == expected {
+			break
+		}
+
+		select {
+		case <-after:
+			return errors.Errorf("condition %q == %q not true in time (%v)", out, expected, timeout)
+		default:
+		}
+
+		time.Sleep(100 * time.Millisecond)
+	}
+	return nil
+}
+
+// BuildImageCmdWithHost creates a build command with the specified arguments.
+// Deprecated
+// FIXME(vdemeester) move this away
+func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd {
+	args := []string{}
+	if host != "" {
+		args = append(args, "--host", host)
+	}
+	args = append(args, "build", "-t", name)
+	if !useCache {
+		args = append(args, "--no-cache")
+	}
+	args = append(args, buildFlags...)
+	args = append(args, "-")
+	buildCmd := exec.Command(dockerBinary, args...)
+ buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd +} diff --git a/integration-cli/daemon/daemon_swarm.go b/integration-cli/daemon/daemon_swarm.go new file mode 100644 index 0000000..a7058c5 --- /dev/null +++ b/integration-cli/daemon/daemon_swarm.go @@ -0,0 +1,566 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" + "github.com/pkg/errors" +) + +// Swarm is a test daemon with helpers for participating in a swarm. +type Swarm struct { + *Daemon + swarm.Info + Port int + ListenAddr string +} + +// Init initializes a new swarm cluster. +func (d *Swarm) Init(req swarm.InitRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.ListenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/init", req) + if status != http.StatusOK { + return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("initializing swarm: %v", err) + } + info, err := d.SwarmInfo() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Join joins a daemon to an existing cluster. +func (d *Swarm) Join(req swarm.JoinRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.ListenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/join", req) + if status != http.StatusOK { + return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("joining swarm: %v", err) + } + info, err := d.SwarmInfo() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Leave forces daemon to leave current cluster. 
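+//
+// For example (sketch; assumes the daemon joined a cluster earlier):
+//
+//   if err := d.Leave(true); err != nil {
+//       c.Fatal(err)
+//   }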
+func (d *Swarm) Leave(force bool) error { + url := "/swarm/leave" + if force { + url += "?force=1" + } + status, out, err := d.SockRequest("POST", url, nil) + if status != http.StatusOK { + return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = fmt.Errorf("leaving swarm: %v", err) + } + return err +} + +// SwarmInfo returns the swarm information of the daemon +func (d *Swarm) SwarmInfo() (swarm.Info, error) { + var info struct { + Swarm swarm.Info + } + status, dt, err := d.SockRequest("GET", "/info", nil) + if status != http.StatusOK { + return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) + } + if err != nil { + return info.Swarm, fmt.Errorf("get swarm info: %v", err) + } + if err := json.Unmarshal(dt, &info); err != nil { + return info.Swarm, err + } + return info.Swarm, nil +} + +// Unlock tries to unlock a locked swarm +func (d *Swarm) Unlock(req swarm.UnlockRequest) error { + status, out, err := d.SockRequest("POST", "/swarm/unlock", req) + if status != http.StatusOK { + return fmt.Errorf("unlocking swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = errors.Wrap(err, "unlocking swarm") + } + return err +} + +// ServiceConstructor defines a swarm service constructor function +type ServiceConstructor func(*swarm.Service) + +// NodeConstructor defines a swarm node constructor +type NodeConstructor func(*swarm.Node) + +// SecretConstructor defines a swarm secret constructor +type SecretConstructor func(*swarm.Secret) + +// ConfigConstructor defines a swarm config constructor +type ConfigConstructor func(*swarm.Config) + +// SpecConstructor defines a swarm spec constructor +type SpecConstructor func(*swarm.Spec) + +// CreateService creates a swarm service given the specified service constructor +func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string { + var service swarm.Service + for _, fn := range f { + fn(&service) + } + status, out, err := d.SockRequest("POST", "/services/create", service.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ServiceCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +// GetService returns the swarm service corresponding to the specified id +func (d *Swarm) GetService(c *check.C, id string) *swarm.Service { + var service swarm.Service + status, out, err := d.SockRequest("GET", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &service), checker.IsNil) + return &service +} + +// GetServiceTasks returns the swarm tasks for the specified service +func (d *Swarm) GetServiceTasks(c *check.C, service string) []swarm.Task { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filterArgs.Add("service", service) + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + return tasks +} + +// CheckServiceTasksInState returns the number of tasks with a matching state, +// 
and an optional message substring.
+func (d *Swarm) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(c *check.C) (interface{}, check.CommentInterface) {
+		tasks := d.GetServiceTasks(c, service)
+		var count int
+		for _, task := range tasks {
+			if task.Status.State == state {
+				if message == "" || strings.Contains(task.Status.Message, message) {
+					count++
+				}
+			}
+		}
+		return count, nil
+	}
+}
+
+// CheckServiceRunningTasks returns the number of running tasks for the specified service
+func (d *Swarm) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
+	return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "")
+}
+
+// CheckServiceUpdateState returns the current update state for the specified service
+func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(c *check.C) (interface{}, check.CommentInterface) {
+		service := d.GetService(c, service)
+		if service.UpdateStatus == nil {
+			return "", nil
+		}
+		return service.UpdateStatus.State, nil
+	}
+}
+
+// CheckServiceTasks returns the number of tasks for the specified service
+func (d *Swarm) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(c *check.C) (interface{}, check.CommentInterface) {
+		tasks := d.GetServiceTasks(c, service)
+		return len(tasks), nil
+	}
+}
+
+// CheckRunningTaskNetworks returns the number of times each network is referenced from a task.
+func (d *Swarm) CheckRunningTaskNetworks(c *check.C) (interface{}, check.CommentInterface) {
+	var tasks []swarm.Task
+
+	filterArgs := filters.NewArgs()
+	filterArgs.Add("desired-state", "running")
+	filters, err := filters.ToParam(filterArgs)
+	c.Assert(err, checker.IsNil)
+
+	status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &tasks), checker.IsNil)
+
+	result := make(map[string]int)
+	for _, task := range tasks {
+		for _, network := range task.Spec.Networks {
+			result[network.Target]++
+		}
+	}
+	return result, nil
+}
+
+// CheckRunningTaskImages returns the number of times each image is used by a running task.
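+//
+// Like the other Check* helpers it is meant to be polled, e.g. with the
+// suite's waitAndAssert helper (hypothetical usage, illustration only):
+//
+//   waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages,
+//       checker.DeepEquals, map[string]int{"busybox:latest": 3})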
+func (d *Swarm) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + if task.Status.State == swarm.TaskStateRunning { + result[task.Spec.ContainerSpec.Image]++ + } + } + return result, nil +} + +// CheckNodeReadyCount returns the number of ready node on the swarm +func (d *Swarm) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.ListNodes(c) + var readyCount int + for _, node := range nodes { + if node.Status.State == swarm.NodeStateReady { + readyCount++ + } + } + return readyCount, nil +} + +// GetTask returns the swarm task identified by the specified id +func (d *Swarm) GetTask(c *check.C, id string) swarm.Task { + var task swarm.Task + + status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &task), checker.IsNil) + return task +} + +// UpdateService updates a swarm service with the specified service constructor +func (d *Swarm) UpdateService(c *check.C, service *swarm.Service, f ...ServiceConstructor) { + for _, fn := range f { + fn(service) + } + url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// RemoveService removes the specified service +func (d *Swarm) RemoveService(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// GetNode returns a swarm node identified by the specified id +func (d *Swarm) GetNode(c *check.C, id string) *swarm.Node { + var node swarm.Node + status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &node), checker.IsNil) + c.Assert(node.ID, checker.Equals, id) + return &node +} + +// RemoveNode removes the specified node +func (d *Swarm) RemoveNode(c *check.C, id string, force bool) { + url := "/nodes/" + id + if force { + url += "?force=1" + } + + status, out, err := d.SockRequest("DELETE", url, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// UpdateNode updates a swarm node with the specified node constructor +func (d *Swarm) UpdateNode(c *check.C, id string, f ...NodeConstructor) { + for i := 0; ; i++ { + node := d.GetNode(c, id) + for _, fn := range f { + fn(node) + } + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, 
node.Version.Index) + status, out, err := d.SockRequest("POST", url, node.Spec) + if i < 10 && strings.Contains(string(out), "update out of sequence") { + time.Sleep(100 * time.Millisecond) + continue + } + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + return + } +} + +// ListNodes returns the list of the current swarm nodes +func (d *Swarm) ListNodes(c *check.C) []swarm.Node { + status, out, err := d.SockRequest("GET", "/nodes", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + nodes := []swarm.Node{} + c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) + return nodes +} + +// ListServices returns the list of the current swarm services +func (d *Swarm) ListServices(c *check.C) []swarm.Service { + status, out, err := d.SockRequest("GET", "/services", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + services := []swarm.Service{} + c.Assert(json.Unmarshal(out, &services), checker.IsNil) + return services +} + +// CreateSecret creates a secret given the specified spec +func (d *Swarm) CreateSecret(c *check.C, secretSpec swarm.SecretSpec) string { + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.SecretCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +// ListSecrets returns the list of the current swarm secrets +func (d *Swarm) ListSecrets(c *check.C) []swarm.Secret { + status, out, err := d.SockRequest("GET", "/secrets", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + secrets := []swarm.Secret{} + c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) + return secrets +} + +// GetSecret returns a swarm secret identified by the specified id +func (d *Swarm) GetSecret(c *check.C, id string) *swarm.Secret { + var secret swarm.Secret + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &secret), checker.IsNil) + return &secret +} + +// DeleteSecret removes the swarm secret identified by the specified id +func (d *Swarm) DeleteSecret(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +// UpdateSecret updates the swarm secret identified by the specified id +// Currently, only label update is supported. 
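+//
+// A sketch of a label update via a SecretConstructor (illustration only):
+//
+//   d.UpdateSecret(c, id, func(s *swarm.Secret) {
+//       s.Spec.Labels = map[string]string{"test": "label"}
+//   })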
+func (d *Swarm) UpdateSecret(c *check.C, id string, f ...SecretConstructor) { + secret := d.GetSecret(c, id) + for _, fn := range f { + fn(secret) + } + url := fmt.Sprintf("/secrets/%s/update?version=%d", secret.ID, secret.Version.Index) + status, out, err := d.SockRequest("POST", url, secret.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +// CreateConfig creates a config given the specified spec +func (d *Swarm) CreateConfig(c *check.C, configSpec swarm.ConfigSpec) string { + status, out, err := d.SockRequest("POST", "/configs/create", configSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ConfigCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +// ListConfigs returns the list of the current swarm configs +func (d *Swarm) ListConfigs(c *check.C) []swarm.Config { + status, out, err := d.SockRequest("GET", "/configs", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + configs := []swarm.Config{} + c.Assert(json.Unmarshal(out, &configs), checker.IsNil) + return configs +} + +// GetConfig returns a swarm config identified by the specified id +func (d *Swarm) GetConfig(c *check.C, id string) *swarm.Config { + var config swarm.Config + status, out, err := d.SockRequest("GET", "/configs/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &config), checker.IsNil) + return &config +} + +// DeleteConfig removes the swarm config identified by the specified id +func (d *Swarm) DeleteConfig(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/configs/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +// UpdateConfig updates the swarm config identified by the specified id +// Currently, only label update is supported. 
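+//
+// Mirrors UpdateSecret; a sketch (illustration only):
+//
+//   d.UpdateConfig(c, id, func(cfg *swarm.Config) {
+//       cfg.Spec.Labels = map[string]string{"rotated": "true"}
+//   })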
+func (d *Swarm) UpdateConfig(c *check.C, id string, f ...ConfigConstructor) {
+	config := d.GetConfig(c, id)
+	for _, fn := range f {
+		fn(config)
+	}
+	url := fmt.Sprintf("/configs/%s/update?version=%d", config.ID, config.Version.Index)
+	status, out, err := d.SockRequest("POST", url, config.Spec)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
+
+// GetSwarm returns the current swarm object
+func (d *Swarm) GetSwarm(c *check.C) swarm.Swarm {
+	var sw swarm.Swarm
+	status, out, err := d.SockRequest("GET", "/swarm", nil)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+	return sw
+}
+
+// UpdateSwarm updates the current swarm object with the specified spec constructors
+func (d *Swarm) UpdateSwarm(c *check.C, f ...SpecConstructor) {
+	sw := d.GetSwarm(c)
+	for _, fn := range f {
+		fn(&sw.Spec)
+	}
+	url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index)
+	status, out, err := d.SockRequest("POST", url, sw.Spec)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
+
+// RotateTokens updates the swarm to rotate its join tokens
+func (d *Swarm) RotateTokens(c *check.C) {
+	var sw swarm.Swarm
+	status, out, err := d.SockRequest("GET", "/swarm", nil)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+
+	url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index)
+	status, out, err = d.SockRequest("POST", url, sw.Spec)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
+
+// JoinTokens returns the current swarm join tokens
+func (d *Swarm) JoinTokens(c *check.C) swarm.JoinTokens {
+	var sw swarm.Swarm
+	status, out, err := d.SockRequest("GET", "/swarm", nil)
+	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+	return sw.JoinTokens
+}
+
+// CheckLocalNodeState returns the current swarm node state
+func (d *Swarm) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
+	info, err := d.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	return info.LocalNodeState, nil
+}
+
+// CheckControlAvailable returns whether the swarm control plane is available on this node
+func (d *Swarm) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) {
+	info, err := d.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+	return info.ControlAvailable, nil
+}
+
+// CheckLeader returns whether the swarm currently has a leader
+func (d *Swarm) CheckLeader(c *check.C) (interface{}, check.CommentInterface) {
+	errList := check.Commentf("could not get node list")
+	status, out, err := d.SockRequest("GET", "/nodes", nil)
+	if err != nil {
+		return err, errList
+	}
+	if status != http.StatusOK {
+		return fmt.Errorf("expected http status OK, got: %d", status), errList
+	}
+
+	var ls []swarm.Node
+	if err := json.Unmarshal(out, &ls); err != nil {
+		return err, errList
+	}
+
+	for _, node := range ls {
+		if node.ManagerStatus != nil && node.ManagerStatus.Leader {
+			return nil, nil
+		}
+	}
+	return fmt.Errorf("no leader"), check.Commentf("could not find leader")
+}
+
+// CmdRetryOutOfSequence retries the specified command against the current daemon up to 10 times when it fails with "update out of sequence"
+func (d *Swarm) CmdRetryOutOfSequence(args ...string) (string, error) {
+	for i := 0; ; i++ {
+		out, err := d.Cmd(args...)
+		if err != nil {
+			if strings.Contains(out, "update out of sequence") {
+				if i < 10 {
+					continue
+				}
+			}
+		}
+		return out, err
+	}
+}
diff --git a/integration-cli/daemon/daemon_unix.go b/integration-cli/daemon/daemon_unix.go
new file mode 100644
index 0000000..cacff72
--- /dev/null
+++ b/integration-cli/daemon/daemon_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/go-check/check"
+)
+
+func cleanupExecRoot(c *check.C, execRoot string) {
+	// Cleanup network namespaces in the exec root of this
+	// daemon because this exec root is specific to this
+	// daemon instance and has no chance of getting
+	// cleaned up when a new daemon is instantiated with a
+	// new exec root.
+	netnsPath := filepath.Join(execRoot, "netns")
+	filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
+		if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil {
+			c.Logf("unmount of %s failed: %v", path, err)
+		}
+		os.Remove(path)
+		return nil
+	})
+}
+
+// SignalDaemonDump sends a signal to the daemon to write a dump file
+func SignalDaemonDump(pid int) {
+	syscall.Kill(pid, syscall.SIGQUIT)
+}
+
+func signalDaemonReload(pid int) error {
+	return syscall.Kill(pid, syscall.SIGHUP)
+}
diff --git a/integration-cli/daemon/daemon_windows.go b/integration-cli/daemon/daemon_windows.go
new file mode 100644
index 0000000..81dae7a
--- /dev/null
+++ b/integration-cli/daemon/daemon_windows.go
@@ -0,0 +1,54 @@
+package daemon
+
+import (
+	"fmt"
+	"strconv"
+	"syscall"
+	"unsafe"
+
+	"github.com/go-check/check"
+	"golang.org/x/sys/windows"
+)
+
+func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) {
+	namep, _ := syscall.UTF16PtrFromString(name)
+	var _p2 uint32
+	if inheritHandle {
+		_p2 = 1
+	}
+	r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+	handle = syscall.Handle(r0)
+	if handle == syscall.InvalidHandle {
+		err = e1
+	}
+	return
+}
+
+func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) {
+	r0, _, _ := proc.Call(uintptr(handle))
+	if r0 != 0 {
+		err = syscall.Errno(r0)
+	}
+	return
+}
+
+// SignalDaemonDump sends a signal to the daemon to write a dump file
+func SignalDaemonDump(pid int) {
+	modkernel32 := windows.NewLazySystemDLL("kernel32.dll")
+	procOpenEvent := modkernel32.NewProc("OpenEventW")
+	procPulseEvent := modkernel32.NewProc("PulseEvent")
+
+	ev := "Global\\docker-daemon-" + strconv.Itoa(pid)
+	h2, _ := openEvent(0x0002, false, ev, procOpenEvent)
+	if h2 == 0 {
+		return
+	}
+	pulseEvent(h2, procPulseEvent)
+}
+
+func signalDaemonReload(pid int) error {
+	return fmt.Errorf("daemon reload not supported")
+}
+
+func cleanupExecRoot(c *check.C, execRoot string) {
+}
diff --git a/integration-cli/daemon_swarm_hack_test.go b/integration-cli/daemon_swarm_hack_test.go
new file mode 100644
index 0000000..e1fb333
--- /dev/null
+++ b/integration-cli/daemon_swarm_hack_test.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	
"github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Swarm { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + if d.NodeID == nodeID { + return d + } + } + c.Fatalf("could not find node with id: %s", nodeID) + return nil +} + +// nodeCmd executes a command on a given node via the normal docker socket +func (s *DockerSwarmSuite) nodeCmd(c *check.C, id string, args ...string) (string, error) { + return s.getDaemon(c, id).Cmd(args...) +} diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go new file mode 100644 index 0000000..11f7340 --- /dev/null +++ b/integration-cli/docker_api_attach_test.go @@ -0,0 +1,210 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" + "golang.org/x/net/websocket" +) + +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + rwc, err := request.SockConn(time.Duration(10*time.Second), daemonHost()) + c.Assert(err, checker.IsNil) + + cleanedContainerID := strings.TrimSpace(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + c.Assert(err, checker.IsNil) + + ws, err := websocket.NewClient(config, rwc) + c.Assert(err, checker.IsNil) + defer ws.Close() + + expected := []byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := io.ReadFull(ws, actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := ws.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } + + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) +} + +// regression gh14320 +func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { + client, err := request.NewHTTPClient(daemonHost()) + c.Assert(err, checker.IsNil) + req, err := request.New(daemonHost(), "/containers/doesnotexist/attach", request.Method(http.MethodPost)) + resp, err := client.Do(req) + // connection will shutdown, err should be "persistent connection closed" + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) + content, err := testutil.ReadBody(resp.Body) + c.Assert(err, checker.IsNil) + expected := "No such container: doesnotexist\r\n" + c.Assert(string(content), checker.Equals, expected) +} + +func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { + status, body, err := request.SockRequest("GET", "/containers/doesnotexist/attach/ws", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(err, checker.IsNil) + expected := "No such container: doesnotexist" + 
c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestPostContainersAttach(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { + defer conn.Close() + expected := []byte("success") + _, err := conn.Write(expected) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + lenHeader := 0 + if !tty { + lenHeader = 8 + } + actual := make([]byte, len(expected)+lenHeader) + _, err = io.ReadFull(br, actual) + c.Assert(err, checker.IsNil) + if !tty { + fdMap := map[string]byte{ + "stdin": 0, + "stdout": 1, + "stderr": 2, + } + c.Assert(actual[0], checker.Equals, fdMap[stream]) + } + c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) + } + + expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { + defer conn.Close() + _, err := conn.Write([]byte{'t'}) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + actual := make([]byte, 1) + _, err = io.ReadFull(br, actual) + opErr, ok := err.(*net.OpError) + c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err)) + c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) + } + + // Create a container that only emits stdout. + cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cid = strings.TrimSpace(cid) + // Attach to the container's stdout stream. + conn, br, err := request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + // Check if the data from stdout can be received. + expectSuccess(conn, br, "stdout", false) + // Attach to the container's stderr stream. + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + // Since the container only emits stdout, attaching to stderr should return nothing. + expectTimeout(conn, br, "stdout") + + // Test the similar functions of the stderr stream. + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stderr", false) + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + expectTimeout(conn, br, "stderr") + + // Test with tty. + cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + // Attach to stdout only. + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stdout", true) + + // Attach without stdout stream. + conn, br, err = request.SockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) + c.Assert(err, checker.IsNil) + // Nothing should be received because both the stdout and stderr of the container will be + // sent to the client as stdout when tty is enabled. 
+ expectTimeout(conn, br, "stdout") + + // Test the client API + // Make sure we don't see "hello" if Logs is false + client, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat") + cid = strings.TrimSpace(cid) + + attachOpts := types.ContainerAttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + } + + resp, err := client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + expectSuccess(resp.Conn, resp.Reader, "stdout", false) + + // Make sure we do see "hello" if Logs is true + attachOpts.Logs = true + resp, err = client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + + defer resp.Conn.Close() + resp.Conn.SetReadDeadline(time.Now().Add(time.Second)) + + _, err = resp.Conn.Write([]byte("success")) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := new(bytes.Buffer) + stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader) + c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout")) +} diff --git a/integration-cli/docker_api_auth_test.go b/integration-cli/docker_api_auth_test.go new file mode 100644 index 0000000..cc903c0 --- /dev/null +++ b/integration-cli/docker_api_auth_test.go @@ -0,0 +1,26 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +// Test case for #22244 +func (s *DockerSuite) TestAuthAPI(c *check.C) { + testRequires(c, Network) + config := types.AuthConfig{ + Username: "no-user", + Password: "no-password", + } + + expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" + status, body, err := request.SockRequest("POST", "/auth", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusUnauthorized) + msg := getErrorMessage(c, body) + c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) +} diff --git a/integration-cli/docker_api_build_test.go b/integration-cli/docker_api_build_test.go new file mode 100644 index 0000000..c1ab766 --- /dev/null +++ b/integration-cli/docker_api_build_test.go @@ -0,0 +1,535 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client/session" + "github.com/docker/docker/client/session/filesync" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakegit" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { + testRequires(c, NotUserNamespace) + var testD string + if testEnv.DaemonPlatform() == "windows" { + testD = `FROM busybox +RUN find / -name ba* +RUN find /tmp/` + } else { + // -xdev is required because sysfs can cause EPERM + testD = `FROM 
busybox +RUN find / -xdev -name ba* +RUN find /tmp/` + } + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"testD": testD})) + defer server.Close() + + res, body, err := request.Post("/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + // Make sure Dockerfile exists. + // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL + out := string(buf) + c.Assert(out, checker.Contains, "RUN find /tmp") + c.Assert(out, checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte("FROM busybox") + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "testT.tar": buffer, + })) + defer server.Close() + + res, b, err := request.Post("/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + b.Close() +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox +RUN echo 'wrong'`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + custom := []byte(`FROM busybox +RUN echo 'right' +`) + err = tw.WriteHeader(&tar.Header{ + Name: "custom", + Size: int64(len(custom)), + }) + + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(custom) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "testT.tar": buffer, + })) + defer server.Close() + + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" + res, body, err := request.Post(url, request.ContentType("application/tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + defer body.Close() + content, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + // Build used the wrong dockerfile. 
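+	// (if "wrong" appeared in the output, the build would have used the default
+	// Dockerfile instead of the one selected with ?dockerfile=custom)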
+	c.Assert(string(content), checker.Not(checker.Contains), "wrong")
+}
+
+func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) {
+	git := fakegit.New(c, "repo", map[string]string{
+		"dockerfile": `FROM busybox
+RUN echo from dockerfile`,
+	}, false)
+	defer git.Close()
+
+	res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON)
+	c.Assert(err, checker.IsNil)
+	c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
+
+	buf, err := testutil.ReadBody(body)
+	c.Assert(err, checker.IsNil)
+
+	out := string(buf)
+	c.Assert(out, checker.Contains, "from dockerfile")
+}
+
+func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) {
+	git := fakegit.New(c, "repo", map[string]string{
+		"baz": `FROM busybox
+RUN echo from baz`,
+		"Dockerfile": `FROM busybox
+RUN echo from Dockerfile`,
+	}, false)
+	defer git.Close()
+
+	// Make sure it uses the 'dockerfile' query param value
+	res, body, err := request.Post("/build?dockerfile=baz&remote="+git.RepoURL, request.JSON)
+	c.Assert(err, checker.IsNil)
+	c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
+
+	buf, err := testutil.ReadBody(body)
+	c.Assert(err, checker.IsNil)
+
+	out := string(buf)
+	c.Assert(out, checker.Contains, "from baz")
+}
+
+func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) {
+	testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows
+	git := fakegit.New(c, "repo", map[string]string{
+		"Dockerfile": `FROM busybox
+RUN echo from Dockerfile`,
+		"dockerfile": `FROM busybox
+RUN echo from dockerfile`,
+	}, false)
+	defer git.Close()
+
+	// Make sure it picks 'Dockerfile' when no 'dockerfile' query param is given
+	res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON)
+	c.Assert(err, checker.IsNil)
+	c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
+
+	buf, err := testutil.ReadBody(body)
+	c.Assert(err, checker.IsNil)
+
+	out := string(buf)
+	c.Assert(out, checker.Contains, "from Dockerfile")
+}
+
+func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) {
+	// Make sure that build context tars with entries of the form
+	// x/./y don't cause caching false positives.
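+	// Two contexts differing only in one file's contents are built below; equal
+	// image IDs would mean the unnormalized "dir/./file" entry hit the cache.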
+ + buildFromTarContext := func(fileContents []byte) string { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + COPY dir /dir/`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write Dockerfile in tar file content + c.Assert(err, checker.IsNil) + + err = tw.WriteHeader(&tar.Header{ + Name: "dir/./file", + Size: int64(len(fileContents)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(fileContents) + // failed to write file contents in tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + res, body, err := request.Post("/build", request.RawContent(ioutil.NopCloser(buffer)), request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + lines := strings.Split(string(out), "\n") + c.Assert(len(lines), checker.GreaterThan, 1) + c.Assert(lines[len(lines)-2], checker.Matches, ".*Successfully built [0-9a-f]{12}.*") + + re := regexp.MustCompile("Successfully built ([0-9a-f]{12})") + matches := re.FindStringSubmatch(lines[len(lines)-2]) + return matches[1] + } + + imageA := buildFromTarContext([]byte("abc")) + imageB := buildFromTarContext([]byte("def")) + + c.Assert(imageA, checker.Not(checker.Equals), imageB) +} + +func (s *DockerSuite) TestBuildOnBuildWithCopy(c *check.C) { + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ONBUILD COPY file /file + + FROM onbuildbase + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "some content"), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(out), checker.Contains, "Successfully built") +} + +func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) { + build := func(dockerfile string) []byte { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") + return out + } + + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ENV something=bar + ONBUILD ENV foo=bar + ` + build(dockerfile) + + dockerfile += "FROM onbuildbase" + out := build(dockerfile) + + imageIDs := getImageIDsFromBuild(c, out) + assert.Len(c, imageIDs, 2) + parentID, childID := imageIDs[0], imageIDs[1] + + client, err := request.NewClient() + require.NoError(c, err) + + // check parentID is correct + image, _, err := client.ImageInspectWithRaw(context.Background(), childID) + require.NoError(c, err) + assert.Equal(c, parentID, image.Parent) +} + +func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { + client, err := request.NewClient() + require.NoError(c, 
err) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + err = client.ImageTag(context.TODO(), "busybox", repoName) + assert.Nil(c, err) + // push the image to the registry + rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"}) + assert.Nil(c, err) + _, err = io.Copy(ioutil.Discard, rc) + assert.Nil(c, err) + + dockerfile := fmt.Sprintf(` + FROM %s AS foo + RUN touch abc + FROM %s + COPY --from=foo /abc / + `, repoName, repoName) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build?pull=1", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") +} + +func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + dt := []byte("contents") + err := tw.WriteHeader(&tar.Header{ + Name: "foo", + Size: int64(len(dt)), + Mode: 0600, + Typeflag: tar.TypeReg, + }) + require.NoError(c, err) + _, err = tw.Write(dt) + require.NoError(c, err) + err = tw.Close() + require.NoError(c, err) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "test.tar": buffer, + })) + defer server.Close() + + dockerfile := fmt.Sprintf(` + FROM busybox + ADD %s/test.tar / + RUN [ -f test.tar ] + `, server.URL()) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") +} + +func (s *DockerSuite) TestBuildWithSession(c *check.C) { + testRequires(c, ExperimentalDaemon) + + dockerfile := ` + FROM busybox + COPY file / + RUN cat /file + ` + + fctx := fakecontext.New(c, "", + fakecontext.WithFile("file", "some content"), + ) + defer fctx.Close() + + out := testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Contains(c, out, "some content") + + fctx.Add("second", "contentcontent") + + dockerfile += ` + COPY second / + RUN cat /second + ` + + out = testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Equal(c, strings.Count(out, "Using cache"), 2) + assert.Contains(c, out, "contentcontent") + + client, err := request.NewClient() + require.NoError(c, err) + + du, err := client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.True(c, du.BuilderSize > 10) + + out = testBuildWithSession(c, fctx.Dir, dockerfile) + assert.Equal(c, strings.Count(out, "Using cache"), 4) + + du2, err := client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.Equal(c, du.BuilderSize, du2.BuilderSize) + + // rebuild with regular tar, confirm cache still applies + fctx.Add("Dockerfile", dockerfile) + res, body, err := request.Post( + "/build", + request.RawContent(fctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + require.NoError(c, err) + assert.Equal(c, http.StatusOK, res.StatusCode) + + outBytes, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(outBytes), "Successfully built") + 
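+	// all four build steps should still be served from the session-primed cache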
assert.Equal(c, strings.Count(string(outBytes), "Using cache"), 4) + + _, err = client.BuildCachePrune(context.TODO()) + assert.Nil(c, err) + + du, err = client.DiskUsage(context.TODO()) + assert.Nil(c, err) + assert.Equal(c, du.BuilderSize, int64(0)) +} + +func testBuildWithSession(c *check.C, dir, dockerfile string) (outStr string) { + client, err := request.NewClient() + require.NoError(c, err) + + sess, err := session.NewSession("foo1", "foo") + assert.Nil(c, err) + + fsProvider := filesync.NewFSSyncProvider(dir, nil) + sess.Allow(fsProvider) + + g, ctx := errgroup.WithContext(context.Background()) + + g.Go(func() error { + return sess.Run(ctx, client.DialSession) + }) + + g.Go(func() error { + res, body, err := request.Post("/build?remote=client-session&session="+sess.UUID(), func(req *http.Request) error { + req.Body = ioutil.NopCloser(strings.NewReader(dockerfile)) + return nil + }) + if err != nil { + return err + } + assert.Equal(c, res.StatusCode, http.StatusOK) + out, err := testutil.ReadBody(body) + require.NoError(c, err) + assert.Contains(c, string(out), "Successfully built") + sess.Close() + outStr = string(out) + return nil + }) + + err = g.Wait() + assert.Nil(c, err) + return +} + +type buildLine struct { + Stream string + Aux struct { + ID string + } +} + +func getImageIDsFromBuild(c *check.C, output []byte) []string { + ids := []string{} + for _, line := range bytes.Split(output, []byte("\n")) { + if len(line) == 0 { + continue + } + entry := buildLine{} + require.NoError(c, json.Unmarshal(line, &entry)) + if entry.Aux.ID != "" { + ids = append(ids, entry.Aux.ID) + } + } + return ids +} diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go new file mode 100644 index 0000000..84e0092 --- /dev/null +++ b/integration-cli/docker_api_containers_test.go @@ -0,0 +1,1950 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) { + startCount := getContainerCount(c) + name := "getall" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var inspectJSON []struct { + Names []string + } + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) + + c.Assert(inspectJSON, checker.HasLen, startCount+1) + + actual := inspectJSON[0].Names[0] + c.Assert(actual, checker.Equals, "/"+name) +} + +// regression test for empty json field being omitted #13691 +func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { + dockerCmd(c, 
"run", "busybox", "true") + + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + // empty Labels field triggered this bug, make sense to check for everything + // cause even Ports for instance can trigger this bug + // better safe than sorry.. + fields := []string{ + "Id", + "Names", + "Image", + "Command", + "Created", + "Ports", + "Labels", + "Status", + "NetworkSettings", + } + + // decoding into types.Container do not work since it eventually unmarshal + // and empty field to an empty go map, so we just check for a string + for _, f := range fields { + if !strings.Contains(string(body), f) { + c.Fatalf("Field %s is missing and it shouldn't", f) + } + } +} + +type containerPs struct { + Names []string + Ports []map[string]interface{} +} + +// regression test for non-empty fields from #13901 +func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) { + // Problematic for Windows porting due to networking not yet being passed back + testRequires(c, DaemonIsLinux) + name := "pstest" + port := 80 + runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port)) + + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var resp []containerPs + err = json.Unmarshal(body, &resp) + c.Assert(err, checker.IsNil) + + var foundContainer *containerPs + for _, container := range resp { + for _, testName := range container.Names { + if "/"+name == testName { + foundContainer = &container + break + } + } + } + + c.Assert(foundContainer.Ports, checker.HasLen, 1) + c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) + _, ok := foundContainer.Ports[0]["PublicPort"] + c.Assert(ok, checker.Not(checker.Equals), true) + _, ok = foundContainer.Ports[0]["IP"] + c.Assert(ok, checker.Not(checker.Equals), true) +} + +func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { + // Not supported on Windows as Windows does not support docker export + testRequires(c, DaemonIsLinux) + name := "exportcontainer" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") + + status, body, err := request.SockRequest("GET", "/containers/"+name+"/export", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil && err == io.EOF { + break + } + if h.Name == "test" { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image")) +} + +func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) { + // Not supported on Windows as Windows does not support docker diff (/containers/name/changes) + testRequires(c, DaemonIsLinux) + name := "changescontainer" + dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") + + status, body, err := request.SockRequest("GET", "/containers/"+name+"/changes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + changes := []struct { + Kind int + Path string + }{} + c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) + + // Check the changelog for removal of /etc/passwd + success := false + for _, elem := range changes { + if 
elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff")) +} + +func (s *DockerSuite) TestGetContainerStats(c *check.C) { + var ( + name = "statscontainer" + ) + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.SockRequest("GET", "/containers/"+name+"/stats", nil, daemonHost()) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + var s *types.Stats + // decode only one object from the stream + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { + out := runSleepingContainer(c) + id := strings.TrimSpace(out) + + buf := &testutil.ChannelBuffer{C: make(chan []byte, 1)} + defer buf.Close() + + _, body, err := request.Get("/containers/"+id+"/stats?stream=1", request.JSON) + c.Assert(err, checker.IsNil) + defer body.Close() + + chErr := make(chan error, 1) + go func() { + _, err = io.Copy(buf, body) + chErr <- err + }() + + b := make([]byte, 32) + // make sure we've got some stats + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + // Now remove without `-f` and make sure we are still pulling stats + _, _, err = dockerCmdWithError("rm", id) + c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", id) + c.Assert(<-chErr, checker.IsNil) +} + +// regression test for gh13421 +// previous test was just checking one stat entry so it didn't fail (stats with +// stream false always return one stat) +func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body io.ReadCloser + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.Get("/containers/" + name + "/stats") + bc <- b{status.StatusCode, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
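+	// the stats stream must terminate once the container is removed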
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + b, err := ioutil.ReadAll(sr.body) + c.Assert(err, checker.IsNil) + s := string(b) + // count occurrences of "read" of types.Stats + if l := strings.Count(s, "read"); l < 2 { + c.Fatalf("Expected more than one stat streamed, got %d", l) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.SockRequest("GET", "/containers/"+name+"/stats?stream=0", nil, daemonHost()) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of `"read"` of types.Stats + c.Assert(strings.Count(s, `"read"`), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, `"read"`))) + } +} + +func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { + name := "statscontainer" + dockerCmd(c, "create", "--name", name, "busybox", "ps") + + type stats struct { + status int + err error + } + chResp := make(chan stats) + + // We expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine + // below we'll check this on a timeout. 
+ go func() { + resp, body, err := request.Get("/containers/" + name + "/stats") + body.Close() + chResp <- stats{resp.StatusCode, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.status, checker.Equals, http.StatusOK) + case <-time.After(10 * time.Second): + c.Fatal("timeout waiting for stats response for stopped container") + } +} + +func (s *DockerSuite) TestContainerAPIPause(c *check.C) { + // Problematic on Windows as Windows does not support pause + testRequires(c, DaemonIsLinux) + + getPaused := func(c *check.C) []string { + return strings.Fields(cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined()) + } + + out := cli.DockerCmd(c, "run", "-d", "busybox", "sleep", "30").Combined() + ContainerID := strings.TrimSpace(out) + + resp, _, err := request.Post("/containers/" + ContainerID + "/pause") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) + + pausedContainers := getPaused(c) + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + } + + resp, _, err = request.Post("/containers/" + ContainerID + "/unpause") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) + + pausedContainers = getPaused(c) + c.Assert(pausedContainers, checker.HasLen, 0, check.Commentf("There should be no paused container.")) +} + +func (s *DockerSuite) TestContainerAPITop(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := request.SockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)) + c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") + c.Assert(top.Processes[1][10], checker.Equals, "top") +} + +func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + out := runSleepingContainer(c, "-d") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := request.SockRequest("GET", "/containers/"+id+"/top", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 4, check.Commentf("expected 4 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "Name" || top.Titles[3] != "Private Working Set" { + c.Fatalf("expected `Name` at `Titles[0]` and `Private Working Set` at Titles[3]: %v", top.Titles) + } + c.Assert(len(top.Processes), checker.GreaterOrEqualThan, 2, check.Commentf("expected at least 2 processes, 
found %d: %v", len(top.Processes), top.Processes)) + + foundProcess := false + expectedProcess := "busybox.exe" + for _, process := range top.Processes { + if process[0] == expectedProcess { + foundProcess = true + break + } + } + + c.Assert(foundProcess, checker.Equals, true, check.Commentf("expected to find %s: %v", expectedProcess, top.Processes)) +} + +func (s *DockerSuite) TestContainerAPICommit(c *check.C) { + cName := "testapicommit" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + name := "testcontainerapicommit" + status, b, err := request.SockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { + cName := "testapicommitwithconfig" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + config := map[string]interface{}{ + "Labels": map[string]string{"key1": "value1", "key2": "value2"}, + } + + name := "testcontainerapicommitwithconfig" + status, b, err := request.SockRequest("POST", "/commit?repo="+name+"&container="+cName, config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") + c.Assert(label1, checker.Equals, "value1") + + label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2") + c.Assert(label2, checker.Equals, "value2") + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) { + // TODO Windows to Windows CI - Port this test + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "echo test"}, + "PortBindings": map[string]interface{}{ + "8080/tcp": []map[string]interface{}{ + { + "HostIP": "", + "HostPort": "aa80", + }, + }, + }, + } + + jsonData := bytes.NewBuffer(nil) + json.NewEncoder(jsonData).Encode(config) + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) +} + +func (s *DockerSuite) TestContainerAPICreate(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + } + + status, b, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type createResp struct { + ID string + } + var container 
createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + + out, _ := dockerCmd(c, "start", "-a", container.ID) + c.Assert(strings.TrimSpace(out), checker.Equals, "/test") +} + +func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) { + config := map[string]interface{}{} + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + expected := "Config cannot be empty in order to create a container" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { + // Container creation must fail if client specified configurations for more than one network + config := map[string]interface{}{ + "Image": "busybox", + "NetworkingConfig": networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, + }, + }, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + msg := getErrorMessage(c, body) + // network name order in error message is not deterministic + c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") + c.Assert(msg, checker.Contains, "net1") + c.Assert(msg, checker.Contains, "net2") + c.Assert(msg, checker.Contains, "net3") +} + +func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) { + domainName := "test-domain" + hostName := "test-hostname" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + "Domainname": domainName, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) + c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) +} + +func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) { + // Windows does not support bridge + testRequires(c, DaemonIsLinux) + UtilCreateNetworkMode(c, "bridge") +} + +func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) { + // Windows does not support these network modes + testRequires(c, DaemonIsLinux, NotUserNamespace) + UtilCreateNetworkMode(c, "host") + UtilCreateNetworkMode(c, "container:web1") +} + +func UtilCreateNetworkMode(c *check.C, networkMode string) { + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) 
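+	// inspect the container to confirm the requested NetworkMode was applied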
+ + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) +} + +func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) { + // TODO Windows to Windows CI. The CpuShares part could be ported. + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "CpuShares": 512, + "CpusetCpus": "0", + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + + out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") + c.Assert(out, checker.Equals, "512") + + outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus") + c.Assert(outCpuset, checker.Equals, "0") +} + +func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) + return request.Post("/containers/create", request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType(ct)) + } + + // Try with no content-type + res, body, err := create("") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + body.Close() +} + +//Issue 14230. 
daemon should return 500 for invalid port syntax +func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "NetworkMode": "default", + "PortBindings": { + "19039;1230": [ + {} + ] + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid port") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "something", + "MaximumRetryCount": 0 + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "always", + "MaximumRetryCount": 2 + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": -2 + } + } + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": 0 + } + } + }` + + res, _, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. 
+// W/o this fix a null in JSON would be parsed into a string var as "null" +func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { + config := `{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"busybox", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + out := inspectField(c, container.ID, "HostConfig.CpusetCpus") + c.Assert(out, checker.Equals, "") + + outMemory := inspectField(c, container.ID, "HostConfig.Memory") + c.Assert(outMemory, checker.Equals, "0") + outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap") + c.Assert(outMemorySwap, checker.Equals, "0") +} + +func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + config := `{ + "Image": "busybox", + "Cmd": "ls", + "OpenStdin": true, + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + b, err2 := testutil.ReadBody(body) + c.Assert(err2, checker.IsNil) + + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +func (s *DockerSuite) TestContainerAPIRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "TestContainerAPIRename", "-d", "busybox", "sh") + + containerID := strings.TrimSpace(out) + newName := "TestContainerAPIRenameNew" + statusCode, _, err := request.SockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil, daemonHost()) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + + name := inspectField(c, containerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) +} + +func (s *DockerSuite) TestContainerAPIKill(c *check.C) { + name := "test-api-kill" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/kill", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + state := inspectField(c, name, "State.Running") + c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) +} + +func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { + name := "test-api-restart" + runSleepingContainer(c, "-di", "--name", name) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/restart?t=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c 
*check.C) { + name := "test-api-restart-no-timeout-param" + out := runSleepingContainer(c, "-di", "--name", name) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/restart", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIStart(c *check.C) { + name := "testing-start" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + status, _, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = request.SockRequest("POST", "/containers/"+name+"/start", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + // second call to start should give 304 + status, _, err = request.SockRequest("POST", "/containers/"+name+"/start", nil, daemonHost()) + c.Assert(err, checker.IsNil) + + // TODO(tibor): figure out why this doesn't work on windows + if testEnv.LocalDaemon() { + c.Assert(status, checker.Equals, http.StatusNotModified) + } +} + +func (s *DockerSuite) TestContainerAPIStop(c *check.C) { + name := "test-api-stop" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/stop?t=30", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + // second call to stop should give 304 + status, _, err = request.SockRequest("POST", "/containers/"+name+"/stop?t=30", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotModified) +} + +func (s *DockerSuite) TestContainerAPIWait(c *check.C) { + name := "test-api-wait" + + sleepCmd := "/bin/sleep" + if testEnv.DaemonPlatform() == "windows" { + sleepCmd = "sleep" + } + dockerCmd(c, "run", "--name", name, "busybox", sleepCmd, "2") + + status, body, err := request.SockRequest("POST", "/containers/"+name+"/wait", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + var waitres containertypes.ContainerWaitOKBody + c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) + c.Assert(waitres.StatusCode, checker.Equals, int64(0)) +} + +func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, _, err := request.SockRequest("POST", "/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + 
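+	// The deprecated copy endpoint streams back a tar archive of the
+	// requested resource; the loop below walks that archive and checks
+	// that test.txt was included.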
+ status, body, err := request.SockRequest("POST", "/v1.23/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + c.Assert(found, checker.True) +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-empty" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "", + } + + status, body, err := request.SockRequest("POST", "/v1.23/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Path cannot be empty\n") +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-not-found" + dockerCmd(c, "run", "--name", name, "busybox") + + postData := types.CopyConfig{ + Resource: "/notexist", + } + + status, body, err := request.SockRequest("POST", "/v1.23/containers/"+name+"/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") +} + +func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + postData := types.CopyConfig{ + Resource: "/something", + } + + status, _, err := request.SockRequest("POST", "/v1.23/containers/notexists/copy", postData, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { + out := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + dockerCmd(c, "stop", id) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { + status, body, err := request.SockRequest("DELETE", "/containers/doesnotexist", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") +} + +func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { + out := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id+"?force=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + out, _ = dockerCmd(c, "run", "--link", 
"tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") + + id2 := strings.TrimSpace(out) + c.Assert(waitRun(id2), checker.IsNil) + + links := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) + + status, b, err := request.SockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) + + linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) +} + +func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { + out := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + vol := "/testvolume" + if testEnv.DaemonPlatform() == "windows" { + vol = `c:\testvolume` + } + + out := runSleepingContainer(c, "-v", vol) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + source, err := inspectMountSourceField(id, vol) + _, err = os.Stat(source) + c.Assert(err, checker.IsNil) + + status, _, err := request.SockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + _, err = os.Stat(source) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) +} + +// Regression test for https://github.com/docker/docker/issues/6231 +func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { + + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + resp, _, err := request.Post("/containers/create", request.JSONBody(config), func(req *http.Request) error { + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + return nil + }) + c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) + defer resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) +} + +func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { + out := runSleepingContainer(c) + + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + statusCode, _, err := request.SockRequest("POST", "/containers/"+containerID+"/stop", nil, daemonHost()) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) +} + +// #14170 +func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd []string + }{"busybox", "echo", []string{"hello", "world"}} + _, _, err := request.SockRequest("POST", 
"/containers/create?name=echotest", config, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Entrypoint []string + Cmd []string + }{"busybox", []string{"echo"}, []string{"hello", "world"}} + _, _, err = request.SockRequest("POST", "/containers/create?name=echotest2", config2, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err := request.SockRequest("POST", "/containers/create?name=echotest", config, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Cmd []string + }{"busybox", []string{"echo", "hello", "world"}} + _, _, err = request.SockRequest("POST", "/containers/create?name=echotest2", config2, daemonHost()) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// regression #14318 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { + // Windows doesn't support CapAdd/CapDrop + testRequires(c, DaemonIsLinux) + config := struct { + Image string + CapAdd string + CapDrop string + }{"busybox", "NET_ADMIN", "SYS_ADMIN"} + status, _, err := request.SockRequest("POST", "/containers/create?name=capaddtest0", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config2 := struct { + Image string + CapAdd []string + CapDrop []string + }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} + status, _, err = request.SockRequest("POST", "/containers/create?name=capaddtest1", config2, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// #14915 +func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later + config := struct { + Image string + }{"busybox"} + status, _, err := request.SockRequest("POST", "/v1.18/containers/create", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// Ensure an error occurs when you have a container read-only rootfs but you +// extract an archive to a symlink in a writable volume which points to a +// directory outside of the volume. +func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { + // Windows does not support read-only rootfs + // Requires local volume mount bind. + // --read-only + userns has remount issues + testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) + + testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // Attempt to extract to a symlink in the volume which points to a + // directory outside the volume. 
This should cause an error because the + // rootfs is read-only. + query := make(url.Values, 1) + query.Set("path", "/vol2/symlinkToAbsDir") + urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) + + statusCode, body, err := request.SockRequest("PUT", urlPath, nil, daemonHost()) + c.Assert(err, checker.IsNil) + + if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { + c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) + } +} + +func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) { + status, body, err := request.SockRequest("GET", "/containers/json?all=1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(string(body), checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { + // Not supported on Windows + testRequires(c, DaemonIsLinux) + + c1 := struct { + Image string + CpusetCpus string + }{"busybox", "1-42,,"} + name := "wrong-cpuset-cpus" + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, c1, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected := "Invalid value 1-42,, for cpuset cpus" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + c2 := struct { + Image string + CpusetMems string + }{"busybox", "42-3,1--"} + name = "wrong-cpuset-mems" + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, c2, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected = "Invalid value 42-3,1-- for cpuset mems" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"ShmSize": -1}, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size can not be less than 0") +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + var defaultSHMSize int64 = 67108864 + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + 
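+// Illustrative sketch (not part of the original suite): the daemon keeps
+// HostConfig.ShmSize in bytes, while the tmpfs mount options printed by
+// `mount` show kilobytes, so the 67108864-byte default surfaces above as
+// "size=65536k". A hypothetical helper expressing that relationship:
+func expectedShmMountOption(sizeBytes int64) string {
+	return fmt.Sprintf("size=%dk", sizeBytes/1024)
+}
+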
+func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{}, + "Cmd": "mount", + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { + // Swappiness is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + } + + status, body, err := request.SockRequest("POST", "/containers/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = request.SockRequest("GET", "/containers/"+container.ID+"/json", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) +} + +// check validation is done daemon side and not only in cli +func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) { + // OomScoreAdj is not supported on Windows + testRequires(c, DaemonIsLinux) + + config := struct { + Image 
string + OomScoreAdj int + }{"busybox", 1001} + name := "oomscoreadj-over" + status, b, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]" + msg := getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } + + config = struct { + Image string + OomScoreAdj int + }{"busybox", -1001} + name = "oomscoreadj-low" + status, b, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]" + msg = getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } +} + +// test case for #22210 where an empty container name caused panic. +func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) { + status, out, err := request.SockRequest("DELETE", "/containers/", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(out), checker.Contains, "No container name or ID supplied") +} + +func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { + // Problematic on Windows as Windows does not support stats + testRequires(c, DaemonIsLinux) + + name := "testing-network-disabled" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "NetworkDisabled": true, + } + + status, _, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = request.SockRequest("POST", "/containers/"+name+"/start", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + c.Assert(waitRun(name), check.IsNil) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := request.SockRequest("GET", "/containers/"+name+"/stats", nil, daemonHost()) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
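+	// The select below races a 2-second timer against the goroutine reading
+	// the stats stream; if the daemon kept the stream open after the
+	// container was removed, the timer fires first and the test fails.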
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + // decode only one object from the stream + var s *types.Stats + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { + type m mounttypes.Mount + type hc struct{ Mounts []m } + type cfg struct { + Image string + HostConfig hc + } + type testCase struct { + config cfg + status int + msg string + } + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + notExistPath := prefix + slash + "notexist" + + cases := []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "notreal", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "mount type unknown", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: notExistPath, + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "bind source path does not exist", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello2", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local"}}}}}}, + status: http.StatusCreated, + msg: "", + }, + } + + if SameHostDaemon() { + tmpDir, err := ioutils.TempDir("", "test-mounts-api") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{}}}}}, + status: http.StatusBadRequest, + msg: "VolumeOptions must not be specified", + }, + }...) 
+ } + + if DaemonIsLinux() { + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello3", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local", + Options: map[string]string{"o": "size=1"}}}}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath, + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 4096 * 1024, + Mode: 0700, + }}}}}, + status: http.StatusCreated, + msg: "", + }, + + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Source: "/shouldnotbespecified", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be specified", + }, + }...) + + } + + for i, x := range cases { + c.Logf("case %d", i) + status, b, err := request.SockRequest("POST", "/containers/create", x.config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, x.status, check.Commentf("%s\n%v", string(b), cases[i].config)) + if len(x.msg) > 0 { + c.Assert(string(b), checker.Contains, x.msg, check.Commentf("%v", cases[i].config)) + } + } +} + +func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) { + testRequires(c, NotUserNamespace, SameHostDaemon) + // also with data in the host side + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + tmpDir, err := ioutil.TempDir("", "test-mounts-api-bind") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("hello"), 666) + c.Assert(err, checker.IsNil) + + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "cat /foo/bar"}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{{"Type": "bind", "Source": tmpDir, "Target": destPath}}}, + } + status, resp, err := request.SockRequest("POST", "/containers/create?name=test", data, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + + out, _ := dockerCmd(c, "start", "-a", "test") + c.Assert(out, checker.Equals, "hello") +} + +// Test Mounts comes out as expected for the MountPoint +func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + + var ( + testImg string + ) + if testEnv.DaemonPlatform() != "windows" { + testImg = "test-mount-config" + buildImageSuccessfully(c, testImg, build.WithDockerfile(` + FROM busybox + RUN mkdir `+destPath+` && touch `+destPath+slash+`bar + CMD cat `+destPath+slash+`bar + `)) + } else { + testImg = "busybox" + } + + type testCase struct { + cfg mounttypes.Mount + expected types.MountPoint + } + + cases := []testCase{ + // use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest + // Validation of the actual `Mount` struct is done in another test is not needed here + {mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", 
Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + } + + if SameHostDaemon() { + // setup temp dir for testing binds + tmpDir1, err := ioutil.TempDir("", "test-mounts-api-1") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir1) + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, + }...) + + // for modes only supported on Linux + if DaemonIsLinux() { + tmpDir3, err := ioutils.TempDir("", "test-mounts-api-3") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir3) + + c.Assert(mount.Mount(tmpDir3, tmpDir3, "none", "bind,rw"), checker.IsNil) + c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil) + + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}}, + }...) + } + } + + if testEnv.DaemonPlatform() != "windows" { // Windows does not support volume populate + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}}, + }...) 
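+		// NoCopy volumes start out empty (nothing is copied from the image),
+		// so starting these containers is expected to fail below when the
+		// image's command tries to read the pre-populated file.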
+ } + + type wrapper struct { + containertypes.Config + HostConfig containertypes.HostConfig + } + type createResp struct { + ID string `json:"Id"` + } + for i, x := range cases { + c.Logf("case %d - config: %v", i, x.cfg) + status, data, err := request.SockRequest("POST", "/containers/create", wrapper{containertypes.Config{Image: testImg}, containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}}, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(data))) + + var resp createResp + err = json.Unmarshal(data, &resp) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + id := resp.ID + + var mps []types.MountPoint + err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) + c.Assert(err, checker.IsNil) + c.Assert(mps, checker.HasLen, 1) + c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) + + if len(x.expected.Source) > 0 { + c.Assert(mps[0].Source, checker.Equals, x.expected.Source) + } + if len(x.expected.Name) > 0 { + c.Assert(mps[0].Name, checker.Equals, x.expected.Name) + } + if len(x.expected.Driver) > 0 { + c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) + } + c.Assert(mps[0].RW, checker.Equals, x.expected.RW) + c.Assert(mps[0].Type, checker.Equals, x.expected.Type) + c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) + if len(x.expected.Propagation) > 0 { + c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) + } + + out, _, err := dockerCmdWithError("start", "-a", id) + if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && testEnv.DaemonPlatform() != "windows" { + c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) + } else { + c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) + } + + dockerCmd(c, "rm", "-fv", id) + if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { + // This should still exist even though we removed the container + dockerCmd(c, "volume", "inspect", mps[0].Name) + } else { + // This should be removed automatically when we removed the container + out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + } + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + cfg map[string]interface{} + expectedOptions []string + } + target := "/foo" + cases := []testCase{ + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + }, + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target, + "TmpfsOptions": map[string]interface{}{ + "SizeBytes": 4096 * 1024, "Mode": 0700}}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k", "mode=700"}, + }, + } + + for i, x := range cases { + cName := fmt.Sprintf("test-tmpfs-%d", i) + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", + fmt.Sprintf("mount | grep 'tmpfs on %s'", target)}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{x.cfg}}, + } + status, resp, err := request.SockRequest("POST", "/containers/create?name="+cName, data, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + out, _ := dockerCmd(c, 
"start", "-a", cName) + for _, option := range x.expectedOptions { + c.Assert(out, checker.Contains, option) + } + } +} + +// Regression test for #33334 +// Makes sure that when a container which has a custom stop signal + restart=always +// gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled. +func (s *DockerSuite) TestContainerKillCustomStopSignal(c *check.C) { + id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always")) + res, _, err := request.Post("/containers/" + id + "/kill") + c.Assert(err, checker.IsNil) + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent, check.Commentf(string(b))) + err = waitInspect(id, "{{.State.Running}} {{.State.Restarting}}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) +} diff --git a/integration-cli/docker_api_create_test.go b/integration-cli/docker_api_create_test.go new file mode 100644 index 0000000..e404b6c --- /dev/null +++ b/integration-cli/docker_api_create_test.go @@ -0,0 +1,171 @@ +package main + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPICreateWithNotExistImage(c *check.C) { + name := "test" + config := map[string]interface{}{ + "Image": "test456:v1", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected := "No such image: test456:v1" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + config2 := map[string]interface{}{ + "Image": "test456", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config2, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: test456:latest" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + config3 := map[string]interface{}{ + "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + } + + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config3, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + +} + +// Test for #25099 +func (s *DockerSuite) TestAPICreateEmptyEnv(c *check.C) { + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Env": []string{"", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := "invalid environment variable:" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = 
request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=foo", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =foo" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { + // test invalid Interval in Healthcheck: less than 0s + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": -10 * time.Millisecond, + "Timeout": time.Second, + "Retries": int(1000), + }, + } + + status, body, err := request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := fmt.Sprintf("Interval in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid Interval in Healthcheck: larger than 0s but less than 1ms + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": 500 * time.Microsecond, + "Timeout": time.Second, + "Retries": int(1000), + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid Timeout in Healthcheck: less than 1ms + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": -100 * time.Millisecond, + "Retries": int(1000), + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = fmt.Sprintf("Timeout in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid Retries in Healthcheck: less than 0 + name = "test4" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": int(-10), + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "Retries in Healthcheck cannot be negative" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + // test invalid StartPeriod in Healthcheck: not 0 and less than 1ms + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": 
int(1000), + "StartPeriod": 100 * time.Microsecond, + }, + } + status, body, err = request.SockRequest("POST", "/containers/create?name="+name, config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = fmt.Sprintf("StartPeriod in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} diff --git a/integration-cli/docker_api_events_test.go b/integration-cli/docker_api_events_test.go new file mode 100644 index 0000000..a95422f --- /dev/null +++ b/integration-cli/docker_api_events_test.go @@ -0,0 +1,74 @@ +package main + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { + type apiResp struct { + resp *http.Response + err error + } + chResp := make(chan *apiResp) + go func() { + resp, body, err := request.Get("/events") + body.Close() + chResp <- &apiResp{resp, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for events api to respond, should have responded immediately") + } +} + +func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { + since := daemonTime(c).Unix() + ts := strconv.FormatInt(since, 10) + + out := runSleepingContainer(c, "--name=foo", "-d") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + q := url.Values{} + q.Set("since", ts) + + _, body, err := request.Get("/events?" 
+ q.Encode()) + c.Assert(err, checker.IsNil) + defer body.Close() + + dec := json.NewDecoder(body) + var containerCreateEvent *jsonmessage.JSONMessage + for { + var event jsonmessage.JSONMessage + if err := dec.Decode(&event); err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if event.Status == "create" && event.ID == containerID { + containerCreateEvent = &event + break + } + } + + c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) + c.Assert(containerCreateEvent.Status, checker.Equals, "create") + c.Assert(containerCreateEvent.ID, checker.Equals, containerID) + c.Assert(containerCreateEvent.From, checker.Equals, "busybox") +} diff --git a/integration-cli/docker_api_exec_resize_test.go b/integration-cli/docker_api_exec_resize_test.go new file mode 100644 index 0000000..f43bc2d --- /dev/null +++ b/integration-cli/docker_api_exec_resize_test.go @@ -0,0 +1,104 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) +} + +// Part of #14845 +func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { + name := "exec_resize_test" + dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh") + + testExecResize := func() error { + data := map[string]interface{}{ + "AttachStdin": true, + "Cmd": []string{"/bin/sh"}, + } + uri := fmt.Sprintf("/containers/%s/exec", name) + status, body, err := request.SockRequest("POST", uri, data, daemonHost()) + if err != nil { + return err + } + if status != http.StatusCreated { + return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) + } + + out := map[string]string{} + err = json.Unmarshal(body, &out) + if err != nil { + return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) + } + + execID := out["Id"] + if len(execID) < 1 { + return fmt.Errorf("ExecCreate got invalid execID") + } + + payload := bytes.NewBufferString(`{"Tty":true}`) + conn, _, err := request.SockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json", daemonHost()) + if err != nil { + return fmt.Errorf("Failed to start the exec: %q", err.Error()) + } + defer conn.Close() + + _, rc, err := request.Post(fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain")) + // An io.ErrUnexpectedEOF here most likely means the daemon panicked. + if err == io.ErrUnexpectedEOF { + return fmt.Errorf("The daemon might have crashed.") + } + + if err == nil { + rc.Close() + } + + // We are only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. + return nil + } + + // The panic happens when daemon.ContainerExecStart is called but + // container.Exec has not yet been called. + // Because the panic is not 100% reproducible, we send the requests concurrently + // to increase the probability that the problem is triggered. 
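+	// Any error from a goroutine lands on the buffered channel; after the
+	// WaitGroup drains, the select below reports the first one, and a clean
+	// run leaves ch empty.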
+ var ( + n = 10 + ch = make(chan error, n) + wg sync.WaitGroup + ) + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if err := testExecResize(); err != nil { + ch <- err + } + }() + } + + wg.Wait() + select { + case err := <-ch: + c.Fatal(err.Error()) + default: + } +} diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go new file mode 100644 index 0000000..2539934 --- /dev/null +++ b/integration-cli/docker_api_exec_test.go @@ -0,0 +1,249 @@ +// +build !test_no_exec + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +// Regression test for #9414 +func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := request.SockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + comment := check.Commentf("Expected message when creating exec command with no Cmd specified") + c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil { + c.Fatalf("Can not encode data to json %s", err) + } + + res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType("test/plain")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") + c.Assert(getErrorMessage(c, b), checker.Contains, "Content-Type specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) { + // Not relevant on Windows as Windows containers cannot be paused + testRequires(c, DaemonIsLinux) + name := "exec_create_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + dockerCmd(c, "pause", name) + status, body, err := request.SockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name) + c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) +} + +func (s *DockerSuite) TestExecAPIStart(c *check.C) { + testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvageable to Windows to Windows CI + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + startExec(c, id, http.StatusOK) + + var execJSON struct{ PID int } + inspectExec(c, id, &execJSON) + c.Assert(execJSON.PID, checker.GreaterThan, 1) + + id = 
createExec(c, "test") + dockerCmd(c, "stop", "test") + + startExec(c, id, http.StatusNotFound) + + dockerCmd(c, "start", "test") + startExec(c, id, http.StatusNotFound) + + // make sure exec is created before pausing + id = createExec(c, "test") + dockerCmd(c, "pause", "test") + startExec(c, id, http.StatusConflict) + dockerCmd(c, "unpause", "test") + startExec(c, id, http.StatusOK) +} + +func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + resp, _, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "") +} + +func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "-d", "--name", "test") + id := createExec(c, "test") + + resp, body, err := request.Post(fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain")) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) +} + +// #19362 +func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { + runSleepingContainer(c, "-d", "--name", "test") + execID := createExec(c, "test") + startExec(c, execID, http.StatusOK) + waitForExec(c, execID) + + startExec(c, execID, http.StatusConflict) +} + +// #20638 +func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { + name := "foo" + runSleepingContainer(c, "-d", "-t", "--name", name) + data := map[string]interface{}{ + "cmd": []string{"true"}, + "AttachStdin": true, + } + _, b, err := request.SockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data, daemonHost()) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + + _, body, err := request.Post(fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + + b, err = testutil.ReadBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + + resp, _, err := request.Get("/_ping") + c.Assert(err, checker.IsNil) + if resp.StatusCode != http.StatusOK { + c.Fatal("daemon is down, but it should be alive") + } +} + +// #30311 +func (s *DockerSuite) TestExecAPIStartValidCommand(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + id := createExecCmd(c, name, "true") + startExec(c, id, http.StatusOK) + + waitForExec(c, id) + + var inspectJSON struct{ ExecIDs []string } + inspectContainer(c, name, &inspectJSON) + + c.Assert(inspectJSON.ExecIDs, checker.IsNil) +} + +// #30311 +func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + id := createExecCmd(c, name, "invalid") + startExec(c, id, http.StatusNotFound) + waitForExec(c, id) + + var inspectJSON struct{ ExecIDs []string } + inspectContainer(c, name, &inspectJSON) + + c.Assert(inspectJSON.ExecIDs, checker.IsNil) +} + +func createExec(c
*check.C, name string) string { + return createExecCmd(c, name, "true") +} + +func createExecCmd(c *check.C, name string, cmd string) string { + _, reader, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}})) + c.Assert(err, checker.IsNil) + b, err := ioutil.ReadAll(reader) + c.Assert(err, checker.IsNil) + defer reader.Close() + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + return createResp.ID +} + +func startExec(c *check.C, id string, code int) { + resp, body, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, code, comment) +} + +func inspectExec(c *check.C, id string, out interface{}) { + resp, body, err := request.Get(fmt.Sprintf("/exec/%s/json", id)) + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} + +func waitForExec(c *check.C, id string) { + timeout := time.After(60 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, id, &execJSON) + if !execJSON.Running { + break + } + } +} + +func inspectContainer(c *check.C, id string, out interface{}) { + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/json", id)) + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go new file mode 100644 index 0000000..d44b307 --- /dev/null +++ b/integration-cli/docker_api_images_test.go @@ -0,0 +1,192 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + dockerCmd(c, "tag", "busybox", n) + } + type image types.ImageSummary + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := request.SockRequest("GET", "/images/json?"+v.Encode(), nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var images []image + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + + return images + } + + //incorrect number of matches returned + images := getImages("utest*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 2) + + images = getImages("utest") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("utest*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("*5000*/*") + 
c.Assert(images[0].RepoTags, checker.HasLen, 1) +} + +func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) { + // TODO Windows to Windows CI: Investigate further why this test fails. + testRequires(c, Network) + testRequires(c, DaemonIsLinux) + buildImageSuccessfully(c, "saveandload", build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, "saveandload") + + res, body, err := request.Get("/images/" + id + "/get") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + dockerCmd(c, "rmi", id) + + res, loadBody, err := request.Post("/images/load", request.RawContent(body), request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + defer loadBody.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + inspectOut := cli.InspectCmd(c, id, cli.Format(".Id")).Combined() + c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) +} + +func (s *DockerSuite) TestAPIImagesDelete(c *check.C) { + if testEnv.DaemonPlatform() != "windows" { + testRequires(c, Network) + } + name := "test-api-images-delete" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, name) + + dockerCmd(c, "tag", name, "test:tag1") + + status, _, err := request.SockRequest("DELETE", "/images/"+id, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + status, _, err = request.SockRequest("DELETE", "/images/test:noexist", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image + + status, _, err = request.SockRequest("DELETE", "/images/test:tag1", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { + if testEnv.DaemonPlatform() != "windows" { + testRequires(c, Network) + } + name := "test-api-images-history" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, name) + + status, body, err := request.SockRequest("GET", "/images/"+id+"/history", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var historydata []image.HistoryResponseItem + err = json.Unmarshal(body, &historydata) + c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) + + c.Assert(historydata, checker.Not(checker.HasLen), 0) + c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") +} + +func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) { + testRequires(c, Network) + + server := httptest.NewServer(http.NewServeMux()) + defer server.Close() + + tt := []struct { + statusExp int + fromSrc string + }{ + {http.StatusNotFound, server.URL + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "%2Fdata%2Ffile.tar"}, + {http.StatusInternalServerError, "%2Fdata%2Ffile.tar"}, + } + + for _, te := range tt { + res, b, err := request.SockRequestRaw("POST", strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), nil, "application/json", daemonHost()) + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, te.statusExp) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") + } + +} + 
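+// Note on TestAPIImagesImportBadSrc above: the fromSrc values mix a full URL,
+// a schemeless host/path, and URL-encoded local paths (%2F is an escaped "/");
+// unreachable remote sources surface as 404, while a bare local path is
+// rejected outright by the daemon with a 500.
+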
+// #14846 +func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { + testRequires(c, Network) + + res, b, err := request.Get("/images/search?term=test", request.JSON) + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") +} + +// Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon. +// This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix. +func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { + status, b, err := request.SockRequest("GET", "/images/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var images []types.ImageSummary + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + c.Assert(len(images), checker.Not(checker.Equals), 0) + for _, image := range images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } + + type v124Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string + } + status, b, err = request.SockRequest("GET", "/v1.24/images/json", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var v124Images []v124Image + err = json.Unmarshal(b, &v124Images) + c.Assert(err, checker.IsNil) + c.Assert(len(v124Images), checker.Not(checker.Equals), 0) + for _, image := range v124Images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } +} diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go new file mode 100644 index 0000000..67ef03c --- /dev/null +++ b/integration-cli/docker_api_info_test.go @@ -0,0 +1,83 @@ +package main + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoAPI(c *check.C) { + endpoint := "/info" + + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "ContainersRunning", + "ContainersPaused", + "ContainersStopped", + "Images", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "OSType", + "Architecture", + "MemTotal", + "KernelVersion", + "Driver", + "ServerVersion", + "SecurityOptions"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix) + } +} + +// TestInfoAPIRuncCommit tests that dockerd is able to obtain RunC version +// information, and that the version matches the expected version +func (s *DockerSuite) TestInfoAPIRuncCommit(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not have RunC version information + + res, body, err := request.Get("/v1.30/info") + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + var i types.Info + + c.Assert(json.Unmarshal(b, &i), checker.IsNil) + c.Assert(i.RuncCommit.ID, checker.Not(checker.Equals), "N/A") + 
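// the commit actually in use must also match the commit dockerd was built to expect
+	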
c.Assert(i.RuncCommit.ID, checker.Equals, i.RuncCommit.Expected) + + version, err := ioutil.ReadFile("/go/src/github.com/docker/docker/vendor/github.com/opencontainers/runc/VERSION") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(version)), checker.Equals, os.Getenv("DOCKER_RCE_RUNC_VERSION")) +} + +func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + endpoint := "/v1.20/info" + + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + out := string(body) + c.Assert(out, checker.Contains, "ExecutionDriver") + c.Assert(out, checker.Contains, "not supported") +} diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go new file mode 100644 index 0000000..f2aa883 --- /dev/null +++ b/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,184 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", + "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} + + type acase struct { + version string + keys []string + } + + var cases []acase + + if testEnv.DaemonPlatform() == "windows" { + cases = []acase{ + {"v1.25", append(keysBase, "Mounts")}, + } + + } else { + cases = []acase{ + {"v1.20", append(keysBase, "Mounts")}, + {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, + } + } + + for _, cs := range cases { + body := getInspectBody(c, cs.version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) + + for _, key := range cs.keys { + _, ok := inspectJSON[key] + c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) + } + + //Issue #6830: type not properly converted to JSON/back + _, ok := inspectJSON["Path"].(bool) + c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) { + // No legacy implications for Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, 
check.Commentf("API version %s expected to include VolumeDriver in 'Config'", version)) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + body := getInspectBody(c, "v1.25", cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.25")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.False, check.Commentf("API version 1.25 expected to not include VolumeDriver in 'Config'")) + + config, ok = inspectJSON["HostConfig"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'HostConfig'")) + cfg = config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version 1.25 expected to include VolumeDriver in 'HostConfig'")) +} + +func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") + + endpoint := "/images/busybox/json" + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var imageJSON types.ImageInspect + err = json.Unmarshal(body, &imageJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) + c.Assert(imageJSON.RepoTags, checker.HasLen, 2) + + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) +} + +// #17131, #17139, #17173 +func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) { + // Not relevant on Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { + _, ok := cfg[f] + c.Check(ok, checker.True, check.Commentf("API version %s expected to include %s in 'Config'", version, f)) + } + } +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) { + // Not relevant on Windows, and besides it doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.20", containerID) + + var inspectJSON v1p20.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *check.C) { + // Windows doesn't have any bridge 
network settings
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	containerID := strings.TrimSpace(out)
+	waitRun(containerID)
+
+	body := getInspectBody(c, "v1.21", containerID)
+
+	var inspectJSON types.ContainerJSON
+	err := json.Unmarshal(body, &inspectJSON)
+	c.Assert(err, checker.IsNil)
+
+	settings := inspectJSON.NetworkSettings
+	c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0)
+	c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil))
+	c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress)
+}
diff --git a/integration-cli/docker_api_inspect_unix_test.go b/integration-cli/docker_api_inspect_unix_test.go
new file mode 100644
index 0000000..f7731f3
--- /dev/null
+++ b/integration-cli/docker_api_inspect_unix_test.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/go-check/check"
+)
+
+// #16665
+func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testRequires(c, cgroupCpuset)
+
+	name := "cpusetinconfig-pre120"
+	dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true")
+
+	status, body, err := request.SockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil, daemonHost())
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	var inspectJSON map[string]interface{}
+	err = json.Unmarshal(body, &inspectJSON)
+	c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19"))
+
+	config, ok := inspectJSON["Config"]
+	c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'"))
+	cfg := config.(map[string]interface{})
+	_, ok = cfg["Cpuset"]
+	c.Assert(ok, checker.True, check.Commentf("API version 1.19 expected to include Cpuset in 'Config'"))
+}
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
new file mode 100644
index 0000000..5e953b7
--- /dev/null
+++ b/integration-cli/docker_api_logs_test.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done")
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), checker.IsNil)
+
+	type logOut struct {
+		out string
+		err error
+	}
+
+	chLog := make(chan logOut)
+	res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id))
+	c.Assert(err, checker.IsNil)
+	c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
+
+	go func() {
+		defer body.Close()
+		out, err := bufio.NewReader(body).ReadString('\n')
+		if err != nil {
+			chLog <- logOut{"", err}
+			return
+		}
+		chLog <- logOut{strings.TrimSpace(out), err}
+	}()
+
+	select {
+	case l := <-chLog:
+		c.Assert(l.err, checker.IsNil)
+		if !strings.HasSuffix(l.out, "hello") {
+			c.Fatalf("expected log output to contain 'hello', but it does not")
+		}
+	case <-time.After(30 * time.Second):
+		c.Fatal("timeout waiting for log output")
+	}
+}
+
+func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) {
+	name := "logs_test"
+	dockerCmd(c, "run", "-d", "-t",
"--name", name, "busybox", "/bin/sh") + + status, body, err := request.SockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(err, checker.IsNil) + + expected := "Bad parameters: you must choose at least one stream" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +// Regression test for #12704 +func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { + name := "logs_test" + t0 := time.Now() + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") + + _, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name)) + t1 := time.Now() + c.Assert(err, checker.IsNil) + body.Close() + elapsed := t1.Sub(t0).Seconds() + if elapsed > 20.0 { + c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) + } +} + +func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { + name := "nonExistentContainer" + resp, _, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name)) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) +} diff --git a/integration-cli/docker_api_network_test.go b/integration-cli/docker_api_network_test.go new file mode 100644 index 0000000..129ec7e --- /dev/null +++ b/integration-cli/docker_api_network_test.go @@ -0,0 +1,357 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + // By default docker daemon creates 3 networks. 
check if they are present + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) + } +} + +func (s *DockerSuite) TestAPINetworkCreateDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create a network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + id := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // delete the network and make sure it is deleted + deleteNetwork(c, id, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testcheckduplicate" + configOnCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + configNotCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + // Creating a new network first + createNetwork(c, configOnCheck, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // Creating another network with same name and CheckDuplicate must fail + createNetwork(c, configOnCheck, false) + + // Creating another network with same name and not CheckDuplicate must succeed + createNetwork(c, configNotCheck, true) +} + +func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { + testRequires(c, DaemonIsLinux) + nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) + c.Assert(nr.Name, checker.Equals, "bridge") +} + +func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Inspect default bridge network + nr := getNetworkResource(c, "bridge") + c.Assert(nr.Name, checker.Equals, "bridge") + + // run a container and attach it to the default bridge network + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + containerIP := findContainerIP(c, "test", "bridge") + + // inspect default bridge network again and make sure the container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + c.Assert(ip.String(), checker.Equals, containerIP) + + // IPAM configuration inspect + ipam := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, + } + config := types.NetworkCreateRequest{ + Name: "br0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam, + Options: map[string]string{"foo": "bar", "opts": "dopts"}, + }, + } + id0 := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) + + nr = getNetworkResource(c, id0) + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") 
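+	// driver options supplied at create time should round-trip through inspect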
+ c.Assert(nr.Options["foo"], checker.Equals, "bar") + c.Assert(nr.Options["opts"], checker.Equals, "dopts") + + // delete the network and make sure it is deleted + deleteNetwork(c, id0, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create test network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + } + id := createNetwork(c, config, true) + nr := getNetworkResource(c, id) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(nr.ID, checker.Equals, id) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + + // connect the container to the test network + connectNetwork(c, nr.ID, containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + containerIP := findContainerIP(c, "test", "testnetwork") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + disconnectNetwork(c, nr.ID, containerID) + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // delete the network + deleteNetwork(c, nr.ID, true) +} + +func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + // test0 bridge network + ipam0 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, + } + config0 := types.NetworkCreateRequest{ + Name: "test0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam0, + }, + } + id0 := createNetwork(c, config0, true) + c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) + + ipam1 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, + } + // test1 bridge network overlaps with test0 + config1 := types.NetworkCreateRequest{ + Name: "test1", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam1, + }, + } + createNetwork(c, config1, false) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) + + ipam2 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, + } + // test2 bridge network does not overlap + config2 := types.NetworkCreateRequest{ + Name: "test2", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam2, + }, + } + createNetwork(c, config2, true) + c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) + + // remove test0 and retry to create test1 + deleteNetwork(c, id0, true) + createNetwork(c, config1, true) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) + c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) + c.Assert(isNetworkAvailable(c, 
"test4"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true) + c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) + + for i := 1; i < 6; i++ { + deleteNetwork(c, fmt.Sprintf("test%d", i), true) + } +} + +func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + createDeletePredefinedNetwork(c, "bridge") + createDeletePredefinedNetwork(c, "none") + createDeletePredefinedNetwork(c, "host") +} + +func createDeletePredefinedNetwork(c *check.C, name string) { + // Create pre-defined network + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + shouldSucceed := false + createNetwork(c, config, shouldSucceed) + deleteNetwork(c, name, shouldSucceed) +} + +func isNetworkAvailable(c *check.C, name string) bool { + resp, body, err := request.Get("/networks") + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + + nJSON := []types.NetworkResource{} + err = json.NewDecoder(body).Decode(&nJSON) + c.Assert(err, checker.IsNil) + + for _, n := range nJSON { + if n.Name == name { + return true + } + } + return false +} + +func getNetworkIDByName(c *check.C, name string) string { + var ( + v = url.Values{} + filterArgs = filters.NewArgs() + ) + filterArgs.Add("name", name) + filterJSON, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + v.Set("filters", filterJSON) + + resp, body, err := request.Get("/networks?" + v.Encode()) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.NewDecoder(body).Decode(&nJSON) + c.Assert(err, checker.IsNil) + c.Assert(len(nJSON), checker.Equals, 1) + + return nJSON[0].ID +} + +func getNetworkResource(c *check.C, id string) *types.NetworkResource { + _, obj, err := request.Get("/networks/" + id) + c.Assert(err, checker.IsNil) + + nr := types.NetworkResource{} + err = json.NewDecoder(obj).Decode(&nr) + c.Assert(err, checker.IsNil) + + return &nr +} + +func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { + resp, body, err := request.Post("/networks/create", request.JSONBody(config)) + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + if !shouldSucceed { + c.Assert(resp.StatusCode, checker.Not(checker.Equals), http.StatusCreated) + return "" + } + + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) + + var nr types.NetworkCreateResponse + err = json.NewDecoder(body).Decode(&nr) + c.Assert(err, checker.IsNil) + + return nr.ID +} + +func connectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + resp, _, err := request.Post("/networks/"+nid+"/connect", request.JSONBody(config)) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func disconnectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + resp, _, err := request.Post("/networks/"+nid+"/disconnect", request.JSONBody(config)) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func deleteNetwork(c *check.C, id string, shouldSucceed bool) { + resp, _, err := request.Delete("/networks/" + id) + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + if !shouldSucceed { + c.Assert(resp.StatusCode, checker.Not(checker.Equals), http.StatusOK) + return + } 
+ c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) +} diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go new file mode 100644 index 0000000..4a07fc7 --- /dev/null +++ b/integration-cli/docker_api_resize_test.go @@ -0,0 +1,45 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIResponseWhenContainerNotStarted(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", cleanedContainerID) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, body, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) +} diff --git a/integration-cli/docker_api_session_test.go b/integration-cli/docker_api_session_test.go new file mode 100644 index 0000000..e1ad880 --- /dev/null +++ b/integration-cli/docker_api_session_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestSessionCreate(c *check.C) { + testRequires(c, ExperimentalDaemon) + + res, body, err := request.Post("/session", func(r *http.Request) error { + r.Header.Set("X-Docker-Expose-Session-Uuid", "testsessioncreate") // so we don't block default name if something else is using it + r.Header.Set("Upgrade", "h2c") + return nil + }) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusSwitchingProtocols) + c.Assert(res.Header.Get("Upgrade"), checker.Equals, "h2c") + c.Assert(body.Close(), checker.IsNil) +} + +func (s *DockerSuite) TestSessionCreateWithBadUpgrade(c *check.C) { + testRequires(c, ExperimentalDaemon) + + res, body, err := request.Post("/session") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + buf, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "no upgrade") + + res, body, err = request.Post("/session", func(r *http.Request) error { + r.Header.Set("Upgrade", "foo") + return nil + }) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, 
checker.Equals, http.StatusBadRequest)
+	buf, err = testutil.ReadBody(body)
+	c.Assert(err, checker.IsNil)
+
+	out = string(buf)
+	c.Assert(out, checker.Contains, "not supported")
+}
diff --git a/integration-cli/docker_api_stats_test.go b/integration-cli/docker_api_stats_test.go
new file mode 100644
index 0000000..f1cb5bb
--- /dev/null
+++ b/integration-cli/docker_api_stats_test.go
@@ -0,0 +1,310 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/go-check/check"
+)
+
+var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ")
+
+func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) {
+	// "while true;usleep 100;" puts usleep in the loop's condition list, so the
+	// container keeps echoing and accumulating CPU time for the test to sample
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;usleep 100; do echo 'Hello'; done")
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), checker.IsNil)
+	resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
+	c.Assert(err, checker.IsNil)
+	c.Assert(resp.StatusCode, checker.Equals, http.StatusOK)
+	c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json")
+
+	var v *types.Stats
+	err = json.NewDecoder(body).Decode(&v)
+	c.Assert(err, checker.IsNil)
+	body.Close()
+
+	var cpuPercent = 0.0
+
+	if testEnv.DaemonPlatform() != "windows" {
+		cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage)
+		systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage)
+		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
+	} else {
+		// Max number of 100ns intervals between the previous time read and now
+		possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
+		possIntervals /= 100                                         // Convert to number of 100ns intervals
+		possIntervals *= uint64(v.NumProcs)                          // Multiply by the number of processors
+
+		// Intervals used
+		intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage
+
+		// Percentage avoiding divide-by-zero
+		if possIntervals > 0 {
+			cpuPercent = float64(intervalsUsed) / float64(possIntervals) * 100.0
+		}
+	}
+
+	c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent))
+}
+
+func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1")
+	id := strings.TrimSpace(out)
+
+	getGoRoutines := func() int {
+		_, body, err := request.Get("/info")
+		c.Assert(err, checker.IsNil)
+		info := types.Info{}
+		err = json.NewDecoder(body).Decode(&info)
+		c.Assert(err, checker.IsNil)
+		body.Close()
+		return info.NGoroutines
+	}
+
+	// When the HTTP connection is closed, the number of goroutines should not increase.
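+	// Strategy: take a baseline goroutine count, open a streaming stats request
+	// and close its body immediately, then poll /info until NGoroutines drops
+	// back to the baseline or the 30-second deadline below fires.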
+ routines := getGoRoutines() + _, body, err := request.Get(fmt.Sprintf("/containers/%s/stats", id)) + c.Assert(err, checker.IsNil) + body.Close() + + t := time.After(30 * time.Second) + for { + select { + case <-t: + c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) + return + default: + if n := getGoRoutines(); n <= routines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { + testRequires(c, SameHostDaemon) + + out := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + // Retrieve the container address + net := "bridge" + if testEnv.DaemonPlatform() == "windows" { + net = "nat" + } + contIP := findContainerIP(c, id, net) + numPings := 1 + + var preRxPackets uint64 + var preTxPackets uint64 + var postRxPackets uint64 + var postTxPackets uint64 + + // Get the container networking stats before and after pinging the container + nwStatsPre := getNetworkStats(c, id) + for _, v := range nwStatsPre { + preRxPackets += v.RxPackets + preTxPackets += v.TxPackets + } + + countParam := "-c" + if runtime.GOOS == "windows" { + countParam = "-n" // Ping count parameter is -n on Windows + } + pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).CombinedOutput() + if err != nil && runtime.GOOS == "linux" { + // If it fails then try a work-around, but just for linux. + // If this fails too then go back to the old error for reporting. + // + // The ping will sometimes fail due to an apparmor issue where it + // denies access to the libc.so.6 shared library - running it + // via /lib64/ld-linux-x86-64.so.2 seems to work around it. + pingout2, err2 := exec.Command("/lib64/ld-linux-x86-64.so.2", "/bin/ping", contIP, "-c", strconv.Itoa(numPings)).CombinedOutput() + if err2 == nil { + pingout = pingout2 + err = err2 + } + } + c.Assert(err, checker.IsNil) + pingouts := string(pingout[:]) + nwStatsPost := getNetworkStats(c, id) + for _, v := range nwStatsPost { + postRxPackets += v.RxPackets + postTxPackets += v.TxPackets + } + + // Verify the stats contain at least the expected number of packets + // On Linux, account for ARP. + expRxPkts := preRxPackets + uint64(numPings) + expTxPkts := preTxPackets + uint64(numPings) + if testEnv.DaemonPlatform() != "windows" { + expRxPkts++ + expTxPkts++ + } + c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, + check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) + c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, + check.Commentf("Reported less RxPackets than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) +} + +func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { + // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 
1.21
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out := runSleepingContainer(c)
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), checker.IsNil)
+	wg := sync.WaitGroup{}
+
+	for i := 17; i <= 21; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			apiVersion := fmt.Sprintf("v1.%d", i)
+			statsJSONBlob := getVersionedStats(c, id, apiVersion)
+			if versions.LessThan(apiVersion, "v1.21") {
+				c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true,
+					check.Commentf("Stats JSON blob from API %s %#v does not look like a <v1.21 API stats structure", apiVersion, statsJSONBlob))
+			}
+		}(i)
+	}
+	wg.Wait()
+}
+
+func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats {
+	var st *types.StatsJSON
+
+	_, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id))
+	c.Assert(err, checker.IsNil)
+
+	err = json.NewDecoder(body).Decode(&st)
+	c.Assert(err, checker.IsNil)
+	body.Close()
+
+	return st.Networks
+}
+
+// getVersionedStats returns the stats result for the container with id
+// using an API call with version apiVersion. Since the stats result type
+// differs between API versions, we simply return map[string]interface{}.
+func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} {
+	stats := make(map[string]interface{})
+
+	_, body, err := request.Get(fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id))
+	c.Assert(err, checker.IsNil)
+	defer body.Close()
+
+	err = json.NewDecoder(body).Decode(&stats)
+	c.Assert(err, checker.IsNil, check.Commentf("failed to decode stat: %s", err))
+
+	return stats
+}
+
+func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool {
+	networkStatsIntfc, ok := blob["network"]
+	if !ok {
+		return false
+	}
+	networkStats, ok := networkStatsIntfc.(map[string]interface{})
+	if !ok {
+		return false
+	}
+	for _, expectedKey := range expectedNetworkInterfaceStats {
+		if _, ok := networkStats[expectedKey]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool {
+	networksStatsIntfc, ok := blob["networks"]
+	if !ok {
+		return false
+	}
+	networksStats, ok := networksStatsIntfc.(map[string]interface{})
+	if !ok {
+		return false
+	}
+	for _, networkInterfaceStatsIntfc := range networksStats {
+		networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{})
+		if !ok {
+			return false
+		}
+		for _, expectedKey := range expectedNetworkInterfaceStats {
+			if _, ok := networkInterfaceStats[expectedKey]; !ok {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	status, _, err := request.SockRequest("GET", "/containers/nonexistent/stats", nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusNotFound)
+
+	status, _, err = request.SockRequest("GET", "/containers/nonexistent/stats?stream=0", nil, daemonHost())
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusNotFound)
+}
+
+func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	out1 := runSleepingContainer(c)
+	id1 := strings.TrimSpace(out1)
+	c.Assert(waitRun(id1), checker.IsNil)
+
+	out2 := runSleepingContainer(c, "--net", "container:"+id1)
+	id2 := strings.TrimSpace(out2)
+	c.Assert(waitRun(id2), checker.IsNil)
+
+	ch := make(chan error)
+	go func() {
+		resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id2))
+		// check the error before touching resp/body so a failed request cannot
+		// dereference a nil response, and return after each send so the
+		// unbuffered channel sees exactly one value
+		if err != nil {
+			ch <- err
+			return
+		}
+		defer body.Close()
+		if resp.StatusCode != http.StatusOK {
+			ch <- fmt.Errorf("Invalid StatusCode %v", resp.StatusCode)
+			return
+		}
+		if resp.Header.Get("Content-Type") != "application/json" {
+			ch <- fmt.Errorf("Invalid 'Content-Type' %v", resp.Header.Get("Content-Type"))
+			return
+		}
+		var v *types.Stats
+		if err := json.NewDecoder(body).Decode(&v); err != nil {
+			ch <- err
+			return
+		}
+		ch <- nil
+	}()
+
+	select {
+	case err := <-ch:
+		c.Assert(err, checker.IsNil, check.Commentf("Error in stats Engine API: %v", err))
+	case <-time.After(15 * time.Second):
+		c.Fatalf("Stats did not return after timeout")
+	}
+}
diff --git a/integration-cli/docker_api_stats_unix_test.go b/integration-cli/docker_api_stats_unix_test.go
new file mode 100644
index 0000000..627d335
--- /dev/null
+++ b/integration-cli/docker_api_stats_unix_test.go
@@ -0,0 +1,42 @@
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestAPIStatsContainerGetMemoryLimit(c *check.C) {
+	testRequires(c, DaemonIsLinux, memoryLimitSupport)
+
+	resp, body, err := request.Get("/info", request.JSON)
+	c.Assert(err, checker.IsNil)
+	c.Assert(resp.StatusCode, checker.Equals, http.StatusOK)
+	var info types.Info
+	err = json.NewDecoder(body).Decode(&info)
+	c.Assert(err, checker.IsNil)
+	body.Close()
+
+	// don't set a memory limit: the reported limit should be total system memory
+	conName := "foo"
+	dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top")
+	c.Assert(waitRun(conName), checker.IsNil)
+
+	resp, body, err = request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", conName))
+	c.Assert(err, checker.IsNil)
+	c.Assert(resp.StatusCode, checker.Equals, http.StatusOK)
+	c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json")
+
+	var v *types.Stats
+	err = json.NewDecoder(body).Decode(&v)
+	c.Assert(err, checker.IsNil)
+	body.Close()
+	c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal))
+}
diff --git a/integration-cli/docker_api_swarm_config_test.go b/integration-cli/docker_api_swarm_config_test.go
new file mode 100644
index 0000000..fab65cc
--- /dev/null
+++ b/integration-cli/docker_api_swarm_config_test.go
@@ -0,0 +1,118 @@
+// +build !windows
+
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSwarmSuite) TestAPISwarmConfigsEmptyList(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	configs := d.ListConfigs(c)
+	c.Assert(configs, checker.NotNil)
+	c.Assert(len(configs), checker.Equals, 0, check.Commentf("configs: %#v", configs))
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmConfigsCreate(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	testName := "test_config"
+	id := d.CreateConfig(c, swarm.ConfigSpec{
+		Annotations: swarm.Annotations{
+			Name: testName,
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id))
+
+	configs := d.ListConfigs(c)
+	c.Assert(len(configs), checker.Equals, 1, check.Commentf("configs: %#v", configs))
+	name := configs[0].Spec.Annotations.Name
+	c.Assert(name, checker.Equals, testName,
check.Commentf("configs: %s", name)) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.ID, checker.Equals, id, check.Commentf("config: %v", config)) + + d.DeleteConfig(c, config.ID) + status, out, err := d.SockRequest("GET", "/configs/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("config delete: %s", string(out))) +} + +func (s *DockerSwarmSuite) TestAPISwarmConfigsUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "test": "test1", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.ID, checker.Equals, id, check.Commentf("config: %v", config)) + + // test UpdateConfig with full ID + d.UpdateConfig(c, id, func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test1", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test1", check.Commentf("config: %v", config)) + + // test UpdateConfig with full name + d.UpdateConfig(c, config.Spec.Name, func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test2", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test2", check.Commentf("config: %v", config)) + + // test UpdateConfig with prefix ID + d.UpdateConfig(c, id[:1], func(s *swarm.Config) { + s.Spec.Labels = map[string]string{ + "test": "test3", + } + }) + + config = d.GetConfig(c, id) + c.Assert(config.Spec.Labels["test"], checker.Equals, "test3", check.Commentf("config: %v", config)) + + // test UpdateConfig in updating Data which is not supported in daemon + // this test will produce an error in func UpdateConfig + config = d.GetConfig(c, id) + config.Spec.Data = []byte("TESTINGDATA2") + + url := fmt.Sprintf("/configs/%s/update?version=%d", config.ID, config.Version.Index) + status, out, err := d.SockRequest("POST", url, config.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) +} diff --git a/integration-cli/docker_api_swarm_node_test.go b/integration-cli/docker_api_swarm_node_test.go new file mode 100644 index 0000000..98f8055 --- /dev/null +++ b/integration-cli/docker_api_swarm_node_test.go @@ -0,0 +1,128 @@ +// +build !windows + +package main + +import ( + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + nodes := d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + +loop0: + for _, n := range nodes { + for _, d := range []*daemon.Swarm{d1, d2, d3} { + if n.ID == d.NodeID { + continue loop0 + } + 
} + c.Errorf("unknown nodeID %v", n.ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + nodes := d.ListNodes(c) + + d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + n := d.GetNode(c, nodes[0].ID) + c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { + testRequires(c, Network) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + _ = s.AddDaemon(c, true, false) + + nodes := d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + + // Getting the info so we can take the NodeID + d2Info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + + // forceful removal of d2 should work + d1.RemoveNode(c, d2Info.NodeID, true) + + nodes = d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) + + // Restart the node that was removed + d2.Restart(c) + + // Give some time for the node to rejoin + time.Sleep(1 * time.Second) + + // Make sure the node didn't rejoin + nodes = d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + // start a service, expect balanced distribution + instances := 8 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) + + // set d2 back to active + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityActive + }) + + instances = 1 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + instances = 8 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + // drained node first so we don't get any old containers + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + d2ContainerCount := len(d2.ActiveContainers()) + + // set d2 to paused, scale service up, only d1 gets new tasks + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + instances = 
14 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount) + +} diff --git a/integration-cli/docker_api_swarm_secret_test.go b/integration-cli/docker_api_swarm_secret_test.go new file mode 100644 index 0000000..cb82af8 --- /dev/null +++ b/integration-cli/docker_api_swarm_secret_test.go @@ -0,0 +1,132 @@ +// +build !windows + +package main + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + secrets := d.ListSecrets(c) + c.Assert(secrets, checker.NotNil) + c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + secretSpec := swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + } + + id := d.CreateSecret(c, secretSpec) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secrets := d.ListSecrets(c) + c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) + name := secrets[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) + + // create an already existing secret, daemon should return a status code of 409 + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("secret create: %s", string(out))) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + d.DeleteSecret(c, secret.ID) + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + + // delete non-existing secret, daemon should return a status code of 404 + id = "non-existing" + status, out, err = d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "test": "test1", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + // test UpdateSecret with full ID + d.UpdateSecret(c, id, func(s *swarm.Secret) { + 
s.Spec.Labels = map[string]string{ + "test": "test1", + } + }) + + secret = d.GetSecret(c, id) + c.Assert(secret.Spec.Labels["test"], checker.Equals, "test1", check.Commentf("secret: %v", secret)) + + // test UpdateSecret with full name + d.UpdateSecret(c, secret.Spec.Name, func(s *swarm.Secret) { + s.Spec.Labels = map[string]string{ + "test": "test2", + } + }) + + secret = d.GetSecret(c, id) + c.Assert(secret.Spec.Labels["test"], checker.Equals, "test2", check.Commentf("secret: %v", secret)) + + // test UpdateSecret with prefix ID + d.UpdateSecret(c, id[:1], func(s *swarm.Secret) { + s.Spec.Labels = map[string]string{ + "test": "test3", + } + }) + + secret = d.GetSecret(c, id) + c.Assert(secret.Spec.Labels["test"], checker.Equals, "test3", check.Commentf("secret: %v", secret)) + + // test UpdateSecret in updating Data which is not supported in daemon + // this test will produce an error in func UpdateSecret + secret = d.GetSecret(c, id) + secret.Spec.Data = []byte("TESTINGDATA2") + + url := fmt.Sprintf("/secrets/%s/update?version=%d", secret.ID, secret.Version.Index) + status, out, err := d.SockRequest("POST", url, secret.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out))) +} diff --git a/integration-cli/docker_api_swarm_service_test.go b/integration-cli/docker_api_swarm_service_test.go new file mode 100644 index 0000000..5360949 --- /dev/null +++ b/integration-cli/docker_api_swarm_service_test.go @@ -0,0 +1,598 @@ +// +build !windows + +package main + +import ( + "fmt" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func setPortConfig(portConfig []swarm.PortConfig) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.EndpointSpec == nil { + s.Spec.EndpointSpec = &swarm.EndpointSpec{} + } + s.Spec.EndpointSpec.Ports = portConfig + } +} + +func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service with a port mapping of 8080:8081. + portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} + serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} + remoteService := d.GetService(c, serviceID) + d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig)) + + // Inspect the service and verify port mapping. 
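+	// Only TargetPort and PublishedPort are set in the PortConfig values
+	// above; the remaining fields (e.g. Protocol, PublishMode) are left to
+	// the daemon's defaults, so the assertions below check just those two
+	// ports after the update has been applied.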
+ updatedService := d.GetService(c, serviceID) + c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) + c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + services := d.ListServices(c) + c.Assert(services, checker.NotNil) + c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + // insertDefaults inserts UpdateConfig when service is fetched by ID + _, out, err := d.SockRequest("GET", "/services/"+id+"?insertDefaults=true", nil) + c.Assert(err, checker.IsNil, check.Commentf("%s", out)) + c.Assert(string(out), checker.Contains, "UpdateConfig") + + // insertDefaults inserts UpdateConfig when service is fetched by ID + _, out, err = d.SockRequest("GET", "/services/top?insertDefaults=true", nil) + c.Assert(err, checker.IsNil, check.Commentf("%s", out)) + c.Assert(string(out), checker.Contains, "UpdateConfig") + + service := d.GetService(c, id) + instances = 5 + d.UpdateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + d.RemoveService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + // reconciliation on d2 node down + d2.Stop(c) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + // test downscaling + instances = 5 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + d1.CreateService(c, simpleTestService, setGlobalMode) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, 
defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
+	waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1)
+
+	d4 := s.AddDaemon(c, true, false)
+	d5 := s.AddDaemon(c, true, false)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1)
+	waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1)
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
+	const nodeCount = 3
+	var daemons [nodeCount]*daemon.Swarm
+	for i := 0; i < nodeCount; i++ {
+		daemons[i] = s.AddDaemon(c, true, i == 0)
+	}
+	// wait for nodes ready
+	waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
+
+	// service image at start
+	image1 := "busybox:latest"
+	// target image in update
+	image2 := "busybox:test"
+
+	// create a second tag for the same image on every node
+	for _, d := range daemons {
+		out, err := d.Cmd("tag", image1, image2)
+		c.Assert(err, checker.IsNil, check.Commentf(out))
+	}
+
+	// create service
+	instances := 5
+	parallelism := 2
+	rollbackParallelism := 3
+	id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
+
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances})
+
+	// issue service update
+	service := daemons[0].GetService(c, id)
+	daemons[0].UpdateService(c, service, setImage(image2))
+
+	// first batch
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances - parallelism, image2: parallelism})
+
+	// 2nd batch
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})
+
+	// 3rd batch
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image2: instances})
+
+	// Roll back to the previous version. This uses the CLI because
+	// rollback used to be a client-side operation.
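+	// The rollback below is expected to proceed in batches of
+	// rollbackParallelism (3) tasks, matching the RollbackConfig that the
+	// serviceForUpdate helper sets on the service spec.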
+	out, err := daemons[0].Cmd("service", "update", "--rollback", id)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	// first batch
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism})
+
+	// 2nd batch
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances})
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	// service image at start
+	image1 := "busybox:latest"
+	// target image in update
+	image2 := "testhealth"
+
+	// a service started from this image won't pass its health check
+	_, _, err := d.BuildImageWithOut(image2,
+		`FROM busybox
+		HEALTHCHECK --interval=1s --timeout=30s --retries=1024 \
+		  CMD cat /status`,
+		true)
+	c.Check(err, check.IsNil)
+
+	// create service
+	instances := 5
+	parallelism := 2
+	rollbackParallelism := 3
+	id := d.CreateService(c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst))
+
+	checkStartingTasks := func(expected int) []swarm.Task {
+		var startingTasks []swarm.Task
+		waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+			tasks := d.GetServiceTasks(c, id)
+			startingTasks = nil
+			for _, t := range tasks {
+				if t.Status.State == swarm.TaskStateStarting {
+					startingTasks = append(startingTasks, t)
+				}
+			}
+			return startingTasks, nil
+		}, checker.HasLen, expected)
+
+		return startingTasks
+	}
+
+	makeTasksHealthy := func(tasks []swarm.Task) {
+		for _, t := range tasks {
+			containerID := t.Status.ContainerStatus.ContainerID
+			d.Cmd("exec", containerID, "touch", "/status")
+		}
+	}
+
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances})
+
+	// issue service update
+	service := d.GetService(c, id)
+	d.UpdateService(c, service, setImage(image2))
+
+	// first batch
+
+	// The old tasks should be running, and the new ones should be starting.
+	startingTasks := checkStartingTasks(parallelism)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances})
+
+	// make it healthy
+	makeTasksHealthy(startingTasks)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances - parallelism, image2: parallelism})
+
+	// 2nd batch
+
+	// The old tasks should be running, and the new ones should be starting.
+	startingTasks = checkStartingTasks(parallelism)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances - parallelism, image2: parallelism})
+
+	// make it healthy
+	makeTasksHealthy(startingTasks)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})
+
+	// 3rd batch
+
+	// The old tasks should be running, and the new ones should be starting.
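+	// With 5 instances and two batches of 2 already updated, only a single
+	// task remains for this final batch.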
+ startingTasks = checkStartingTasks(1) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. + out, err := d.Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:badtag" + + // create service + instances := 5 + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].GetService(c, id) + daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + + // should update 2 tasks and then pause + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].CheckServiceRunningTasks(id)(c) + c.Assert(v, checker.Equals, instances-2) + + // Roll back to the previous version. This uses the CLI because + // rollback used to be a client-side operation. 
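+	// setRollbackOrder above also set Order to start-first, so the
+	// rollback follows the same new-task-first pattern as the update.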
+ out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // create service + constraints := []string{"node.role==worker"} + instances := 3 + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + // validate tasks are running on worker nodes + tasks := daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + node := daemons[0].GetNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.role!=worker"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are running on manager nodes + for _, task := range tasks { + node := daemons[0].GetNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.role==nosuchrole"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + // validate tasks are not assigned to any node + tasks = daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "low", + } + }) + } + + // create service + instances := 3 + constraints := []string{"node.labels.security==high"} + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := 
daemons[0].GetServiceTasks(c, id) + // validate all tasks are running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[0].ID) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.labels.security!=high"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + // validate all tasks are NOT running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) + } + //remove service + daemons[0].RemoveService(c, id) + + constraints = []string{"node.labels.security==medium"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + //remove service + daemons[0].RemoveService(c, id) + + // multiple constraints + constraints = []string{ + "node.labels.security==high", + fmt.Sprintf("node.id==%s", nodes[1].ID), + } + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + // make nodes[1] fulfills the constraints + daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[1].ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Swarm + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "rack": "a", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "rack": "b", + } + }) + } + + // create service + instances := 4 + prefs := []swarm.PlacementPreference{{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.rack"}}} + id := daemons[0].CreateService(c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, 
daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].GetServiceTasks(c, id) + // validate all tasks are running on nodes[0] + tasksOnNode := make(map[string]int) + for _, task := range tasks { + tasksOnNode[task.NodeID]++ + } + c.Assert(tasksOnNode[nodes[0].ID], checker.Equals, 2) + c.Assert(tasksOnNode[nodes[1].ID], checker.Equals, 1) + c.Assert(tasksOnNode[nodes[2].ID], checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept + + instances := 9 + d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + getContainers := func() map[string]*daemon.Swarm { + m := make(map[string]*daemon.Swarm) + for _, d := range []*daemon.Swarm{d1, d2, d3} { + for _, id := range d.ActiveContainers() { + m[id] = d + } + } + return m + } + + containers := getContainers() + c.Assert(containers, checker.HasLen, instances) + var toRemove string + for i := range containers { + toRemove = i + } + + _, err := containers[toRemove].Cmd("stop", toRemove) + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + containers2 := getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } + + containers = containers2 + for i := range containers { + toRemove = i + } + + // try with killing process outside of docker + pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) + c.Assert(err, checker.IsNil) + pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) + c.Assert(err, checker.IsNil) + c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil) + + time.Sleep(time.Second) // give some time to handle the signal + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + containers2 = getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } +} diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go new file mode 100644 index 0000000..03cf899 --- /dev/null +++ b/integration-cli/docker_api_swarm_test.go @@ -0,0 +1,1039 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + 
"github.com/docker/docker/integration-cli/daemon" + "github.com/docker/swarmkit/ca" + "github.com/go-check/check" +) + +var defaultReconciliationTimeout = 30 * time.Second + +func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { + // todo: should find a better way to verify that components are running than /info + d1 := s.AddDaemon(c, true, true) + info, err := d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.Cluster.RootRotationInProgress, checker.False) + + d2 := s.AddDaemon(c, true, false) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Leaving cluster + c.Assert(d2.Leave(false), checker.IsNil) + + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Current state restoring after restarts + d1.Stop(c) + d2.Stop(c) + + d1.Start(c) + d2.Start(c) + + info, err = d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + + // todo: error message differs depending if some components of token are valid + + d2 := s.AddDaemon(c, false, false) + err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "invalid join token") + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken := d1.JoinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change tokens + d1.RotateTokens(c) + + err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.SwarmInfo() + 
c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken = d1.JoinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change spec, don't change tokens + d1.UpdateSwarm(c, func(s *swarm.Spec) {}) + + err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d1.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasnoca.org", + }, + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasacacert.org", + CACert: "cacert", + }, + } + }) + info, err := d1.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert") +} + +func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + splitToken := strings.Split(d1.JoinTokens(c).Worker, "-") + splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" + replacementToken := strings.Join(splitToken, "-") + err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") +} + +func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d2 := s.AddDaemon(c, true, false) + + info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) + + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleWorker + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False) + + // Wait for the role to change to worker in the cert. 
This is partially
+	// done because it's something worth testing in its own right, and
+	// partially because changing the role from manager to worker and then
+	// back to manager quickly might cause the node to pause for a while
+	// while waiting for the role to change to worker, and the test can
+	// time out during this interval.
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
+		if err != nil {
+			return "", check.Commentf("error: %v", err)
+		}
+		certs, err := helpers.ParseCertificatesPEM(certBytes)
+		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
+			return certs[0].Subject.OrganizationalUnit[0], nil
+		}
+		return "", check.Commentf("could not get organizational unit from certificate")
+	}, checker.Equals, "swarm-worker")
+
+	// Demoting the last node should fail
+	node := d1.GetNode(c, d1.NodeID)
+	node.Spec.Role = swarm.NodeRoleWorker
+	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
+	status, out, err := d1.SockRequest("POST", url, node.Spec)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out)))
+	// The warning specific to demoting the last manager is best-effort and
+	// won't appear until the Role field of the demoted manager has been
+	// updated.
+	// Yes, I know this looks silly, but checker.Matches is broken, since
+	// it anchors the regexp contrary to the documentation, and this makes
+	// it impossible to match something that includes a line break.
+	if !strings.Contains(string(out), "last manager of the swarm") {
+		c.Assert(string(out), checker.Contains, "this would result in a loss of quorum")
+	}
+	info, err = d1.SwarmInfo()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+	c.Assert(info.ControlAvailable, checker.True)
+
+	// Promote the node that was just demoted
+	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
+		n.Spec.Role = swarm.NodeRoleManager
+	})
+
+	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
+	// add three managers, one of which is the leader
+	d1 := s.AddDaemon(c, true, true)
+	d2 := s.AddDaemon(c, true, true)
+	d3 := s.AddDaemon(c, true, true)
+
+	// start a service by hitting each of the 3 managers
+	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
+		s.Spec.Name = "test1"
+	})
+	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
+		s.Spec.Name = "test2"
+	})
+	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
+		s.Spec.Name = "test3"
+	})
+
+	// 3 services should be started now, because the requests were proxied to the leader
+	// query each node and make sure it returns 3 services
+	for _, d := range []*daemon.Swarm{d1, d2, d3} {
+		services := d.ListServices(c)
+		c.Assert(services, checker.HasLen, 3)
+	}
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
+	// Create 3 nodes
+	d1 := s.AddDaemon(c, true, true)
+	d2 := s.AddDaemon(c, true, true)
+	d3 := s.AddDaemon(c, true, true)
+
+	// assert that the first node we made is the leader, and the other two are followers
+	c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
+	c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
+	c.Assert(d1.GetNode(c, 
d3.NodeID).ManagerStatus.Leader, checker.False)
+
+	d1.Stop(c)
+
+	var (
+		leader    *daemon.Swarm   // keep track of leader
+		followers []*daemon.Swarm // keep track of followers
+	)
+	checkLeader := func(nodes ...*daemon.Swarm) checkF {
+		return func(c *check.C) (interface{}, check.CommentInterface) {
+			// clear these out before each run
+			leader = nil
+			followers = nil
+			for _, d := range nodes {
+				if d.GetNode(c, d.NodeID).ManagerStatus.Leader {
+					leader = d
+				} else {
+					followers = append(followers, d)
+				}
+			}
+
+			if leader == nil {
+				return false, check.Commentf("no leader elected")
+			}
+
+			return true, check.Commentf("elected %v", leader.ID())
+		}
+	}
+
+	// wait for an election to occur
+	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)
+
+	// assert that we have a new leader
+	c.Assert(leader, checker.NotNil)
+
+	// Keep track of the current leader, since we want that to be chosen.
+	stableleader := leader
+
+	// add d1, the initial leader, back
+	d1.Start(c)
+
+	// TODO(stevvooe): may need to wait for rejoin here
+
+	// wait for possible election
+	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
+	// pick out the leader and the followers again
+
+	// verify that we still only have 1 leader and 2 followers
+	c.Assert(leader, checker.NotNil)
+	c.Assert(followers, checker.HasLen, 2)
+	// and that after we added d1 back, the leader hasn't changed
+	c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID)
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
+	d1 := s.AddDaemon(c, true, true)
+	d2 := s.AddDaemon(c, true, true)
+	d3 := s.AddDaemon(c, true, true)
+
+	d1.CreateService(c, simpleTestService)
+
+	d2.Stop(c)
+
+	// make sure there is a leader
+	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
+
+	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
+		s.Spec.Name = "top1"
+	})
+
+	d3.Stop(c)
+
+	var service swarm.Service
+	simpleTestService(&service)
+	service.Spec.Name = "top2"
+	status, out, err := d1.SockRequest("POST", "/services/create", service.Spec)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out)))
+
+	d2.Start(c)
+
+	// make sure there is a leader
+	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
+
+	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
+		s.Spec.Name = "top3"
+	})
+}
+
+func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	instances := 2
+	d.CreateService(c, simpleTestService, setInstances(instances))
+
+	id, err := d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, checker.IsNil)
+	id = strings.TrimSpace(id)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)
+
+	c.Assert(d.Leave(false), checker.NotNil)
+	c.Assert(d.Leave(true), checker.IsNil)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+
+	id2, err := d.Cmd("ps", "-q")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
+}
+
+// #23629
+func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
+	testRequires(c, Network)
+	s.AddDaemon(c, true, true)
+	d2 := s.AddDaemon(c, false, false)
+
+	id, err := d2.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, checker.IsNil)
+	id = strings.TrimSpace(id)
+
+	err = 
d2.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + info, err := d2.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d2.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) + + id2, err := d2.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23705 +func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { + testRequires(c, Network) + d := s.AddDaemon(c, false, false) + err := d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + d.Stop(c) + d.Start(c) + + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + d1.GetService(c, id) + d1.Stop(c) + d1.Start(c) + d1.GetService(c, id) + + d2 := s.AddDaemon(c, true, true) + d2.GetService(c, id) + d2.Stop(c) + d2.Start(c) + d2.GetService(c, id) + + d3 := s.AddDaemon(c, true, true) + d3.GetService(c, id) + d3.Stop(c) + d3.Start(c) + d3.GetService(c, id) + + d3.Kill() + time.Sleep(1 * time.Second) // time to handle signal + d3.Start(c) + d3.GetService(c, id) +} + +func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers := d.ActiveContainers() + instances = 4 + d.UpdateService(c, d.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers2 := d.ActiveContainers() + +loop0: + for _, c1 := range containers { + for _, c2 := range containers2 { + if c1 == c2 { + continue loop0 + } + } + c.Errorf("container %v not found in new set %#v", c1, containers2) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) { + d := s.AddDaemon(c, false, false) + req := swarm.InitRequest{ + ListenAddr: "", + } + status, _, err := d.SockRequest("POST", "/swarm/init", req) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + + req2 := swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + RemoteAddrs: []string{""}, + } + status, _, err = d.SockRequest("POST", "/swarm/join", req2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) +} + +func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + // drain d2, all 
containers should move to d1 + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) + + d2.Stop(c) + + c.Assert(d1.Init(swarm.InitRequest{ + ForceNewCluster: true, + Spec: swarm.Spec{}, + }), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + + d3 := s.AddDaemon(c, true, true) + info, err := d3.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + instances = 4 + d3.UpdateService(c, d3.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) +} + +func simpleTestService(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + } + s.Spec.Name = "top" +} + +func serviceForUpdate(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 2, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + RollbackConfig: &swarm.UpdateConfig{ + Parallelism: 3, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + } + s.Spec.Name = "updatetest" +} + +func setInstances(replicas int) daemon.ServiceConstructor { + ureplicas := uint64(replicas) + return func(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + } + } +} + +func setUpdateOrder(order string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.UpdateConfig == nil { + s.Spec.UpdateConfig = &swarm.UpdateConfig{} + } + s.Spec.UpdateConfig.Order = order + } +} + +func setRollbackOrder(order string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.RollbackConfig == nil { + s.Spec.RollbackConfig = &swarm.UpdateConfig{} + } + s.Spec.RollbackConfig.Order = order + } +} + +func setImage(image string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.TaskTemplate.ContainerSpec.Image = image + } +} + +func setFailureAction(failureAction string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.FailureAction = failureAction + } +} + +func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio + } +} + +func 
setParallelism(parallelism uint64) daemon.ServiceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.Parallelism = parallelism + } +} + +func setConstraints(constraints []string) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Constraints = constraints + } +} + +func setPlacementPrefs(prefs []swarm.PlacementPreference) daemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Preferences = prefs + } +} + +func setGlobalMode(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + } +} + +func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) { + var totalMCount, totalWCount int + + for _, d := range cl { + var ( + info swarm.Info + err error + ) + + // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error + checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { + info, err = d.SwarmInfo() + return err, check.Commentf("cluster not ready in time") + } + waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) + if !info.ControlAvailable { + totalWCount++ + continue + } + + var leaderFound bool + totalMCount++ + var mCount, wCount int + + for _, n := range d.ListNodes(c) { + waitReady := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Status.State == swarm.NodeStateReady { + return true, nil + } + nn := d.GetNode(c, n.ID) + n = *nn + return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True) + + waitActive := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Spec.Availability == swarm.NodeAvailabilityActive { + return true, nil + } + nn := d.GetNode(c, n.ID) + n = *nn + return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True) + + if n.Spec.Role == swarm.NodeRoleManager { + c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID)) + if n.ManagerStatus.Leader { + leaderFound = true + } + mCount++ + } else { + c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) + wCount++ + } + } + c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID)) + c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID)) + c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID)) + } + c.Assert(totalMCount, checker.Equals, managerCount) + c.Assert(totalWCount, checker.Equals, workerCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { + mCount, wCount := 5, 1 + + var nodes []*daemon.Swarm + for i := 0; i < mCount; i++ { + manager := s.AddDaemon(c, true, true) + info, err := manager.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, 
swarm.LocalNodeStateActive) + nodes = append(nodes, manager) + } + + for i := 0; i < wCount; i++ { + worker := s.AddDaemon(c, true, false) + info, err := worker.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, worker) + } + + // stop whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *daemon.Swarm) { + defer wg.Done() + if err := daemon.StopWithError(); err != nil { + errs <- err + } + // FIXME(vdemeester) This is duplicated… + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + daemon.Root = filepath.Dir(daemon.Root) + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + // start whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *daemon.Swarm) { + defer wg.Done() + if err := daemon.StartWithError("--iptables=false"); err != nil { + errs <- err + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + checkClusterHealth(c, nodes, mCount, wCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + service := d.GetService(c, id) + instances = 5 + + setInstances(instances)(service) + url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) +} + +// Unlocking an unlocked swarm results in an error +func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) { + d := s.AddDaemon(c, true, true) + err := d.Unlock(swarm.UnlockRequest{UnlockKey: "wrong-key"}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "swarm is not locked") +} + +// #29885 +func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) { + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort)) + c.Assert(err, checker.IsNil) + defer ln.Close() + d := s.AddDaemon(c, false, false) + err = d.Init(swarm.InitRequest{}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "address already in use") +} + +// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, +// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. +// This test makes sure the fixes correctly output scopes instead. 
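+// Two networks are created under the same name, one with the (local-scoped)
+// bridge driver and one with the (swarm-scoped) overlay driver, and the test
+// asserts that each network keeps its own scope.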
+func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + var n1 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "bridge" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n1), checker.IsNil) + + var n2 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n2), checker.IsNil) + + var r1 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r1), checker.IsNil) + + c.Assert(r1.Scope, checker.Equals, "local") + + var r2 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r2), checker.IsNil) + + c.Assert(r2.Scope, checker.Equals, "swarm") +} + +// Test case for 30178 +func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "lb") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + instances := 1 + d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) { + s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{} + s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{ + {Target: "lb"}, + } + }) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + containers := d.ActiveContainers() + + out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) { + m := s.AddDaemon(c, true, true) + w := s.AddDaemon(c, true, false) + + info, err := m.SwarmInfo() + c.Assert(err, checker.IsNil) + + currentTrustRoot := info.Cluster.TLSInfo.TrustRoot + + // rotate multiple times + for i := 0; i < 4; i++ { + var cert, key []byte + if i%2 != 0 { + cert, _, key, err = initca.New(&csr.CertificateRequest{ + CN: "newRoot", + KeyRequest: csr.NewBasicKeyRequest(), + CA: &csr.CAConfig{Expiry: ca.RootCAExpiration}, + }) + c.Assert(err, checker.IsNil) + } + expectedCert := string(cert) + m.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.SigningCACert = expectedCert + s.CAConfig.SigningCAKey = string(key) + s.CAConfig.ForceRotate++ + }) + + // poll to make sure update succeeds + var clusterTLSInfo swarm.TLSInfo + for j := 0; j < 18; j++ { + info, err := m.SwarmInfo() + c.Assert(err, checker.IsNil) + + // the desired CA cert and key is always redacted + c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "") + 
c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "") + + clusterTLSInfo = info.Cluster.TLSInfo + + // if root rotation is done and the trust root has changed, we don't have to poll anymore + if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot { + break + } + + // root rotation not done + time.Sleep(250 * time.Millisecond) + } + if cert != nil { + c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert) + } + // could take another second or two for the nodes to trust the new roots after they've all gotten + // new TLS certificates + for j := 0; j < 18; j++ { + mInfo := m.GetNode(c, m.NodeID).Description.TLSInfo + wInfo := m.GetNode(c, w.NodeID).Description.TLSInfo + + if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot { + break + } + + // nodes don't trust root certs yet + time.Sleep(250 * time.Millisecond) + } + + c.Assert(m.GetNode(c, m.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + c.Assert(m.GetNode(c, w.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + currentTrustRoot = clusterTLSInfo.TrustRoot + } +} + +func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + } + + var n types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + c.Assert(json.Unmarshal(out, &n), checker.IsNil) + + var r types.NetworkResource + + status, body, err := d.SockRequest("GET", "/networks/"+name, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + c.Assert(json.Unmarshal(body, &r), checker.IsNil) + c.Assert(r.Scope, checker.Equals, "swarm") + c.Assert(r.ID, checker.Equals, n.ID) + + v := url.Values{} + v.Set("scope", "local") + + status, body, err = d.SockRequest("GET", "/networks/"+name+"?"+v.Encode(), nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf(string(out))) +} diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go new file mode 100644 index 0000000..1af77ea --- /dev/null +++ b/integration-cli/docker_api_test.go @@ -0,0 +1,122 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) { + resp, _, err := request.Do("/", request.Method(http.MethodOptions)) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { + res, body, err := request.Get("/version") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + body.Close() + // TODO: @runcom incomplete tests, why did old integration tests have these headers + // while
here none of the headers below are in the response? + //c.Log(res.Header) + //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") + //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") +} + +func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { + if testEnv.DaemonPlatform() != runtime.GOOS { + c.Skip("Daemon platform doesn't match test platform") + } + if api.MinVersion == api.DefaultVersion { + c.Skip("API MinVersion==DefaultVersion") + } + v := strings.Split(api.MinVersion, ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, checker.IsNil) + vMinInt-- + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + resp, body, err := request.Get("/v" + version + "/version") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusBadRequest) + expected := fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) + content, err := ioutil.ReadAll(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(content)), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPIDockerAPIVersion(c *check.C) { + var svrVersion string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + url := r.URL.Path + svrVersion = url + })) + defer server.Close() + + // Test using the env var first + result := cli.Docker(cli.Args("-H="+server.URL[7:], "version"), cli.WithEnvironmentVariables(appendBaseEnv(false, "DOCKER_API_VERSION=xxx")...)) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "API version: xxx", ExitCode: 1}) + c.Assert(svrVersion, check.Equals, "/vxxx/version", check.Commentf("%s", result.Compare(icmd.Success))) +} + +func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { + httpResp, body, err := request.Post("/containers/create", request.JSONBody(struct{}{})) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { + // Windows requires API 1.25 or later. This test is validating a behaviour which was present + // in v1.23, but changed in 1.24, hence not applicable on Windows. 
See apiVersionSupportsJSONErrors + testRequires(c, DaemonIsLinux) + httpResp, body, err := request.Post("/v1.23/containers/create", request.JSONBody(struct{}{})) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { + // 404 is a different code path to normal errors, so test separately + httpResp, body, err := request.Get("/notfound", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") +} + +func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { + httpResp, body, err := request.Get("/v1.23/notfound", request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := testutil.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") +} diff --git a/integration-cli/docker_api_update_unix_test.go b/integration-cli/docker_api_update_unix_test.go new file mode 100644 index 0000000..1af6ed1 --- /dev/null +++ b/integration-cli/docker_api_update_unix_test.go @@ -0,0 +1,36 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIUpdateContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "apiUpdateContainer" + hostConfig := map[string]interface{}{ + "Memory": 314572800, + "MemorySwap": 524288000, + } + dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") + _, _, err := request.SockRequest("POST", "/containers/"+name+"/update", hostConfig, daemonHost()) + c.Assert(err, check.IsNil) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} diff --git a/integration-cli/docker_api_version_test.go b/integration-cli/docker_api_version_test.go new file mode 100644 index 0000000..5f919de --- /dev/null +++ b/integration-cli/docker_api_version_test.go @@ -0,0 +1,24 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + 
status, body, err := request.SockRequest("GET", "/version", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var v types.Version + + c.Assert(json.Unmarshal(body, &v), checker.IsNil) + + c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) +} diff --git a/integration-cli/docker_api_volumes_test.go b/integration-cli/docker_api_volumes_test.go new file mode 100644 index 0000000..f354856 --- /dev/null +++ b/integration-cli/docker_api_volumes_test.go @@ -0,0 +1,102 @@ +package main + +import ( + "encoding/json" + "net/http" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumesAPIList(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") + + status, b, err := request.SockRequest("GET", "/volumes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) +} + +func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := request.SockRequest("POST", "/volumes/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + var vol types.Volume + err = json.Unmarshal(b, &vol) + c.Assert(err, checker.IsNil) + + c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) +} + +func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") + + status, b, err := request.SockRequest("GET", "/volumes", nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + v := volumes.Volumes[0] + status, _, err = request.SockRequest("DELETE", "/volumes/"+v.Name, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) + + dockerCmd(c, "rm", "-f", "test") + status, data, err := request.SockRequest("DELETE", "/volumes/"+v.Name, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) + +} + +func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + // sample the current time truncated to the minute so that small delays don't cause false positives + now := time.Now().Truncate(time.Minute) + status, b, err := request.SockRequest("POST", "/volumes/create", config, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + status, b, err = request.SockRequest("GET", "/volumes", nil, daemonHost()) + c.Assert(err,
checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + var vol types.Volume + status, b, err = request.SockRequest("GET", "/volumes/"+config.Name, nil, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + c.Assert(json.Unmarshal(b, &vol), checker.IsNil) + c.Assert(vol.Name, checker.Equals, config.Name) + + // compare the CreatedAt field of the new volume to now; both are truncated to the minute to avoid false positives + testCreatedAt, err := time.Parse(time.RFC3339, strings.TrimSpace(vol.CreatedAt)) + c.Assert(err, check.IsNil) + testCreatedAt = testCreatedAt.Truncate(time.Minute) + if !testCreatedAt.Equal(now) { + c.Fatalf("Volume CreatedAt time %s is not equal to current time %s", testCreatedAt, now) + } +} diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go new file mode 100644 index 0000000..33ecb44 --- /dev/null +++ b/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +const attachWait = 5 * time.Second + +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + cli.DockerCmd(c, "run", "--name", "attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + cli.WaitRun(c, "attacher") + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + cmd := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + cmd.Wait() + endGroup.Done() + }() + + out, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer out.Close() + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + c.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + c.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not initialize properly") + } + + cli.DockerCmd(c, "kill", "attacher") + + select { + case <-endDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not finish properly") + } +} + +func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + done := make(chan error) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + done <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". 
If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + done <- fmt.Errorf("attach should have failed") + return + } else if !strings.Contains(out, expected) { + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-done: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Process.Kill() + + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + c.Assert(stdin.Close(), check.IsNil) + + // Expect container to still be running after stdin is closed + running := inspectField(c, id, "State.Running") + c.Assert(running, check.Equals, "true") +} + +func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { + testRequires(c, IsPausable) + runSleepingContainer(c, "-d", "--name=test") + dockerCmd(c, "pause", "test") + + result := dockerCmdWithResult("attach", "test") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 1", + ExitCode: 1, + Err: "You cannot attach to a paused container, unpause it first", + }) +} diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go new file mode 100644 index 0000000..78f55e0 --- /dev/null +++ b/integration-cli/docker_cli_attach_unix_test.go @@ -0,0 +1,237 @@ +// +build !windows + +package main + +import ( + "bufio" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #9860 Make sure attach ends when container ends (with no errors) +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + + attachCmd := exec.Command(dockerBinary, "attach", id) + attachCmd.Stdin = tty + attachCmd.Stdout = tty + attachCmd.Stderr = tty + err = attachCmd.Start() + c.Assert(err, check.IsNil) + + errChan := make(chan error) + go func() { + time.Sleep(300 * time.Millisecond) + defer close(errChan) + // Container is waiting for us to signal it to stop + dockerCmd(c, "stop", id) + // And wait for the attach command to end + errChan <- attachCmd.Wait() + }() + + // Wait for the docker to end (should be done by the + // stop command in the go routine) + dockerCmd(c, "wait", id) + + select { + case err := <-errChan: + tty.Close() + out, _ := ioutil.ReadAll(pty) + c.Assert(err, check.IsNil, check.Commentf("out: %v", string(out))) + case <-time.After(attachWait): + c.Fatal("timed out without attach returning") + } + +} + +func (s *DockerSuite) 
TestAttachAfterDetach(c *check.C) { + name := "detachtest" + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + errChan := make(chan error) + go func() { + errChan <- cmd.Run() + close(errChan) + }() + + c.Assert(waitRun(name), check.IsNil) + + cpty.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + cpty.Write([]byte{17}) + + select { + case err := <-errChan: + if err != nil { + buff := make([]byte, 200) + tty.Read(buff) + c.Fatalf("%s: %s", err, buff) + } + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } + + cpty, tty, err = pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + + cmd = exec.Command(dockerBinary, "attach", name) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + err = cmd.Start() + c.Assert(err, checker.IsNil) + + bytes := make([]byte, 10) + var nBytes int + readErr := make(chan error, 1) + + go func() { + time.Sleep(500 * time.Millisecond) + cpty.Write([]byte("\n")) + time.Sleep(500 * time.Millisecond) + + nBytes, err = cpty.Read(bytes) + cpty.Close() + readErr <- err + }() + + select { + case err := <-readErr: + c.Assert(err, check.IsNil) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for attach read") + } + + err = cmd.Wait() + c.Assert(err, checker.IsNil) + + c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, check.IsNil) + c.Assert(waitRun(id), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer 
stdout.Close() + err = cmd.Start() + c.Assert(err, checker.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} diff --git a/integration-cli/docker_cli_authz_plugin_v2_test.go b/integration-cli/docker_cli_authz_plugin_v2_test.go new file mode 100644 index 0000000..8dc0322 --- /dev/null +++ b/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -0,0 +1,165 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +var ( + authzPluginName = "riyaz/authz-no-volume-plugin" + authzPluginTag = "latest" + authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag + authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" + nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" +) + +func init() { +} + +type DockerAuthzV2Suite struct { + ds *DockerSuite + d *daemon.Daemon +} + +func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.d.Start(c) +} + +func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer disabling the plugin + defer func() { + s.d.Restart(c) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + // Ensure docker run command and accompanying docker ps are successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginDisable(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load 
busybox, --net=none build fails otherwise + // because it needs to pull busybox + s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer removing the plugin + defer func() { + s.d.Restart(c) + _, err = s.d.Cmd("plugin", "rm", "-f", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // disable the plugin + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // now test to see if the docker api works. + _, err = s.d.Cmd("volume", "create") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // restart the daemon with the plugin + s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag) + + // defer disabling the plugin + defer func() { + s.d.Restart(c) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // The plugin will block the command before it can determine the volume does not exist + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "prune", "-f") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin with bad manifest + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginBadManifestName) + c.Assert(err, checker.IsNil) + + // start the daemon with the plugin, it will error + c.Assert(s.d.RestartWithError("--authorization-plugin="+authzPluginBadManifestName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + s.d.Restart(c) +} + +func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + // start the daemon with a non-existent authz plugin, it will error + c.Assert(s.d.RestartWithError("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + s.d.Start(c) +} diff --git 
a/integration-cli/docker_cli_authz_unix_test.go b/integration-cli/docker_cli_authz_unix_test.go new file mode 100644 index 0000000..959292f --- /dev/null +++ b/integration-cli/docker_cli_authz_unix_test.go @@ -0,0 +1,475 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "bufio" + "bytes" + "os/exec" + "strconv" + "time" + + "net" + "net/http/httputil" + "net/url" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +const ( + testAuthZPlugin = "authzplugin" + unauthorizedMessage = "User unauthorized authz plugin" + errorMessage = "something went wrong..." + containerListAPI = "/containers/json" +) + +var ( + alwaysAllowed = []string{"/_ping", "/info"} +) + +func init() { + check.Suite(&DockerAuthzSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzSuite struct { + server *httptest.Server + ds *DockerSuite + d *daemon.Daemon + ctrl *authorizationController +} + +type authorizationController struct { + reqRes authorization.Response // reqRes holds the plugin response to the initial client request + resRes authorization.Response // resRes holds the plugin response to the daemon response + psRequestCnt int // psRequestCnt counts the number of calls to list container request api + psResponseCnt int // psResponseCnt counts the number of calls to list containers response API + requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller + reqUser string + resUser string +} + +func (s *DockerAuthzSuite) SetUpTest(c *check.C) { + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.ctrl = &authorizationController{} +} + +func (s *DockerAuthzSuite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + s.ctrl = nil + } +} + +func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) + c.Assert(err, check.IsNil) + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) + assertAuthHeaders(c, authReq.RequestHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psRequestCnt++ + } + + s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) + + reqRes := s.ctrl.reqRes + if isAllowed(authReq.RequestURI) { + reqRes = authorization.Response{Allow: true} + } + if reqRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(reqRes) + c.Assert(err, check.IsNil) + s.ctrl.reqUser = authReq.User + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := 
authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) + assertAuthHeaders(c, authReq.ResponseHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psResponseCnt++ + } + resRes := s.ctrl.resRes + if isAllowed(authReq.RequestURI) { + resRes = authorization.Response{Allow: true} + } + if resRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(resRes) + c.Assert(err, check.IsNil) + s.ctrl.resUser = authReq.User + w.Write(b) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) + err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) + c.Assert(err, checker.IsNil) +} + +// check for always allowed endpoints to not inhibit test framework functions +func isAllowed(reqURI string) bool { + for _, endpoint := range alwaysAllowed { + if strings.HasSuffix(reqURI, endpoint) { + return true + } + } + return false +} + +// assertAuthHeaders validates authentication headers are removed +func assertAuthHeaders(c *check.C, headers map[string]string) error { + for k := range headers { + if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { + c.Errorf("Found authentication headers in request '%v'", headers) + } + } + return nil +} + +// assertBody asserts that body is removed for non text/json requests +func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { + if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { + //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) + c.Errorf("Body included for authentication endpoint %s", string(body)) + } + + for k, v := range headers { + if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { + return + } + } + if len(body) > 0 { + c.Errorf("Body included while it should not be (Headers: '%v')", headers) + } +} + +func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { + // start the daemon and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // Ensure command successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { + + const testDaemonHTTPSAddr = "tcp://localhost:4271" + // start the daemon and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + s.d.Start(c, + "--authorization-plugin="+testAuthZPlugin, + "--tlsverify", +
"--tlscacert", + "fixtures/https/ca.pem", + "--tlscert", + "fixtures/https/server-cert.pem", + "--tlskey", + "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "-H", + testDaemonHTTPSAddr, + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + c.Assert(s.ctrl.reqUser, check.Equals, "client") + c.Assert(s.ctrl.resUser, check.Equals, "client") +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = false + s.ctrl.reqRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 0) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden +func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + daemonURL, err := url.Parse(s.d.Sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden) + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAllowEventStream verifies event stream propagates correctly after request pass through by the authorization plugin +func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { + testRequires(c, DaemonIsLinux) + + // start the daemon and load busybox to avoid pulling busybox from Docker Hub + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) + // Add another command to to enable event pipelining + eventsCmd := exec.Command(dockerBinary, "--host", s.d.Sock(), "events", "--since", startTime) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Assert(err, check.IsNil) + } + + observer := eventObserver{ + buffer: new(bytes.Buffer), + command: 
eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + } + + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // Create a container and wait for the creation events + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + containerID := strings.TrimSpace(out) + c.Assert(s.d.WaitRun(containerID), checker.IsNil) + + events := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", events) + processor := processEventMatch(events) + go observer.Match(matcher, processor) + + // Ensure all events are received + for event, eventChannel := range events { + + select { + case <-time.After(30 * time.Second): + // Fail the test + observer.CheckEventError(c, containerID, event, matcher) + c.FailNow() + case <-eventChannel: + // Ignore, event received + } + } + + // Ensure both events and container endpoints are passed to the authorization plugin + assertURIRecorded(c, s.ctrl.requestsURIs, "/events") + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // assert plugin is only called once.. 
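+ // (registering the same plugin twice via --authorization-plugin is expected to be de-duplicated by the daemon, so the ps request and response hooks should each fire exactly once)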
+ c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { + s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + tmp, err := ioutil.TempDir("", "test-authz-load-import") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + savedImagePath := filepath.Join(tmp, "save.tar") + + out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("load", "--input", savedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) + + exportedImagePath := filepath.Join(tmp, "export.tar") + + out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("import", exportedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { + s.d.Start(c, "--debug", "--authorization-plugin="+testAuthZPlugin) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + daemonURL, err := url.Parse(s.d.Sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") +} + +// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin +func assertURIRecorded(c *check.C, uris []string, uri string) { + var found bool + for _, u := range uris { + if strings.Contains(u, uri) { + found = true + break + } + } + if !found { + c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) + } +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go new file mode 100644 index 0000000..77b151e --- /dev/null +++ b/integration-cli/docker_cli_build_test.go @@ -0,0 +1,6490 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/integration-cli/cli/build/fakegit" + "github.com/docker/docker/integration-cli/cli/build/fakestorage" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { + cli.BuildCmd(c, "testbuildjsonemptyrun", build.WithDockerfile(` + FROM busybox + RUN [] + `)) +} + +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { + name := 
"testbuildshcmdjsonentrypoint" + expected := "/bin/sh -c echo test" + if testEnv.DaemonPlatform() == "windows" { + expected = "cmd /S /C echo test" + } + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENTRYPOINT ["echo"] + CMD echo test + `)) + out, _ := dockerCmd(c, "run", "--rm", name) + + if strings.TrimSpace(out) != expected { + c.Fatalf("CMD did not contain %q : %q", expected, out) + } +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { + // Windows does not support FROM scratch or the USER command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM scratch + ENV user foo + USER ${user} + `)) + res := inspectFieldJSON(c, name, "Config.User") + + if res != `"foo"` { + c.Fatal("User foo from environment not in Config.User on image") + } +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { + name := "testbuildenvironmentreplacement" + + var volumePath string + + if testEnv.DaemonPlatform() == "windows" { + volumePath = "c:/quux" + } else { + volumePath = "/quux" + } + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+minimalBaseImage()+` + ENV volume `+volumePath+` + VOLUME ${volume} + `)) + + var volumes map[string]interface{} + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &volumes) + if _, ok := volumes[volumePath]; !ok { + c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { + // Windows does not support FROM scratch or the EXPOSE command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM scratch + ENV port 80 + EXPOSE ${port} + ENV ports " 99 100 " + EXPOSE ${ports} + `)) + + var exposedPorts map[string]interface{} + inspectFieldAndUnmarshall(c, name, "Config.ExposedPorts", &exposedPorts) + exp := []int{80, 99, 100} + for _, p := range exp { + tmp := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[tmp]; !ok { + c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `)) + res := inspectFieldJSON(c, name, "Config.WorkingDir") + + expected := `"/work"` + if testEnv.DaemonPlatform() == "windows" { + expected = `"C:\\work"` + } + if res != expected { + c.Fatalf("Workdir /workdir from environment not in Config.WorkingDir on image: %s", res) + } +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM `+minimalBaseImage()+` + ENV baz foo + ENV quux bar + ENV dot . 
+ ENV fee fff + ENV gee ggg + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + ADD ${zzz:-${fee}} ${dot} + COPY ${zzz:-${gee}} ${dot} + `), + build.WithFile("foo", "test1"), + build.WithFile("bar", "test2"), + build.WithFile("fff", "test3"), + build.WithFile("ggg", "test4"), + )) +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV foo zzz + ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + ENV foo2="abc\def" + RUN [ "$foo2" = 'abc\def' ] + ENV foo3="abc\\def" + RUN [ "$foo3" = 'abc\def' ] + ENV foo4='abc\\def' + RUN [ "$foo4" = 'abc\\def' ] + ENV foo5='abc\def' + RUN [ "$foo5" = 'abc\def' ] + `)) + + envResult := []string{} + inspectFieldAndUnmarshall(c, name, "Config.Env", &envResult) + found := false + envCount := 0 + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "zzz" { + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "zzz" { + c.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1]) + } + } + } + + if !found { + c.Fatal("Never found the `bar` env variable") + } + + if envCount != 4 { + c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + } + +} + +func (s *DockerSuite) TestBuildHandleEscapesInVolume(c *check.C) { + // The volume paths used in this test are invalid on Windows + testRequires(c, DaemonIsLinux) + name := "testbuildhandleescapes" + + testCases := []struct { + volumeValue string + expected string + }{ + { + volumeValue: "${FOO}", + expected: "bar", + }, + { + volumeValue: `\${FOO}`, + expected: "${FOO}", + }, + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise.
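+ // Concretely: only the backslash immediately before the $ acts as the escape and is swallowed; the remaining six backslashes should come back as literals, and ${FOO} should not be expanded to "bar".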
+ { + volumeValue: `\\\\\\\${FOO}`, + expected: `\\\\\\${FOO}`, + }, + } + + for _, tc := range testCases { + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` + FROM scratch + ENV FOO bar + VOLUME %s + `, tc.volumeValue))) + + var result map[string]map[string]struct{} + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result) + if _, ok := result[tc.expected]; !ok { + c.Fatalf("Could not find volume %s set from env foo in volumes table, got %q", tc.expected, result) + } + + // Remove the image for the next iteration + dockerCmd(c, "rmi", name) + } +} + +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + onbuild run echo quux + `)) + + result := buildImage(name2, build.WithDockerfile(fmt.Sprintf(` + FROM %s + `, name))) + result.Assert(c, icmd.Success) + + if !strings.Contains(result.Combined(), "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", result.Combined()) + } + + if strings.Contains(result.Combined(), "ONBUILD ONBUILD") { + c.Fatalf("Found unexpected 'ONBUILD ONBUILD' in the build output: %s", result.Combined()) + } + +} + +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvescapes" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV TEST foo + CMD echo \$ + `)) + + out, _ := dockerCmd(c, "run", "-t", name) + if strings.TrimSpace(out) != "$" { + c.Fatalf("Escaped dollar sign was not preserved as a literal '$': was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvoverwrite" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `)) + + out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) + if strings.TrimSpace(out) != "bar" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +// FIXME(vdemeester) why do we disable cache here? +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + cli.BuildCmd(c, name1, build.WithDockerfile(` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`)) + + cli.BuildCmd(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1))) + + result := cli.DockerCmd(c, "run", name2) + result.Assert(c, icmd.Expected{Out: "hello world"}) +} + +// FIXME(vdemeester) why do we disable cache here?
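+// ONBUILD instructions are recorded on the parent image and replayed when a child image is built FROM it; the child image below should therefore inherit ENTRYPOINT ["echo"] and feed it "hello world" through its own CMD.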
+func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + buildImageSuccessfully(c, name1, build.WithDockerfile(` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`)) + + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1))) + + out, _ := dockerCmd(c, "run", name2) + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("got malformed output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet + name := "testbuildtwoimageswithadd" + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + })) + defer server.Close() + + cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()))) + + result := cli.Docker(cli.Build(name), build.WithDockerfile(fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()))) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } +} + +func (s *DockerSuite) TestBuildLastModified(c *check.C) { + // Temporary fix for #30890. TODO @jhowardmsft figure out what + // has changed in the master busybox image. + testRequires(c, DaemonIsLinux) + + name := "testbuildlastmodified" + + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "file": "hello", + })) + defer server.Close() + + var out, out2 string + + dFmt := `FROM busybox +ADD %s/file /` + dockerfile := fmt.Sprintf(dFmt, server.URL()) + + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() + + // Build it again and make sure the mtime of the file didn't change. + // Wait a few seconds to make sure the time changed enough to notice + time.Sleep(2 * time.Second) + + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() + + if out != out2 { + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) + } + + // Now 'touch' the file and make sure the timestamp DID change this time + // Create a new fakeStorage instead of just using Add() to help windows + server = fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "file": "hello", + })) + defer server.Close() + + dockerfile = fmt.Sprintf(dFmt, server.URL()) + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined() + + if out == out2 { + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) + } + +} + +// Regression for https://github.com/docker/docker/pull/27805 +// Makes sure that we don't use the cache if the contents of +// a file in a subfolder of the context is modified and we re-build. 
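+// The build cache should key on the contents of files added to the image, so rewriting folder/file between the two builds below is expected to produce two different image IDs (id1 != id2) rather than a cache hit.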
+func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { + name := "testbuildmodifyfileinfolder" + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +RUN ["mkdir", "/test"] +ADD folder/file /test/changetarget`)) + defer ctx.Close() + if err := ctx.Add("folder/file", "first"); err != nil { + c.Fatal(err) + } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + if err := ctx.Add("folder/file", "second"); err != nil { + c.Fatal(err) + } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + if id1 == id2 { + c.Fatal("cache was used even though file contents in folder was changed") + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddimg", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_file", "test1"))) +} + +// Issue #3960: "ADD src ." hangs +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { + name := "testaddsinglefiletoworkdir" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile( + `FROM busybox + ADD test_file .`), + fakecontext.WithFiles(map[string]string{ + "test_file": "test1", + })) + defer ctx.Close() + + errChan := make(chan error) + go func() { + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + cli.BuildCmd(c, "testaddsinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "robots.txt": "hello", + })) + defer server.Close() + + cli.BuildCmd(c, "testcopymultiplefilestofile", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 %s/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 
'dockerio:dockerio' ]
+RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
+RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
+`, server.URL())),
+		build.WithFile("test_file1", "test1"),
+		build.WithFile("test_file2", "test2"),
+		build.WithFile("test_file3", "test3"),
+		build.WithFile("test_file4", "test4")))
+}
+
+// These tests are mainly for user namespaces, verifying that new directories
+// are created owned by the remapped root uid/gid pair
+func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testCases := []string{
+		"ADD . /new_dir",
+		"COPY test_dir /new_dir",
+		"WORKDIR /new_dir",
+	}
+	name := "testbuildusernamespacevalidateremappedroot"
+	for _, tc := range testCases {
+		cli.BuildCmd(c, name, build.WithBuildContext(c,
+			build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
+%s
+RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)),
+			build.WithFile("test_dir/test_file", "test file")))
+
+		cli.DockerCmd(c, "rmi", name)
+	}
+}
+
+func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Not currently passing on Windows
+	name := "testaddfilewithwhitespace"
+
+	for _, command := range []string{"ADD", "COPY"} {
+		cli.BuildCmd(c, name, build.WithBuildContext(c,
+			build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
+RUN mkdir "/test dir"
+RUN mkdir "/test_dir"
+%s [ "test file1", "/test_file1" ]
+%s [ "test_file2", "/test file2" ]
+%s [ "test file3", "/test file3" ]
+%s [ "test dir/test_file4", "/test_dir/test_file4" ]
+%s [ "test_dir/test_file5", "/test dir/test_file5" ]
+%s [ "test dir/test_file6", "/test dir/test_file6" ]
+RUN [ $(cat "/test_file1") = 'test1' ]
+RUN [ $(cat "/test file2") = 'test2' ]
+RUN [ $(cat "/test file3") = 'test3' ]
+RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
+RUN [ $(cat "/test dir/test_file5") = 'test5' ]
+RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)),
+			build.WithFile("test file1", "test1"),
+			build.WithFile("test_file2", "test2"),
+			build.WithFile("test file3", "test3"),
+			build.WithFile("test dir/test_file4", "test4"),
+			build.WithFile("test_dir/test_file5", "test5"),
+			build.WithFile("test dir/test_file6", "test6"),
+		))
+
+		cli.DockerCmd(c, "rmi", name)
+	}
+}
+
+func (s *DockerSuite) TestBuildCopyFileWithWhitespaceOnWindows(c *check.C) {
+	testRequires(c, DaemonIsWindows)
+	dockerfile := `FROM ` + testEnv.MinimalBaseImage() + `
+RUN mkdir "C:/test dir"
+RUN mkdir "C:/test_dir"
+COPY [ "test file1", "/test_file1" ]
+COPY [ "test_file2", "/test file2" ]
+COPY [ "test file3", "/test file3" ]
+COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
+COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
+COPY [ "test dir/test_file6", "/test dir/test_file6" ]
+RUN find "test1" "C:/test_file1"
+RUN find "test2" "C:/test file2"
+RUN find "test3" "C:/test file3"
+RUN find "test4" "C:/test_dir/test_file4"
+RUN find "test5" "C:/test dir/test_file5"
+RUN find "test6" "C:/test dir/test_file6"`
+
+	name := "testcopyfilewithwhitespace"
+	cli.BuildCmd(c, name, 
build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test file3", "test3"), + build.WithFile("test dir/test_file4", "test4"), + build.WithFile("test_dir/test_file5", "test5"), + build.WithFile("test dir/test_file6", "test6"), + )) +} + +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { + name := "testcopywildcard" + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + })) + defer server.Close() + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN [ "mkdir", "/tmp1" ] + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN [ "mkdir", "/tmp2" ] + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL())), + fakecontext.WithFiles(map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + // Now make sure we use a cache the 2nd time + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { + // Run this only on Linux + // Below is the original comment (that I don't agree with — vdemeester) + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + testRequires(c, DaemonIsLinux, UnixCli) + + buildImageSuccessfully(c, "testcopywildcardinname", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + COPY *.txt /tmp/ + RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] + `), + build.WithFile("*.txt", "hi there"), + )) +} + +func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { + name := "testcopywildcardcache" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + COPY file1.txt /tmp/`), + fakecontext.WithFiles(map[string]string{ + "file1.txt": "test1", + })) + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + // Now make sure we use a cache the 2nd time even with wild cards. 
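+	// (A cache hit is expected even though the COPY pattern changes from
+	// file1.txt to file*.txt: the wildcard is resolved against the context
+	// first, so the cache key, a checksum of the matched files, stays the same.)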
+ // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testadddircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testadddircontenttoexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddwholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_dir/test_file", "test1"))) +} + +// Testing #5941 : Having an etc directory in context conflicts with the /etc/mtab +func (s *DockerSuite) TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) { + buildImageSuccessfully(c, "testaddetctoroot", 
build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` +ADD . /`), + build.WithFile("etc/test_file", "test1"))) + buildImageSuccessfully(c, "testcopyetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` +COPY . /`), + build.WithFile("etc/test_file", "test1"))) +} + +// Testing #9401 : Losing setuid flag after a ADD +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`), + build.WithFile("suidbin", "suidbin"), + build.WithFile("/data/usr/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopysinglefiletoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_file", "test1"))) +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { + name := "testcopysinglefiletoworkdir" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +COPY test_file .`), + fakecontext.WithFiles(map[string]string{ + "test_file": "test1", + })) + defer ctx.Close() + + errChan := make(chan error) + go func() { + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopysinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific + buildImageSuccessfully(c, "testcopysinglefiletononexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l 
/ | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopydircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopydircontenttoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + buildImageSuccessfully(c, "testcopywholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_dir/test_file", "test1"))) +} + +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently working on Windows + + dockerfile := ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile := "foo.txt" + var ( + name = "test-link-absolute" + ) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + var symlinkTarget string + if runtime.GOOS == "windows" { + var driveLetter string + if abs, err := filepath.Abs(tempDir); err != nil { + c.Fatal(err) + } else { + driveLetter = abs[:1] + } + tempDirWithoutDrive := tempDir[2:] + symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) + } else { + symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + } + + tarPath := 
filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + c.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + c.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { + testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows + + { + name := "testbuildinaccessiblefiles" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/"), + fakecontext.WithFiles(map[string]string{"fileWithoutReadAccess": "foo"}), + ) + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + if err := os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown file to root: %s", err) + } + if err := os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + Dir: ctx.Dir, + }) + if result.Error == nil { + c.Fatalf("build should have failed: %s %s", result.Error, result.Combined()) + } + + // check if we've detected the failure before we started building + if !strings.Contains(result.Combined(), "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined()) + } + + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context") + } + } + { + name := "testbuildinaccessibledirectory" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{"directoryWeCantStat/bar": "foo"}), + ) + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + Dir: ctx.Dir, + }) + if result.Error == nil { + c.Fatalf("build should have failed: %s %s", result.Error, result.Combined()) + } + + // check if we've detected the failure before we started building + if !strings.Contains(result.Combined(), "can't stat") { + c.Fatalf("output should've contained the string: can't access %s", result.Combined()) + } + + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context\ngot:%s", result.Combined()) + } + + } + { + name := "testlinksok" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/")) + defer ctx.Close() + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { + c.Fatal(err) + } + defer os.Remove(target) + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + } + { + name := "testbuildignoredinaccessible" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }), + ) + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Dir: ctx.Dir, + Command: []string{"su", "unprivilegeduser", "-c", + fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + }) + result.Assert(c, icmd.Expected{}) + } +} + +func (s *DockerSuite) TestBuildForceRm(c *check.C) { + containerCountBefore := getContainerCount(c) + name := "testbuildforcerm" + + buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` + RUN true + RUN thiswillfail`))).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + containerCountAfter := getContainerCount(c) + if containerCountBefore != containerCountAfter { + c.Fatalf("--force-rm shouldn't have left containers behind") + } + +} + +func (s *DockerSuite) TestBuildRm(c *check.C) { + name := "testbuildrm" + + testCases := []struct { + buildflags []string + shouldLeftContainerBehind bool + }{ + // Default case (i.e. 
--rm=true) + { + buildflags: []string{}, + shouldLeftContainerBehind: false, + }, + { + buildflags: []string{"--rm"}, + shouldLeftContainerBehind: false, + }, + { + buildflags: []string{"--rm=false"}, + shouldLeftContainerBehind: true, + }, + } + + for _, tc := range testCases { + containerCountBefore := getContainerCount(c) + + buildImageSuccessfully(c, name, cli.WithFlags(tc.buildflags...), build.WithDockerfile(`FROM busybox + RUN echo hello world`)) + + containerCountAfter := getContainerCount(c) + if tc.shouldLeftContainerBehind { + if containerCountBefore == containerCountAfter { + c.Fatalf("flags %v should have left containers behind", tc.buildflags) + } + } else { + if containerCountBefore != containerCountAfter { + c.Fatalf("flags %v shouldn't have left containers behind", tc.buildflags) + } + } + + dockerCmd(c, "rmi", name) + } +} + +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { + testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `)) + + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result) + + equal := reflect.DeepEqual(&result, &expected) + if !equal { + c.Fatalf("Volumes %s, expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { + name := "testbuildmaintainer" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio`)) + + expected := "dockerio" + res := inspectField(c, name, "Author") + if res != expected { + c.Fatalf("Maintainer %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildUser(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + expected := "dockerio" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`)) + res := inspectField(c, name, "Config.User") + if res != expected { + c.Fatalf("User %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { + name := "testbuildrelativeworkdir" + + var ( + expected1 string + expected2 string + expected3 string + expected4 string + expectedFinal string + ) + + if testEnv.DaemonPlatform() == "windows" { + expected1 = `C:/` + expected2 = `C:/test1` + expected3 = `C:/test2` + expected4 = `C:/test2/test3` + expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox + } else { + expected1 = `/` + expected2 = `/test1` + expected3 = `/test2` + expected4 = `/test2/test3` + expectedFinal = `/test2/test3` + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN sh -c "[ "$PWD" = "`+expected1+`" ]" + WORKDIR test1 + RUN sh -c "[ "$PWD" = "`+expected2+`" ]" + WORKDIR /test2 + RUN sh -c "[ "$PWD" = "`+expected3+`" ]" + WORKDIR test3 + RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`)) + + res := inspectField(c, name, "Config.WorkingDir") + if res != expectedFinal { + c.Fatalf("Workdir %s, expected %s", res, 
expectedFinal) + } +} + +// #22181 Regression test. Single end-to-end test of using +// Windows semantics. Most path handling verifications are in unit tests +func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + buildImageSuccessfully(c, "testbuildwindowsworkdirprocessing", build.WithDockerfile(`FROM busybox + WORKDIR C:\\foo + WORKDIR bar + RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" + `)) +} + +// #22181 Regression test. Most paths handling verifications are in unit test. +// One functional test for end-to-end +func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to + // support backslash such as .\\ being equivalent to ./ and c:\\ being + // equivalent to c:/. This is not currently (nor ever has been) supported + // by docker on the Windows platform. + buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + # No trailing slash on COPY/ADD + # Results in dir being changed to a file + WORKDIR /wc1 + COPY wc1 c:/wc1 + WORKDIR /wc2 + ADD wc2 c:/wc2 + WORKDIR c:/ + RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]" + RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]" + + # Trailing slash on COPY/ADD, Windows-style path. + WORKDIR /wd1 + COPY wd1 c:/wd1/ + WORKDIR /wd2 + ADD wd2 c:/wd2/ + RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" + RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" + `), + build.WithFile("wc1", "hellowc1"), + build.WithFile("wc2", "worldwc2"), + build.WithFile("wd1", "hellowd1"), + build.WithFile("wd2", "worldwd2"), + )) +} + +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { + name := "testbuildworkdirwithenvvariables" + + var expected string + if testEnv.DaemonPlatform() == "windows" { + expected = `C:\test1\test2` + } else { + expected = `/test1/test2` + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`)) + res := inspectField(c, name, "Config.WorkingDir") + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { + // cat /test1/test2/foo gets permission denied for the user + testRequires(c, NotUserNamespace) + + var expected string + if testEnv.DaemonPlatform() == "windows" { + expected = `C:/test1/test2` + } else { + expected = `/test1/test2` + } + + buildImageSuccessfully(c, "testbuildrelativecopy", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + WORKDIR /test1 + WORKDIR test2 + RUN sh -c "[ "$PWD" = '`+expected+`' ]" + COPY foo ./ + RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" + ADD foo ./bar/baz + RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" + COPY foo ./bar/baz2 + RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" + WORKDIR .. + COPY foo ./ + RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" + COPY foo /test3/ + RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" + WORKDIR /test4 + COPY . . 
+ RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" + WORKDIR /test5/test6 + COPY foo ../ + RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" + `), + build.WithFile("foo", "hello"), + )) +} + +func (s *DockerSuite) TestBuildBlankName(c *check.C) { + name := "testbuildblankname" + testCases := []struct { + expression string + expectedStderr string + }{ + { + expression: "ENV =", + expectedStderr: "ENV names can not be blank", + }, + { + expression: "LABEL =", + expectedStderr: "LABEL names can not be blank", + }, + { + expression: "ARG =foo", + expectedStderr: "ARG names can not be blank", + }, + } + + for _, tc := range testCases { + buildImage(name, build.WithDockerfile(fmt.Sprintf(`FROM busybox + %s`, tc.expression))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedStderr, + }) + } +} + +func (s *DockerSuite) TestBuildEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`)) + res := inspectField(c, name, "Config.Env") + if res != expected { + c.Fatalf("Env %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildPATH(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + + defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + + fn := func(dockerfile string, expected string) { + buildImageSuccessfully(c, "testbldpath", build.WithDockerfile(dockerfile)) + res := inspectField(c, "testbldpath", "Config.Env") + if res != expected { + c.Fatalf("Env %q, expected %q for dockerfile:%q", res, expected, dockerfile) + } + } + + tests := []struct{ dockerfile, exp string }{ + {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, + {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, + {"FROM scratch\nENV PATH=''", "[PATH=]"}, + {"FROM busybox\nENV PATH=''", "[PATH=]"}, + } + + for _, test := range tests { + fn(test.dockerfile, test.exp) + } +} + +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`)) + + entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`)).Assert(c, 
icmd.Expected{ + ExitCode: 1, + }) + + entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildCmd(c *check.C) { + name := "testbuildcmd" + expected := "[/bin/echo Hello World]" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + CMD ["/bin/echo", "Hello World"]`)) + + res := inspectField(c, name, "Config.Cmd") + if res != expected { + c.Fatalf("Cmd %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExpose(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexpose" + expected := "map[2375/tcp:{}]" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 2375`)) + + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + // start building docker file with a large number of ports + portList := make([]string, 50) + line := make([]string, 100) + expectedPorts := make([]int, len(portList)*len(line)) + for i := 0; i < len(portList); i++ { + for j := 0; j < len(line); j++ { + p := i*len(line) + j + 1 + line[j] = strconv.Itoa(p) + expectedPorts[p-1] = p + } + if i == len(portList)-1 { + portList[i] = strings.Join(line, " ") + } else { + portList[i] = strings.Join(line, " ") + ` \` + } + } + + dockerfile := `FROM scratch + EXPOSE {{range .}} {{.}} + {{end}}` + tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) + buf := bytes.NewBuffer(nil) + tmpl.Execute(buf, portList) + + name := "testbuildexpose" + buildImageSuccessfully(c, name, build.WithDockerfile(buf.String())) + + // check if all the ports are saved inside Config.ExposedPorts + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + var exposedPorts map[string]interface{} + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + for _, p := range expectedPorts { + ep := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[ep]; !ok { + c.Errorf("Port(%s) is not exposed", ep) + } else { + delete(exposedPorts, ep) + } + } + if len(exposedPorts) != 0 { + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) + } +} + +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + buildID := func(name, exposed string) string { + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed))) + id := inspectField(c, name, "Id") + return id + } + + id1 := buildID("testbuildexpose1", "80 2375") + id2 := buildID("testbuildexpose2", "2375 80") + if id1 != id2 { + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + } +} + +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexposeuppercaseproto" + expected := "map[5678/udp:{}]" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 5678/UDP`)) + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, 
expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT ["/bin/echo"]`)) + res := inspectField(c, name, "Config.Entrypoint") + + expected := "[/bin/echo]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name))) + res = inspectField(c, name2, "Config.Entrypoint") + + expected = "[]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "[]" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT []`)) + + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { + name := "testbuildentrypoint" + + expected := "[/bin/echo]" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`)) + + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { + buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + `)) + // ONBUILD should be run in second build. + buildImage("testonbuildtrigger2", build.WithDockerfile("FROM testonbuildtrigger1")).Assert(c, icmd.Expected{ + Out: "ONBUILD PARENT", + }) + // ONBUILD should *not* be run in third build. 
+	result := buildImage("testonbuildtrigger3", build.WithDockerfile("FROM testonbuildtrigger2"))
+	result.Assert(c, icmd.Success)
+	if strings.Contains(result.Combined(), "ONBUILD PARENT") {
+		c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
+	}
+}
+
+func (s *DockerSuite) TestBuildSameDockerfileWithAndWithoutCache(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
+	name := "testbuildwithcache"
+	dockerfile := `FROM scratch
+	MAINTAINER dockerio
+	EXPOSE 5432
+	ENTRYPOINT ["/bin/echo"]`
+	buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
+	id1 := getIDByName(c, name)
+	buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
+	id2 := getIDByName(c, name)
+	buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
+	id3 := getIDByName(c, name)
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but hasn't.")
+	}
+	if id1 == id3 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+}
+
+// Make sure that ADD/COPY still populate the cache even if they don't use it
+func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
+	name := "testbuildconditionalcache"
+
+	dockerfile := `
+	FROM busybox
+	ADD foo /tmp/`
+	ctx := fakecontext.New(c, "",
+		fakecontext.WithDockerfile(dockerfile),
+		fakecontext.WithFiles(map[string]string{
+			"foo": "hello",
+		}))
+	defer ctx.Close()
+
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+
+	if err := ctx.Add("foo", "bye"); err != nil {
+		c.Fatalf("Error modifying foo: %s", err)
+	}
+
+	// Updating a file should invalidate the cache
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name)
+	if id2 == id1 {
+		c.Fatal("Should not have used the cache")
+	}
+
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id3 := getIDByName(c, name)
+	if id3 != id2 {
+		c.Fatal("Should have used the cache")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) {
+	name := "testbuildaddmultiplelocalfilewithcache"
+	baseName := name + "-base"
+
+	cli.BuildCmd(c, baseName, build.WithDockerfile(`
+	FROM busybox
+	ENTRYPOINT ["/bin/sh"]
+	`))
+
+	dockerfile := `
+	FROM testbuildaddmultiplelocalfilewithcache-base
+	MAINTAINER dockerio
+	ADD foo Dockerfile /usr/lib/bla/
+	RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
+		"foo": "hello",
+	}))
+	defer ctx.Close()
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	result2 := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name)
+	result3 := cli.BuildCmd(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
+	id3 := getIDByName(c, name)
+	if id1 != id2 {
+		c.Fatalf("The cache should have been used but hasn't: %s", result2.Stdout())
+	}
+	if id1 == id3 {
+		c.Fatalf("The cache should have been invalidated but hasn't: %s", result3.Stdout())
+	}
+}
+
+func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
+	name := "testbuildcopydirbutnotfile"
+	name2 := "testbuildcopydirbutnotfile2"
+
+	dockerfile := `
+	FROM ` + minimalBaseImage() + `
+	COPY dir /tmp/`
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
+		"dir/foo": "hello",
+	}))
+	defer ctx.Close()
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	// Check that adding a file with a similar name doesn't mess with the cache
+	if err := ctx.Add("dir_file", "hello2"); err != nil {
+		c.Fatal(err)
+	}
+	cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name2)
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but wasn't")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
+	name := "testbuildaddcurrentdirwithcache"
+	name2 := name + "2"
+	name3 := name + "3"
+	name4 := name + "4"
+	dockerfile := `
+	FROM ` + minimalBaseImage() + `
+	MAINTAINER dockerio
+	ADD . /usr/lib/bla`
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
+		"foo": "hello",
+	}))
+	defer ctx.Close()
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	// Check that adding a file invalidates the cache of "ADD ."
+	if err := ctx.Add("bar", "hello2"); err != nil {
+		c.Fatal(err)
+	}
+	buildImageSuccessfully(c, name2, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name2)
+	if id1 == id2 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	// Check that changing a file invalidates the cache of "ADD ."
+	if err := ctx.Add("foo", "hello1"); err != nil {
+		c.Fatal(err)
+	}
+	buildImageSuccessfully(c, name3, build.WithExternalBuildContext(ctx))
+	id3 := getIDByName(c, name3)
+	if id2 == id3 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+	// Check that changing a file to the same content with a different mtime does
+	// not invalidate the cache of "ADD ."
+	time.Sleep(1 * time.Second) // wait a second because of mtime precision
+	if err := ctx.Add("foo", "hello1"); err != nil {
+		c.Fatal(err)
+	}
+	buildImageSuccessfully(c, name4, build.WithExternalBuildContext(ctx))
+	id4 := getIDByName(c, name4)
+	if id3 != id4 {
+		c.Fatal("The cache should have been used but hasn't.")
+	}
+}
+
+// FIXME(vdemeester) this really seems to test the same thing as before (TestBuildAddMultipleLocalFileWithAndWithoutCache)
+func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
+	name := "testbuildaddcurrentdirwithoutcache"
+	dockerfile := `
+	FROM ` + minimalBaseImage() + `
+	MAINTAINER dockerio
+	ADD . /usr/lib/bla`
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
+		"foo": "hello",
+	}))
+	defer ctx.Close()
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name)
+	if id1 == id2 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) {
+	name := "testbuildaddremotefilewithcache"
+	server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
+		"baz": "hello",
+	}))
+	defer server.Close()
+
+	dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+`
+	MAINTAINER dockerio
+	ADD %s/baz /usr/lib/baz/quux`, server.URL())
+	cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
+	id1 := getIDByName(c, name)
+	cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
+	id2 := getIDByName(c, name)
+	cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
+	id3 := getIDByName(c, name)
+
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but hasn't.")
+	}
+	if id1 == id3 {
+		c.Fatal("The cache should have been invalidated but hasn't.")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
+	name := "testbuildaddremotefilemtime"
+	name2 := name + "2"
+	name3 := name + "3"
+
+	files := map[string]string{"baz": "hello"}
+	server := fakestorage.New(c, "", fakecontext.WithFiles(files))
+	defer server.Close()
+
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
+	MAINTAINER dockerio
+	ADD %s/baz /usr/lib/baz/quux`, server.URL())))
+	defer ctx.Close()
+
+	cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
+	id2 := getIDByName(c, name2)
+	if id1 != id2 {
+		c.Fatal("The cache should have been used but wasn't - #1")
+	}
+
+	// Now create a different server with same contents (causes different mtime)
+	// The cache should still be used
+
+	// allow some time for clock to pass as mtime precision is only 1s
+	time.Sleep(2 * time.Second)
+
+	server2 := fakestorage.New(c, "", fakecontext.WithFiles(files))
+	defer server2.Close()
+
+	ctx2 := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
+	MAINTAINER dockerio
+	ADD %s/baz /usr/lib/baz/quux`, server2.URL())))
+	defer ctx2.Close()
+	cli.BuildCmd(c, name3, build.WithExternalBuildContext(ctx2))
+	id3 := getIDByName(c, name3)
+	if id1 != id3 {
+		c.Fatal("The cache should have been used but wasn't")
+	}
+}
+
+// FIXME(vdemeester) this really seems to test the same thing as before (combined)
+func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) {
+	name := "testbuildaddlocalandremotefilewithcache"
+	server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
+		"baz": "hello",
+	}))
+	defer server.Close()
+
+	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
+	MAINTAINER dockerio
+	ADD foo /usr/lib/bla/bar
+	ADD %s/baz /usr/lib/baz/quux`, server.URL())),
+		fakecontext.WithFiles(map[string]string{
+			"foo": "hello world",
+		}))
+	defer ctx.Close()
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	id1 := getIDByName(c, name)
+	buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
+	
id2 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } + if id1 == id3 { + c.Fatal("The cache should have been invalidated but hasn't.") + } +} + +func testContextTar(c *check.C, compression archive.Compression) { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(`FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + }), + ) + defer ctx.Close() + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + c.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + + cli.BuildCmd(c, name, build.WithStdinContext(context)) +} + +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) +} + +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) +} + +func (s *DockerSuite) TestBuildNoContext(c *check.C) { + name := "nocontext" + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-"}, + Stdin: strings.NewReader( + `FROM busybox + CMD ["echo", "ok"]`), + }).Assert(c, icmd.Success) + + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + } +} + +func (s *DockerSuite) TestBuildDockerfileStdin(c *check.C) { + name := "stdindockerfile" + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("bar"), 0600) + c.Assert(err, check.IsNil) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir}, + Stdin: strings.NewReader( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`), + }).Assert(c, icmd.Success) + + res := inspectField(c, name, "Config.Cmd") + c.Assert(strings.TrimSpace(string(res)), checker.Equals, `[cat /foo]`) +} + +func (s *DockerSuite) TestBuildDockerfileStdinConflict(c *check.C) { + name := "stdindockerfiletarcontext" + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", "-"}, + }).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "use stdin for both build context and dockerfile", + }) +} + +func (s *DockerSuite) TestBuildDockerfileStdinNoExtraFiles(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, false, false) +} + +func (s *DockerSuite) TestBuildDockerfileStdinDockerignore(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, true, false) +} + +func (s *DockerSuite) TestBuildDockerfileStdinDockerignoreIgnored(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, true, true) +} + +func (s *DockerSuite) testBuildDockerfileStdinNoExtraFiles(c *check.C, hasDockerignore, ignoreDockerignore bool) { + name := "stdindockerfilenoextra" + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpDir) + + writeFile := func(filename, content string) { + err = ioutil.WriteFile(filepath.Join(tmpDir, filename), []byte(content), 0600) + c.Assert(err, check.IsNil) + } + + writeFile("foo", "bar") + + if hasDockerignore { + // Add an empty Dockerfile to verify that it is not added to the image + writeFile("Dockerfile", "") + + ignores := "Dockerfile\n" + if ignoreDockerignore { + ignores += ".dockerignore\n" + } + writeFile(".dockerignore", ignores) + } + + result := icmd.RunCmd(icmd.Cmd{ + Command: 
[]string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir}, + Stdin: strings.NewReader( + `FROM busybox +COPY . /baz`), + }) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "run", "--rm", name, "ls", "-A", "/baz") + if hasDockerignore && !ignoreDockerignore { + c.Assert(result.Stdout(), checker.Equals, ".dockerignore\nfoo\n") + } else { + c.Assert(result.Stdout(), checker.Equals, "foo\n") + } +} + +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildimg" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`)) + + out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + if expected := "drw-------"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + if expected := "daemon daemon"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + +} + +// testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { + name := "testbuildcmdcleanup" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo "hello"`)) + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`), + build.WithFile("foo", "hello"))) + + res := inspectField(c, name, "Config.Cmd") + // Cmd must be cleaned up + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } +} + +func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { + name := "testbuildaddnotfound" + expected := "foo: no such file or directory" + + if testEnv.DaemonPlatform() == "windows" { + expected = "foo: The system cannot find the file specified" + } + + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` + ADD foo /usr/local/bar`), + build.WithFile("bar", "hello"))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: expected, + }) +} + +func (s *DockerSuite) TestBuildInheritance(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildinheritance" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 2375`)) + ports1 := inspectField(c, name, "Config.ExposedPorts") + + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name))) + + res := inspectField(c, name, "Config.Entrypoint") + if expected := "[/bin/echo]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2 := inspectField(c, name, "Config.ExposedPorts") + if ports1 != ports2 { + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } +} + +func (s *DockerSuite) TestBuildFails(c *check.C) { + name := "testbuildfails" + buildImage(name, build.WithDockerfile(`FROM busybox + RUN sh -c "exit 23"`)).Assert(c, icmd.Expected{ + ExitCode: 23, + Err: "returned a non-zero code: 23", + }) +} + +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { + name := "testbuildonbuild" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ONBUILD RUN touch foobar`)) + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name))) +} + +// gh #2446 +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { + makeLink := `ln -s /foo /bar` + if testEnv.DaemonPlatform() == "windows" { + makeLink 
= `mklink /D C:\bar C:\foo` + } + name := "testbuildaddtosymlinkdest" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + RUN sh -c "mkdir /foo" + RUN `+makeLink+` + ADD foo /bar/ + RUN sh -c "[ -f /bar/foo ]" + RUN sh -c "[ -f /foo/foo ]"`), + build.WithFile("foo", "hello"), + )) +} + +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { + name := "testbuildescapewhitespace" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + # ESCAPE=\ + FROM busybox + MAINTAINER "Docker \ +IO " + `)) + + res := inspectField(c, name, "Author") + if res != "\"Docker IO \"" { + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + } + +} + +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { + // Verify that strings that look like ints are still passed as strings + name := "testbuildstringing" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + MAINTAINER 123`)) + + out, _ := dockerCmd(c, "inspect", name) + if !strings.Contains(out, "\"123\"") { + c.Fatalf("Output does not contain the int as a string:\n%s", out) + } + +} + +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { + name := "testbuilddockerignore" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ ! -e /bla/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ ! -e v.cc ]]" + RUN sh -c "[[ ! -e src/v.cc ]]" + RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("src/_vendor/v.cc", "package main"), + build.WithFile("src/v.cc", "package main"), + build.WithFile("v.cc", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile(".dockerignore", ` +.git +pkg +.gitignore +src/_vendor +*.md +**/*.cc +dir`), + )) +} + +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { + name := "testbuilddockerignorecleanpaths" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ -e /bla/dir/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/dir/foo1 ]]" + RUN sh -c "[[ -f /bla/dir/e ]]" + RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! 
-e /bla/.git ]]" + RUN sh -c "[[ -e /bla/dir/a.cc ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile("dir/foo1", ""), + build.WithFile("dir/dir/f1", ""), + build.WithFile("dir/dir/foo", ""), + build.WithFile("dir/e", ""), + build.WithFile("dir/e-dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile("dir/a.cc", "hello"), + build.WithFile(".dockerignore", ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo +**/*.cc +!**/*.cc`), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "Dockerfile\n"), + )) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "./Dockerfile\n"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls /tmp/Dockerfile + RUN sh -c "! ls /tmp/MyDockerfile" + RUN ls /tmp/.dockerignore` + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "MyDockerfile\n"), + )) + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "./MyDockerfile\n"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { + name := "testbuilddockerignoredockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/.dockerignore" + RUN ls /tmp/Dockerfile` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".dockerignore\n"), + )) +} + +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { + name := "testbuilddockerignoretouchdockerfile" + dockerfile := ` + FROM busybox + ADD . 
/tmp/` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + ".dockerignore": "Dockerfile\n", + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Didn't use the cache - 1") + } + + // Now make sure touching Dockerfile doesn't invalidate the cache + if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Didn't use the cache - 2") + } + + // One more time but just 'touch' it instead of changing the content + if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Didn't use the cache - 3") + } +} + +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { + name := "testbuilddockerignorewholedir" + + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ ! -e /Makefile ]]"` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "*\n"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) { + name := "testbuilddockerignorewholedir" + + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".*"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + )) +} + +func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { + name := "testbuilddockerignorebadexclusion" + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + build.WithFile(".dockerignore", "!\n"), + )).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error checking context: 'illegal exclusion pattern: \"!\"", + }) +} + +func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.dockerignore ]]" + RUN sh -c "[[ ! -e /Dockerfile ]]" + RUN sh -c "[[ ! -e /file1 ]]" + RUN sh -c "[[ ! -e /dir ]]"` + + // All of these should result in ignoring all files + for _, variant := range []string{"**", "**/", "**/**", "*"} { + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("file1", ""), + build.WithFile("dir/file1", ""), + build.WithFile(".dockerignore", variant), + )) + + dockerCmd(c, "rmi", "noname") + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + #RUN sh -c "[[ -e /.dockerignore ]]" + RUN sh -c "[[ -e /Dockerfile ]] && \ + [[ ! -e /file0 ]] && \ + [[ ! -e /dir1/file0 ]] && \ + [[ ! -e /dir2/file0 ]] && \ + [[ ! -e /file1 ]] && \ + [[ ! 
-e /dir1/file1 ]] && \ + [[ ! -e /dir1/dir2/file1 ]] && \ + [[ ! -e /dir1/file2 ]] && \ + [[ -e /dir1/dir2/file2 ]] && \ + [[ ! -e /dir1/dir2/file4 ]] && \ + [[ ! -e /dir1/dir2/file5 ]] && \ + [[ ! -e /dir1/dir2/file6 ]] && \ + [[ ! -e /dir1/dir3/file7 ]] && \ + [[ ! -e /dir1/dir3/file8 ]] && \ + [[ -e /dir1/dir3 ]] && \ + [[ -e /dir1/dir4 ]] && \ + [[ ! -e 'dir1/dir5/fileAA' ]] && \ + [[ -e 'dir1/dir5/fileAB' ]] && \ + [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing + + RUN echo all done!` + + dockerignore := ` +**/file0 +**/*file1 +**/dir1/file2 +dir1/**/file4 +**/dir2/file5 +**/dir1/dir2/file6 +dir1/dir3/** +**/dir4/** +**/file?A +**/file\?B +**/dir5/file. +` + + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", dockerignore), + build.WithFile("dir1/file0", ""), + build.WithFile("dir1/dir2/file0", ""), + build.WithFile("file1", ""), + build.WithFile("dir1/file1", ""), + build.WithFile("dir1/dir2/file1", ""), + build.WithFile("dir1/file2", ""), + build.WithFile("dir1/dir2/file2", ""), // remains + build.WithFile("dir1/dir2/file4", ""), + build.WithFile("dir1/dir2/file5", ""), + build.WithFile("dir1/dir2/file6", ""), + build.WithFile("dir1/dir3/file7", ""), + build.WithFile("dir1/dir3/file8", ""), + build.WithFile("dir1/dir4/file9", ""), + build.WithFile("dir1/dir5/fileAA", ""), + build.WithFile("dir1/dir5/fileAB", ""), + build.WithFile("dir1/dir5/fileB", ""), + )) +} + +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildlinebreak" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`)) +} + +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildeolinline" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`)) +} + +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcomments" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! 
-x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`)) +} + +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildusers" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ + echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ + # Add a "supplementary" group for our dockerio user + echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`)) +} + +// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2) +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := 
"testbuildenvusage" + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH $HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc=def +ENV ghi=$abc +RUN [ "$ghi" = "def" ] +` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), + )) +} + +// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage) +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage2" + dockerfile := `FROM busybox +ENV abc=def def="hello world" +RUN [ "$abc,$def" = "def,hello world" ] +ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" +RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] +ENV abc 'yyy' +RUN [ $abc = 'yyy' ] +ENV abc= +RUN [ "$abc" = "" ] + +# use grep to make sure if the builder substitutes \$foo by mistake +# we don't get a false positive +ENV abc=\$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) +ENV abc \$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) + +ENV abc=\'foo\' abc2=\"foo\" +RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] +ENV abc "foo" +RUN [ "$abc" = "foo" ] +ENV abc 'foo' +RUN [ "$abc" = 'foo' ] +ENV abc \'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc \"foo\" +RUN [ "$abc" = '"foo"' ] + +ENV abc=ABC +RUN [ "$abc" = "ABC" ] +ENV def1=${abc:-DEF} def2=${ccc:-DEF} +ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} +RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] +ENV mypath=${mypath:+$mypath:}/home +ENV mypath=${mypath:+$mypath:}/away +RUN [ "$mypath" = '/home:/away' ] + +ENV e1=bar +ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + +` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), + )) +} + +func (s *DockerSuite) TestBuildAddScript(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddscript" + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test", "#!/bin/sh\necho 'test!' 
> /testfile"), + )) +} + +func (s *DockerSuite) TestBuildAddTar(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddtar" + + ctx := func() *fakecontext.Fake { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { + name := "testbuildaddbrokentar" + + ctx := func() *fakecontext.Fake { + dockerfile := ` +FROM busybox +ADD test.tar /` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + // Corrupt the tar by removing one byte off the end + stat, err := testTar.Stat() + if err != nil { + c.Fatalf("failed to stat tar archive: %v", err) + } + if err := testTar.Truncate(stat.Size() - 1); err != nil { + c.Fatalf("failed to truncate tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + defer ctx.Close() + + buildImage(name, build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { + name := "testbuildaddnontar" + + // Should not try to extract test.tar + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD test.tar / + RUN test -f /test.tar`), + build.WithFile("test.tar", 
"not_a_tar_file"), + )) +} + +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxz" + + ctx := func() *fakecontext.Fake { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"xz", "-k", "test.tar"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxzgz" + + ctx := func() *fakecontext.Fake { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"xz", "-k", "test.tar"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"gzip", "test.tar.xz"}, + Dir: tmpDir, + }) + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildFromGit(c *check.C) { + name := "testbuildfromgit" + git := fakegit.New(c, "repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }, true) + defer git.Close() + + buildImageSuccessfully(c, name, build.WithContextPath(git.RepoURL)) + + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { + name := "testbuildfromgit" + git := fakegit.New(c, "repo", map[string]string{ + "docker/Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + 
"docker/first": "test git data", + }, true) + defer git.Close() + + buildImageSuccessfully(c, name, build.WithContextPath(fmt.Sprintf("%s#master:docker", git.RepoURL))) + + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) { + name := "testbuildfromgitwithf" + git := fakegit.New(c, "repo", map[string]string{ + "myApp/myDockerfile": `FROM busybox + RUN echo hi from Dockerfile`, + }, true) + defer git.Close() + + buildImage(name, cli.WithFlags("-f", "myApp/myDockerfile"), build.WithContextPath(git.RepoURL)).Assert(c, icmd.Expected{ + Out: "hi from Dockerfile", + }) +} + +func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { + name := "testbuildfromremotetarball" + + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + MAINTAINER docker`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "testT.tar": buffer, + })) + defer server.Close() + + cli.BuildCmd(c, name, build.WithContextPath(server.URL()+"/testT.tar")) + + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { + name := "testbuildcmdcleanuponentrypoint" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + CMD ["test"] + ENTRYPOINT ["echo"]`)) + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name))) + + res := inspectField(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } + res = inspectField(c, name, "Config.Entrypoint") + if expected := "[cat]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildClearCmd(c *check.C) { + name := "testbuildclearcmd" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/bash"] + CMD []`)) + + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected %s", res, "[]") + } +} + +func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. 
+ testRequires(c, DaemonIsLinux) + + name := "testbuildemptycmd" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")) + + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "null" { + c.Fatalf("Cmd %s, expected %s", res, "null") + } +} + +func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { + name := "testbuildonbuildparent" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nONBUILD RUN echo foo\n")) + + buildImage(name, build.WithDockerfile("FROM "+name+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{ + Out: "# Executing 1 build trigger", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { + name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) + buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "invalid reference format", + }) +} + +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { + name := "testbuildcmdshc" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD echo cmd\n")) + + res := inspectFieldJSON(c, name, "Config.Cmd") + expected := `["/bin/sh","-c","echo cmd"]` + if testEnv.DaemonPlatform() == "windows" { + expected = `["cmd","/S","/C","echo cmd"]` + } + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo hi\"]\n")) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"hi\"]\n")) + id2 := getIDByName(c, name) + + if id1 == id2 { + c.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo hi\"]\n")) + id1 = getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n")) + id2 = getIDByName(c, name) + + if id1 == id2 { + c.Fatal("Should not have resulted in the same ENTRYPOINT") + } +} + +func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { + name := "testbuildcmdjson" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"cmd\"]")) + + res := inspectFieldJSON(c, name, "Config.Cmd") + expected := `["echo","cmd"]` + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } +} + +func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChild(c *check.C) { + buildImageSuccessfully(c, "parent", build.WithDockerfile(` + FROM busybox + ENTRYPOINT exit 130 + `)) + + icmd.RunCommand(dockerBinary, "run", "parent").Assert(c, icmd.Expected{ + ExitCode: 130, + }) + + buildImageSuccessfully(c, "child", build.WithDockerfile(` + FROM parent + ENTRYPOINT exit 5 + `)) + + icmd.RunCommand(dockerBinary, "run", "child").Assert(c, icmd.Expected{ + ExitCode: 5, + }) +} + +func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChildInspect(c *check.C) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + if testEnv.DaemonPlatform() == "windows" { + expected = 
`["cmd","/S","/C","echo quux"]` + } + + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT /foo/bar")) + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name))) + + res := inspectFieldJSON(c, name2, "Config.Entrypoint") + if res != expected { + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + icmd.RunCommand(dockerBinary, "run", name2).Assert(c, icmd.Expected{ + Out: "quux", + }) +} + +func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { + name := "testbuildentrypoint" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT echo`)) + dockerCmd(c, "run", "--rm", name) +} + +func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildexoticshellinterpolation" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `)) +} + +func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double quotes (per the JSON spec). This means we interpret it + // as a "string" instead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. 
+ name := "testbuildsinglequotefails" + expectedExitCode := 2 + if testEnv.DaemonPlatform() == "windows" { + expectedExitCode = 127 + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`)) + + icmd.RunCommand(dockerBinary, "run", "--rm", name).Assert(c, icmd.Expected{ + ExitCode: expectedExitCode, + }) +} + +func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { + name := "testbuildverboseout" + expected := "\n123\n" + + if testEnv.DaemonPlatform() == "windows" { + expected = "\n123\r\n" + } + + buildImage(name, build.WithDockerfile(`FROM busybox +RUN echo 123`)).Assert(c, icmd.Expected{ + Out: expected, + }) +} + +func (s *DockerSuite) TestBuildWithTabs(c *check.C) { + name := "testbuildwithtabs" + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nRUN echo\tone\t\ttwo")) + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` + expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + if testEnv.DaemonPlatform() == "windows" { + expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` + expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + } + if res != expected1 && res != expected2 { + c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) + } +} + +func (s *DockerSuite) TestBuildLabels(c *check.C) { + name := "testbuildlabel" + expected := `{"License":"GPL","Vendor":"Acme"}` + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme + LABEL License GPL`)) + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { + name := "testbuildlabelcache" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme`)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme`)) + id2 := getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Build 2 should have worked & used cache(%s,%s)", id1, id2) + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme1`)) + id2 = getIDByName(c, name) + if id1 == id2 { + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s)", id1, id2) + } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor Acme`)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Build 4 should have worked & used cache(%s,%s)", id1, id2) + } + + // Now make sure the cache isn't used by mistake + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(`FROM busybox + LABEL f1=b1 f2=b2`)) + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL f1=b1 f2=b2`)) + id2 = getIDByName(c, name) + if id1 == id2 { + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s)", id1, id2) + } + +} + +func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { + // This test makes sure that -q works correctly when build is successful: + // stdout has only the image ID (long image ID) and stderr is empty. 
+ outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") + buildFlags := cli.WithFlags("-q") + + tt := []struct { + Name string + BuildFunc func(string) *icmd.Result + }{ + { + Name: "quiet_build_stdin_success", + BuildFunc: func(name string) *icmd.Result { + return buildImage(name, buildFlags, build.WithDockerfile("FROM busybox")) + }, + }, + { + Name: "quiet_build_ctx_success", + BuildFunc: func(name string) *icmd.Result { + return buildImage(name, buildFlags, build.WithBuildContext(c, + build.WithFile("Dockerfile", "FROM busybox"), + build.WithFile("quiet_build_success_fctx", "test"), + )) + }, + }, + { + Name: "quiet_build_git_success", + BuildFunc: func(name string) *icmd.Result { + git := fakegit.New(c, "repo", map[string]string{ + "Dockerfile": "FROM busybox", + }, true) + return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL)) + }, + }, + } + + for _, te := range tt { + result := te.BuildFunc(te.Name) + result.Assert(c, icmd.Success) + if outRegexp.Find([]byte(result.Stdout())) == nil { + c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, result.Stdout()) + } + + if result.Stderr() != "" { + c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, result.Stderr()) + } + } + +} + +func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testRequires(c, Network) + testName := "quiet_build_not_exists_image" + dockerfile := "FROM busybox11" + quietResult := buildImage(testName, cli.WithFlags("-q"), build.WithDockerfile(dockerfile)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(testName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if quietResult.Stderr() != result.Combined() { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, quietResult.Stderr(), result.Combined())) + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testCases := []struct { + testName string + dockerfile string + }{ + {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, + {"quiet_build_unknown_instr", "FROMD busybox"}, + } + + for _, tc := range testCases { + quietResult := buildImage(tc.testName, cli.WithFlags("-q"), build.WithDockerfile(tc.dockerfile)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(tc.testName, build.WithDockerfile(tc.dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if quietResult.Stderr() != result.Combined() { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", tc.testName, quietResult.Stderr(), result.Combined())) + } + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { + // This test ensures that when given a wrong URL, stderr in quiet mode and + // stderr in verbose mode are identical. 
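+ // ("something.invalid" uses the reserved .invalid TLD, so the URL is guaranteed not to resolve and both builds fail at the same point.)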
+ // TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout + URL := "http://something.invalid" + name := "quiet_build_wrong_remote" + quietResult := buildImage(name, cli.WithFlags("-q"), build.WithContextPath(URL)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(name, build.WithContextPath(URL)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if strings.TrimSpace(quietResult.Stderr()) != strings.TrimSpace(result.Combined()) { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", name, quietResult.Stderr(), result.Combined())) + } +} + +func (s *DockerSuite) TestBuildStderr(c *check.C) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + result := buildImage(name, build.WithDockerfile("FROM busybox\nRUN echo one")) + result.Assert(c, icmd.Success) + + // Windows to non-Windows should have a security warning + if runtime.GOOS == "windows" && testEnv.DaemonPlatform() != "windows" && !strings.Contains(result.Stdout(), "SECURITY WARNING:") { + c.Fatalf("Stdout contains unexpected output: %q", result.Stdout()) + } + + // Stderr should always be empty + if result.Stderr() != "" { + c.Fatalf("Stderr should have been empty, instead it's: %q", result.Stderr()) + } +} + +func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { + testRequires(c, UnixCli, DaemonIsLinux) // test uses chown: not available on windows + + name := "testbuildchownsinglefile" + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` +FROM busybox +COPY test / +RUN ls -l /test +RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] +`), + fakecontext.WithFiles(map[string]string{ + "test": "test", + })) + defer ctx.Close() + + if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { + c.Fatal(err) + } + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) +} + +func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + c.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + c.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + c.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(fakecontext.New(c, ctx))) + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + c.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + c.Fatalf("unexpected error: %v", err) + } +} + +func (s *DockerSuite) TestBuildXZHost(c *check.C) { + // /usr/local/sbin/xz gets permission denied for the user + testRequires(c, 
NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildxzhost" + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! -e /injected ]`), + build.WithFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"), + build.WithFile("xz", "#!/bin/sh\ntouch /injected"), + )) +} + +func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { + // /foo/file gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 + var ( + name = "testbuildvolumescontent" + expected = "some text" + volName = "/foo" + ) + + if testEnv.DaemonPlatform() == "windows" { + volName = "C:/foo" + } + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` +FROM busybox +COPY content /foo/file +VOLUME `+volName+` +CMD cat /foo/file`), + build.WithFile("content", expected), + )) + + out, _ := dockerCmd(c, "run", "--rm", name) + if out != expected { + c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) + } + +} + +// FIXME(vdemeester) part of this should be unit test, other part should be clearer +func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { + ctx := fakecontext.New(c, "", fakecontext.WithFiles(map[string]string{ + "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", + "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", + "files/dFile": "FROM busybox\nRUN echo from files/dFile", + "dFile": "FROM busybox\nRUN echo from dFile", + "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", + })) + defer ctx.Close() + + cli.Docker(cli.Args("build", "-t", "test1", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from files/Dockerfile", + }) + + cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from files/dFile", + }) + + cli.Docker(cli.Args("build", "--file=dFile", "-t", "test4", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + Out: "from dFile", + }) + + dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") + c.Assert(err, check.IsNil) + nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") + if _, err = os.Create(nonDockerfileFile); err != nil { + c.Fatal(err) + } + cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("unable to prepare context: the Dockerfile (%s) must be within the build context", nonDockerfileFile), + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + Out: "from files/Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", 
"."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "must be within the build context", + }) + + tmpDir := os.TempDir() + cli.Docker(cli.Args("build", "-t", "test9", ctx.Dir), cli.InDir(tmpDir)).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) + + cli.Docker(cli.Args("build", "-f", "dFile2", "-t", "test10", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{ + Out: "from files/dFile2", + }) +} + +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + // If Dockerfile is not present, use dockerfile + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox + RUN echo from dockerfile`), + )).Assert(c, icmd.Expected{ + Out: "from dockerfile", + }) + + // Prefer Dockerfile in place of dockerfile + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox + RUN echo from dockerfile`), + build.WithFile("Dockerfile", `FROM busybox + RUN echo from Dockerfile`), + )).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) +} + +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`})) + defer server.Close() + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + RUN echo from Dockerfile`)) + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + return nil + }) + + if !strings.Contains(result.Combined(), "from baz") || + strings.Contains(result.Combined(), "/tmp/baz") || + !strings.Contains(result.Combined(), "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", result.Combined()) + } + +} + +func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +RUN echo "from Dockerfile"`)) + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Stdin = strings.NewReader(`FROM busybox +RUN echo "from baz" +COPY * /tmp/ +RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) + return nil + }) + + if !strings.Contains(result.Combined(), "from baz") || + strings.Contains(result.Combined(), "/tmp/baz") || + !strings.Contains(result.Combined(), "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", result.Combined()) + } + +} + +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { + name := "testbuildfromofficial" + fromNames := []string{ + "busybox", + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + for idx, fromName := range fromNames { + imgName := fmt.Sprintf("%s%d", name, idx) + buildImageSuccessfully(c, imgName, build.WithDockerfile("FROM "+fromName)) + dockerCmd(c, "rmi", imgName) + } +} + +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli, 
DaemonIsLinux) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) + + name := "testbuilddockerfileoutsidecontext" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { + c.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(wd) + if err := os.Chdir(ctx); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { + c.Fatal(err) + } + + for _, dockerfilePath := range []string{ + filepath.Join("..", "outsideDockerfile"), + filepath.Join(ctx, "dockerfile1"), + filepath.Join(ctx, "dockerfile2"), + } { + result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") + c.Assert(result, icmd.Matches, icmd.Expected{ + Err: "must be within the build context", + ExitCode: 1, + }) + deleteImages(name) + } + + os.Chdir(tmpdir) + + // Path to Dockerfile should be resolved relative to working directory, not relative to context. + // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) + if err == nil { + c.Fatalf("Expected error. 
Out: %s", out) + } +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildSpaces(c *check.C) { + // Test to make sure that leading/trailing spaces on a command + // doesn't change the error msg we get + name := "testspaces" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nCOPY\n")) + defer ctx.Close() + + result1 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx)) + result1.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + ctx.Add("Dockerfile", "FROM busybox\nCOPY ") + result2 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx)) + result2.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) + } + + // Skip over the times + e1 := removeLogTimestamps(result1.Error.Error()) + e2 := removeLogTimestamps(result2.Error.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", result1.Error, result2.Error) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY") + result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx)) + result2.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + // Skip over the times + e1 = removeLogTimestamps(result1.Error.Error()) + e2 = removeLogTimestamps(result2.Error.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", result1.Error, result2.Error) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY ") + result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx)) + result2.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + // Skip over the times + e1 = removeLogTimestamps(result1.Error.Error()) + e2 = removeLogTimestamps(result2.Error.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", result1.Error, result2.Error) + } + +} + +func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { + // Test to make sure that spaces in quotes aren't lost + name := "testspacesquotes" + + dockerfile := `FROM busybox +RUN echo " \ + foo "` + + expected := "\n foo \n" + // Windows uses the builtin echo, which preserves quotes + if testEnv.DaemonPlatform() == "windows" { + expected = "\" foo \"" + } + + buildImage(name, build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{ + Out: expected, + }) +} + +// #4393 +func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This should error out + buildImage("docker-test-errcreatevolumewithfile", build.WithDockerfile(` + FROM busybox + RUN touch /foo + VOLUME /foo + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "file exists", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { + // Test to make sure that all Dockerfile commands (except the ones listed + // in skipCmds) will generate an error if no args are provided. + // Note: INSERT is deprecated so we exclude it because of that. 
+	skipCmds := map[string]struct{}{
+		"CMD":        {},
+		"RUN":        {},
+		"ENTRYPOINT": {},
+		"INSERT":     {},
+	}
+
+	if testEnv.DaemonPlatform() == "windows" {
+		skipCmds = map[string]struct{}{
+			"CMD":        {},
+			"RUN":        {},
+			"ENTRYPOINT": {},
+			"INSERT":     {},
+			"STOPSIGNAL": {},
+			"ARG":        {},
+			"USER":       {},
+			"EXPOSE":     {},
+		}
+	}
+
+	for cmd := range command.Commands {
+		cmd = strings.ToUpper(cmd)
+		if _, ok := skipCmds[cmd]; ok {
+			continue
+		}
+		var dockerfile string
+		if cmd == "FROM" {
+			dockerfile = cmd
+		} else {
+			// Add FROM to make sure we don't complain about it missing
+			dockerfile = "FROM busybox\n" + cmd
+		}
+
+		buildImage("args", build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
+			ExitCode: 1,
+			Err:      cmd + " requires",
+		})
+	}
+
+}
+
+func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	buildImage("sc", build.WithDockerfile("FROM scratch")).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "No image was generated",
+	})
+}
+
+func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
+	buildImageSuccessfully(c, "sc", build.WithBuildContext(c,
+		build.WithFile("Dockerfile", "FROM busybox\n"),
+		build.WithFile("..gitme", ""),
+	))
+}
+
+func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
+	testRequires(c, DaemonIsLinux) // No hello-world Windows image
+	name := "testbuildrunonejson"
+
+	buildImage(name, build.WithDockerfile(`FROM hello-world:frozen
+RUN [ "/hello" ]`)).Assert(c, icmd.Expected{
+		Out: "Hello from Docker",
+	})
+}
+
+func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
+	name := "testbuildemptystringvolume"
+
+	buildImage(name, build.WithDockerfile(`
+	FROM busybox
+	ENV foo=""
+	VOLUME $foo
+	`)).Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+}
+
+func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	cgroupParent := "test"
+	data, err := ioutil.ReadFile("/proc/self/cgroup")
+	if err != nil {
+		c.Fatalf("failed to read /proc/self/cgroup: %v", err)
+	}
+	selfCgroupPaths := testutil.ParseCgroupPaths(string(data))
+	_, found := selfCgroupPaths["memory"]
+	if !found {
+		c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
+	}
+	result := buildImage("buildcgroupparent",
+		cli.WithFlags("--cgroup-parent", cgroupParent),
+		build.WithDockerfile(`
+FROM busybox
+RUN cat /proc/self/cgroup
+`))
+	result.Assert(c, icmd.Success)
+	m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), result.Combined())
+	c.Assert(err, check.IsNil)
+	if !m {
+		c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, result.Combined())
+	}
+}
+
+// FIXME(vdemeester) could be a unit test
+func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
+	// Check to make sure our build output prints the Dockerfile cmd
+	// properly - there was a bug that caused it to be duplicated on the
+	// Step X line
+	name := "testbuildnodupoutput"
+	result := buildImage(name, build.WithDockerfile(`
+	FROM busybox
+	RUN env`))
+	result.Assert(c, icmd.Success)
+	exp := "\nStep 2/2 : RUN env\n"
+	if !strings.Contains(result.Combined(), exp) {
+		c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
+	}
+}
+
+// GH15826
+// FIXME(vdemeester) could be a unit test
+func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
+	// Explicit check to ensure that build starts from step 1 rather than 0
+	name := "testbuildstartsfromone"
+	result := buildImage(name, build.WithDockerfile(`FROM busybox`))
+	result.Assert(c, icmd.Success)
+	exp := "\nStep 1/1 : FROM busybox\n"
+	if !strings.Contains(result.Combined(), exp) {
+		c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
+	}
+}
+
+func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
+	// Test to make sure the bad command is quoted with just "s and
+	// not as a Go []string
+	name := "testbuildbadrunerrmsg"
+	shell := "/bin/sh -c"
+	exitCode := 127
+	if testEnv.DaemonPlatform() == "windows" {
+		shell = "cmd /S /C"
+		// architectural - Windows has to start the container to determine the exe is bad, Linux does not
+		exitCode = 1
+	}
+	exp := fmt.Sprintf(`The command '%s badEXE a1 \& a2 a3' returned a non-zero code: %d`, shell, exitCode)
+
+	buildImage(name, build.WithDockerfile(`
+	FROM busybox
+	RUN badEXE a1 \& a2 a3`)).Assert(c, icmd.Expected{
+		ExitCode: exitCode,
+		Err:      exp,
+	})
+}
+
+func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
+	repoName := s.setupTrustedImage(c, "trusted-build")
+	dockerFile := fmt.Sprintf(`
+	FROM %s
+	RUN []
+	`, repoName)
+
+	name := "testtrustedbuild"
+
+	buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
+		Out: fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7]),
+	})
+
+	// We should also have a tag reference for the image.
+	dockerCmd(c, "inspect", repoName)
+
+	// We should now be able to remove the tag reference.
+	dockerCmd(c, "rmi", repoName)
+}
+
+func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
+	repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
+	dockerFile := fmt.Sprintf(`
+	FROM %s
+	RUN []
+	`, repoName)
+
+	name := "testtrustedbuilduntrustedtag"
+
+	buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "does not have trust data for",
+	})
+}
+
+func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(tempDir)
+
+	// Make a real context directory in this temp directory with a simple
+	// Dockerfile.
+ realContextDirname := filepath.Join(tempDir, "context") + if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { + c.Fatal(err) + } + + if err = ioutil.WriteFile( + filepath.Join(realContextDirname, "Dockerfile"), + []byte(` + FROM busybox + RUN echo hello world + `), + os.FileMode(0644), + ); err != nil { + c.Fatal(err) + } + + // Make a symlink to the real context directory. + contextSymlinkName := filepath.Join(tempDir, "context_link") + if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { + c.Fatal(err) + } + + // Executing the build with the symlink as the specified context should + // *not* fail. + dockerCmd(c, "build", contextSymlinkName) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create the releases role + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the releases role + otherTag := fmt.Sprintf("%s:other", repoName) + cli.DockerCmd(c, "tag", "busybox", otherTag) + + cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "other", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + cli.DockerCmd(c, "rmi", otherTag) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + name := "testtrustedbuildreleasesrole" + cli.BuildCmd(c, name, trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + Out: fmt.Sprintf("FROM %s@sha", repoName), + }) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create a non-releases delegation role + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the other role + otherTag := fmt.Sprintf("%s:other", repoName) + cli.DockerCmd(c, "tag", "busybox", otherTag) + + cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "other", "targets/other") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + cli.DockerCmd(c, "rmi", otherTag) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildotherrole" + cli.Docker(cli.Build(name), trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +// Issue #15634: COPY fails when path starts with "null" +func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { + name := "testbuildnullstringinaddcopyvolume" + volName := "nullvolume" + if testEnv.DaemonPlatform() == "windows" { + volName = `C:\\nullvolume` + } + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + + ADD null / + COPY nullfile / + VOLUME `+volName+` + `), + build.WithFile("null", "test1"), + build.WithFile("nullfile", "test2"), + )) +} + +func (s *DockerSuite) TestBuildStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support 
STOPSIGNAL yet + imgName := "test_build_stop_signal" + buildImageSuccessfully(c, imgName, build.WithDockerfile(`FROM busybox + STOPSIGNAL SIGKILL`)) + res := inspectFieldJSON(c, imgName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } + + containerName := "test-container-stop-signal" + dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") + res = inspectFieldJSON(c, containerName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + var dockerfile string + if testEnv.DaemonPlatform() == "windows" { + // Bugs in Windows busybox port - use the default base image and native cmd stuff + dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` + ARG %s + RUN echo %%%s%% + CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) + } else { + dockerfile = fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s + CMD echo $%s`, envKey, envKey, envKey) + + } + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: envVal, + }) + + containerName := "bldargCont" + out, _ := dockerCmd(c, "run", "--name", containerName, imgName) + out = strings.Trim(out, " \r\n'") + if out != "" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envDef := "bar1" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s`, envKey, envDef) + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: envVal, + }) + + out, _ := dockerCmd(c, "history", "--no-trunc", imgName) + outputTabs := strings.Split(out, "\n")[1] + if !strings.Contains(outputTabs, envDef) { + c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) + } +} + +func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + proxy := "HTTP_PROXY=http://user:password@proxy.example.com" + explicitProxyKey := "http_proxy" + explicitProxyVal := "http://user:password@someproxy.example.com" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ARG %s + RUN echo "Testing Build Args!"`, envKey, explicitProxyKey) + + buildImage := func(imgName string) string { + cli.BuildCmd(c, imgName, + cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com", + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + "--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal), + "--build-arg", proxy), + build.WithDockerfile(dockerfile), + ) + return getIDByName(c, imgName) + } + + origID := buildImage(imgName) + result := cli.DockerCmd(c, "history", "--no-trunc", imgName) + out := result.Stdout() + + if strings.Contains(out, proxy) { + c.Fatalf("failed to exclude proxy settings from history!") + } + if strings.Contains(out, "https_proxy") { + c.Fatalf("failed to exclude proxy settings from history!") + } + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", envKey, envVal)}) + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)}) + + cacheID := buildImage(imgName + "-two") + 
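+	// The proxy build-args are excluded from the image history above, so they
+	// should not invalidate the cache either: rebuilding with the same args is
+	// expected to hit the cache and produce the same image ID.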
c.Assert(origID, checker.Equals, cacheID) +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + buildImageSuccessfully(c, imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + origImgID := getIDByName(c, imgName) + + imgNameCache := "bldargtestcachehit" + buildImageSuccessfully(c, imgNameCache, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + newImgID := getIDByName(c, imgName) + if newImgID != origImgID { + c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + extraEnvKey := "foo1" + extraEnvVal := "bar1" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ARG %s + RUN echo $%s`, envKey, extraEnvKey, envKey) + buildImageSuccessfully(c, imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + origImgID := getIDByName(c, imgName) + + imgNameCache := "bldargtestcachemiss" + buildImageSuccessfully(c, imgNameCache, + cli.WithFlags( + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal), + ), + build.WithDockerfile(dockerfile), + ) + newImgID := getIDByName(c, imgNameCache) + + if newImgID == origImgID { + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + newEnvVal := "bar1" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + buildImageSuccessfully(c, imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + origImgID := getIDByName(c, imgName) + + imgNameCache := "bldargtestcachemiss" + buildImageSuccessfully(c, imgNameCache, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal)), + build.WithDockerfile(dockerfile), + ) + newImgID := getIDByName(c, imgNameCache) + if newImgID == origImgID { + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + RUN echo $%s + CMD echo $%s + `, envKey, envKey, envValOveride, envKey, envKey) + + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOveride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +// FIXME(vdemeester) might be useful to merge with the one above ? 
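+// Note: as in the test above, the ENV value is expected to shadow the
+// --build-arg value for the same key even though the ARG/ENV order is
+// reversed; the override should therefore show up twice in the build output
+// (the ENV step line and the RUN echo) and again when the image is run.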
+func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ENV %s %s + ARG %s + RUN echo $%s + CMD echo $%s + `, envKey, envValOveride, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOveride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + + wdVar := "WDIR" + wdVal := "/tmp/" + addVar := "AFILE" + addVal := "addFile" + copyVar := "CFILE" + copyVal := "copyFile" + envVar := "foo" + envVal := "bar" + exposeVar := "EPORT" + exposeVal := "9999" + userVar := "USER" + userVal := "testUser" + volVar := "VOL" + volVal := "/testVol/" + + buildImageSuccessfully(c, imgName, + cli.WithFlags( + "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), + "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), + "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), + "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), + "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), + "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), + "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), + ), + build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox + ARG %s + WORKDIR ${%s} + ARG %s + ADD ${%s} testDir/ + ARG %s + COPY $%s testDir/ + ARG %s + ENV %s=${%s} + ARG %s + EXPOSE $%s + ARG %s + USER $%s + ARG %s + VOLUME ${%s}`, + wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, + envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)), + build.WithFile(addVal, "some stuff"), + build.WithFile(copyVal, "some stuff"), + ), + ) + + res := inspectField(c, imgName, "Config.WorkingDir") + c.Check(res, check.Equals, filepath.ToSlash(wdVal)) + + var resArr []string + inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr) + + found := false + for _, v := range resArr { + if fmt.Sprintf("%s=%s", envVar, envVal) == v { + found = true + break + } + } + if !found { + c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", + envVar, envVal, resArr) + } + + var resMap map[string]interface{} + inspectFieldAndUnmarshall(c, imgName, "Config.ExposedPorts", &resMap) + if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { + c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) + } + + res = inspectField(c, imgName, "Config.User") + if res != userVal { + c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) + } + + inspectFieldAndUnmarshall(c, imgName, "Config.Volumes", &resMap) + if _, ok := resMap[volVal]; !ok { + c.Fatalf("Config.Volumes value mismatch. 
Expected volume: %s, got: %v", volVal, resMap) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + envKey := "foo" + envVal := "bar" + envKey1 := "foo1" + envValOveride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + ENV %s ${%s} + RUN echo $%s + CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOveride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + ARG %s + CMD echo $%s`, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), envVal) { + c.Fatalf("able to access environment variable in output: %q expected to be missing", result.Combined()) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support --build-arg + imgName := "bldargtest" + envKey := "HTTP_PROXY" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if !strings.Contains(result.Combined(), envVal) { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envVal) + } + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s + ENV %s $%s + RUN echo $%s + CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOveride) != 1 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride) + } + + containerName := "bldargCont" + if out, _ := 
dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + warnStr := "[Warning] One or more build-args" + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: warnStr, + }) +} + +func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + dockerfile := `FROM busybox + ARG FOO1=fromfile + ARG FOO2=fromfile + ARG FOO3=fromfile + ARG FOO4=fromfile + ARG FOO5 + ARG FOO6 + ARG FO10 + RUN env + RUN [ "$FOO1" == "fromcmd" ] + RUN [ "$FOO2" == "" ] + RUN [ "$FOO3" == "fromenv" ] + RUN [ "$FOO4" == "fromfile" ] + RUN [ "$FOO5" == "fromcmd" ] + # The following should not exist at all in the env + RUN [ "$(env | grep FOO6)" == "" ] + RUN [ "$(env | grep FOO7)" == "" ] + RUN [ "$(env | grep FOO8)" == "" ] + RUN [ "$(env | grep FOO9)" == "" ] + RUN [ "$FO10" == "" ] + ` + result := buildImage("testbuildtimeargenv", + cli.WithFlags( + "--build-arg", fmt.Sprintf("FOO1=fromcmd"), + "--build-arg", fmt.Sprintf("FOO2="), + "--build-arg", fmt.Sprintf("FOO3"), // set in env + "--build-arg", fmt.Sprintf("FOO4"), // not set in env + "--build-arg", fmt.Sprintf("FOO5=fromcmd"), + // FOO6 is not set at all + "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning + "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning + "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning + "--build-arg", fmt.Sprintf("FO10"), // not set in env, empty value + ), + cli.WithEnvironmentVariables(append(os.Environ(), + "FOO1=fromenv", + "FOO2=fromenv", + "FOO3=fromenv")...), + build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + ), + ) + result.Assert(c, icmd.Success) + + // Now check to make sure we got a warning msg about unused build-args + i := strings.Index(result.Combined(), "[Warning]") + if i < 0 { + c.Fatalf("Missing the build-arg warning in %q", result.Combined()) + } + + out := result.Combined()[i:] // "out" should contain just the warning message now + + // These were specified on a --build-arg but no ARG was in the Dockerfile + c.Assert(out, checker.Contains, "FOO7") + c.Assert(out, checker.Contains, "FOO8") + c.Assert(out, checker.Contains, "FOO9") +} + +func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + envKey3 := "foo3" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s="" + ARG %s='' + ARG %s="''" + ARG %s='""' + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, + envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, + envKey2, envKey3) + buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) +} + +func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s= + ARG %s="" + ARG %s='' + RUN [ 
"$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) + buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN env`, envKey) + + result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envKey) != 1 { + c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", result.Combined()) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) { + imgName := "multifrombldargtest" + dockerfile := `FROM busybox + ARG foo=abc + LABEL multifromtest=1 + RUN env > /out + FROM busybox + ARG bar=def + RUN env > /out` + + result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1") + parentID := strings.TrimSpace(result.Stdout()) + + result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out") + c.Assert(result.Stdout(), checker.Contains, "foo=abc") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "foo") + c.Assert(result.Stdout(), checker.Contains, "bar=def") +} + +func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) { + imgName := "multifrombldargtest" + dockerfile := `ARG tag=nosuchtag + FROM busybox:${tag} + LABEL multifromtest=1 + RUN env > /out + FROM busybox:${tag} + ARG tag + RUN env > /out` + + result := cli.BuildCmd(c, imgName, + build.WithDockerfile(dockerfile), + cli.WithFlags("--build-arg", fmt.Sprintf("tag=latest"))) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1") + parentID := strings.TrimSpace(result.Stdout()) + + result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "tag") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Contains, "tag=latest") +} + +func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) { + imgName := "multifromunusedarg" + dockerfile := `FROM busybox + ARG foo + FROM busybox + ARG bar + RUN env > /out` + + result := cli.BuildCmd(c, imgName, + build.WithDockerfile(dockerfile), + cli.WithFlags("--build-arg", fmt.Sprintf("baz=abc"))) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "[Warning]") + c.Assert(result.Combined(), checker.Contains, "[baz] were not consumed") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "bar") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { + volName := "testname:/foo" + + if testEnv.DaemonPlatform() == "windows" { + volName = "testname:C:\\foo" + } + dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") + + dockerFile := `FROM busybox + VOLUME ` + volName + ` + RUN ls /foo/oops + ` + buildImage("test", build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerSuite) TestBuildTagEvent(c *check.C) { + since := daemonUnixTime(c) + + dockerFile := `FROM 
busybox + RUN echo events + ` + buildImageSuccessfully(c, "test", build.WithDockerfile(dockerFile)) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, "test:latest", "image") + var foundTag bool + for _, a := range actions { + if a == "tag" { + foundTag = true + break + } + } + + c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) +} + +// #15780 +func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER test-15780 + ` + buildImageSuccessfully(c, "tag1", cli.WithFlags("-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1"), build.WithDockerfile(dockerfile)) + + id1 := getIDByName(c, "tag1") + id2 := getIDByName(c, "tag2:v2") + c.Assert(id1, check.Equals, id2) +} + +// #17290 +func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY . ./`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + // warm up cache + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + // add new file to context, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) + c.Assert(err, checker.IsNil) + + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + if strings.Contains(result.Combined(), "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } +} + +func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY asymlink target`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() + c.Assert(out, checker.Matches, "bar") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") + + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() + c.Assert(out, checker.Matches, "baz") +} + +func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ + "foo/abc": "bar", + "foo/def": "baz", + })) + defer ctx.Close() + + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() + c.Assert(out, checker.Matches, "barbaz") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) + c.Assert(err, checker.IsNil) + + result := 
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") + + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() + c.Assert(out, checker.Matches, "barbax") + +} + +// TestBuildSymlinkBasename tests that target file gets basename from symlink, +// not from the target file. +func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { + name := "testbuildbrokensymlink" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "asymlink").Combined() + c.Assert(out, checker.Matches, "bar") +} + +// #17827 +func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { + name := "testbuildrootsource" + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` + FROM busybox + COPY / /data`), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + // warm up cache + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + // change file, should invalidate cache + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") +} + +// #19375 +func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { + buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), + build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "unable to prepare context: unable to find 'git': ", + }) + + buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), + build.WithContextPath("https://github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "unable to prepare context: unable to find 'git': ", + }) +} + +// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir +func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildworkdirwindowspath" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+testEnv.MinimalBaseImage()+` + RUN mkdir C:\\work + WORKDIR C:\\work + RUN if "%CD%" NEQ "C:\work" exit -1 + `)) +} + +func (s *DockerSuite) TestBuildLabel(c *check.C) { + name := "testbuildlabel" + testLabel := "foo" + + buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel), + build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo +`)) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { + name := "testbuildlabel" + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=bar"), + build.WithDockerfile("FROM busybox")) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + v, ok := labels["foo"] + if !ok { + c.Fatal("label `foo` not found in image") + } + c.Assert(v, checker.Equals, "bar") +} + +func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { + name := "testbuildlabelcachecommit" + 
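+	// The first build below warms the cache without --label; the rebuild adds
+	// --label and is expected to commit the label even though the layers come
+	// from cache.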
testLabel := "foo" + + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo + `)) + buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel), + build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo + `)) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { + name := "testbuildlabelmultiple" + testLabels := map[string]string{ + "foo": "bar", + "123": "456", + } + labelArgs := []string{} + for k, v := range testLabels { + labelArgs = append(labelArgs, "--label", k+"="+v) + } + + buildImageSuccessfully(c, name, cli.WithFlags(labelArgs...), + build.WithDockerfile(` + FROM `+minimalBaseImage()+` + LABEL default foo +`)) + + var labels map[string]string + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) + for k, v := range testLabels { + if x, ok := labels[k]; !ok || x != v { + c.Fatalf("label %s=%s not found in image", k, v) + } + } +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { + dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + baseImage := privateRegistryURL + "/baseimage" + + buildImageSuccessfully(c, baseImage, build.WithDockerfile(` + FROM busybox + ENV env1 val1 + `)) + + dockerCmd(c, "push", baseImage) + dockerCmd(c, "rmi", baseImage) + + buildImageSuccessfully(c, baseImage, build.WithDockerfile(fmt.Sprintf(` + FROM %s + ENV env2 val2 + `, baseImage))) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + // make sure the image is pulled when building + dockerCmd(c, "rmi", repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "--config", tmp, "build", "-"}, + Stdin: strings.NewReader(fmt.Sprintf("FROM %s", repoName)), + }).Assert(c, icmd.Success) +} + +// Test cases in #22036 +func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { + // Command line option labels will always override + name := "scratchy" + expected := `{"bar":"from-flag","foo":"from-flag"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`)) + res := inspectFieldJSON(c, name, "Config.Labels") 
+ if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + name = "from" + expected = `{"foo":"from-dockerfile"}` + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo from-dockerfile`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option label will override even via `FROM` + name = "new" + expected = `{"bar":"from-dockerfile2","foo":"new"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=new"), + build.WithDockerfile(`FROM from + LABEL bar from-dockerfile2`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + name = "scratchy2" + expected = `{"bar":"","foo":""}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo", "--label", "bar="), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + // This time is for inherited images + name = "new2" + expected = `{"bar":"","foo":""}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=", "--label", "bar"), + build.WithDockerfile(`FROM from + LABEL bar from-dockerfile2`)) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with only `FROM` + name = "scratchy" + expected = `{"bar":"from-flag","foo":"from-flag"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"), + build.WithDockerfile(`FROM `+minimalBaseImage())) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with env var + name = "scratchz" + expected = `{"bar":"$PATH"}` + buildImageSuccessfully(c, name, cli.WithFlags("--label", "bar=$PATH"), + build.WithDockerfile(`FROM `+minimalBaseImage())) + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +// Test case for #22855 +func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { + name := "test-delete-committed-file" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo test > file + RUN test -e file + RUN rm file + RUN sh -c "! test -e file"`)) +} + +// #20083 +func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { + // TODO Windows: Figure out why this test is flakey on TP5. If you add + // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, + // it is more reliable, but that's not a good fix. + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(ls -la /tmp/#1)" + RUN sh -c "(! ls -la /tmp/#2)" + RUN sh -c "(! ls /tmp/foo) && (! 
ls /tmp/foo2) && (ls /tmp/dir1/foo)"` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile("#1", "# file 1"), + build.WithFile("#2", "# file 2"), + build.WithFile(".dockerignore", `# Visual C++ cache files +# because we have git ;-) +# The above comment is from #20083 +foo +#dir1/foo +foo2 +# The following is considered as comment as # is at the beginning +#1 +# The following is not considered as comment as # is not at the beginning + #2 +`))) +} + +// Test case for #23221 +func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { + name := "test-with-utf8-bom" + dockerfile := []byte(`FROM busybox`) + bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", string(bomDockerfile)), + )) +} + +// Test case for UTF-8 BOM in .dockerignore, related to #23221 +func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { + name := "test-with-utf8-bom-dockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls -la /tmp + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + dockerignore := []byte("./Dockerfile\n") + bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", string(bomDockerignore)), + )) +} + +// #22489 Shell test to confirm config gets updated correctly +func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { + name := "testbuildshellupdatesconfig" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + SHELL ["foo", "-bar"]`)) + expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + if res != expected { + c.Fatalf("%s, expected %s", res, expected) + } + res = inspectFieldJSON(c, name, "ContainerConfig.Shell") + if res != `["foo","-bar"]` { + c.Fatalf(`%s, expected ["foo","-bar"]`, res) + } +} + +// #22489 Changing the shell multiple times and CMD after. +func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { + name := "testbuildshellmultiple" + + result := buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo defaultshell + SHELL ["echo"] + RUN echoshell + SHELL ["ls"] + RUN -l + CMD -l`)) + result.Assert(c, icmd.Success) + + // Must contain 'defaultshell' twice + if len(strings.Split(result.Combined(), "defaultshell")) != 3 { + c.Fatalf("defaultshell should have appeared twice in %s", result.Combined()) + } + + // Must contain 'echoshell' twice + if len(strings.Split(result.Combined(), "echoshell")) != 3 { + c.Fatalf("echoshell should have appeared twice in %s", result.Combined()) + } + + // Must contain "total " (part of ls -l) + if !strings.Contains(result.Combined(), "total ") { + c.Fatalf("%s should have contained 'total '", result.Combined()) + } + + // A container started from the image uses the shell-form CMD. + // Last shell is ls. CMD is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489. 
Changed SHELL with ENTRYPOINT +func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { + name := "testbuildshellentrypoint" + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + SHELL ["ls"] + ENTRYPOINT -l`)) + // A container started from the image uses the shell-form ENTRYPOINT. + // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489 Shell test to confirm shell is inherited in a subsequent build +func (s *DockerSuite) TestBuildShellInherited(c *check.C) { + name1 := "testbuildshellinherited1" + buildImageSuccessfully(c, name1, build.WithDockerfile(`FROM busybox + SHELL ["ls"]`)) + name2 := "testbuildshellinherited2" + buildImage(name2, build.WithDockerfile(`FROM `+name1+` + RUN -l`)).Assert(c, icmd.Expected{ + // ls -l has "total " followed by some number in it, ls without -l does not. + Out: "total ", + }) +} + +// #22489 Shell test to confirm non-JSON doesn't work +func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { + name := "testbuildshellnotjson" + + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. + )).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "SHELL requires the arguments to be in JSON form", + }) +} + +// #22489 Windows shell test to confirm native is powershell if executing a PS command +// This would error if the default shell were still cmd. +func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildshellpowershell" + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + SHELL ["powershell", "-command"] + RUN Write-Host John`)).Assert(c, icmd.Expected{ + Out: "\nJohn\n", + }) +} + +// Verify that escape is being correctly applied to words when escape directive is not \. +// Tests WORKDIR, ADD +func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildescapenotbackslashwordtesta" + buildImage(name, build.WithDockerfile(`# escape= `+"`"+` + FROM `+minimalBaseImage()+` + WORKDIR c:\windows + RUN dir /w`)).Assert(c, icmd.Expected{ + Out: "[System32]", + }) + + name = "testbuildescapenotbackslashwordtestb" + buildImage(name, build.WithDockerfile(`# escape= `+"`"+` + FROM `+minimalBaseImage()+` + SHELL ["powershell.exe"] + WORKDIR c:\foo + ADD Dockerfile c:\foo\ + RUN dir Dockerfile`)).Assert(c, icmd.Expected{ + Out: "-a----", + }) +} + +// #22868. Make sure shell-form CMD is marked as escaped in the config of the image +func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildcmdshellescaped" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+minimalBaseImage()+` + CMD "ipconfig" + `)) + res := inspectFieldJSON(c, name, "Config.ArgsEscaped") + if res != "true" { + c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) + } + dockerCmd(c, "run", "--name", "inspectme", name) + dockerCmd(c, "wait", "inspectme") + res = inspectFieldJSON(c, name, "Config.Cmd") + + if res != `["cmd","/S","/C","\"ipconfig\""]` { + c.Fatalf("CMD was not escaped Config.Cmd: got %v", res) + } +} + +// Test case for #24912. 
+func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) { + name := "testbuildstepswithprogress" + totalRun := 5 + result := buildImage(name, build.WithDockerfile("FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun))) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) + for i := 2; i <= 1+totalRun; i++ { + c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) + } +} + +func (s *DockerSuite) TestBuildWithFailure(c *check.C) { + name := "testbuildwithfailure" + + // First test case can only detect `nobody` in runtime so all steps will show up + dockerfile := "FROM busybox\nRUN nobody" + result := buildImage(name, build.WithDockerfile(dockerfile)) + c.Assert(result.Error, checker.NotNil) + c.Assert(result.Stdout(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Stdout(), checker.Contains, "Step 2/2 : RUN nobody") + + // Second test case `FFOM` should have been detected before build runs so no steps + dockerfile = "FFOM nobody\nRUN nobody" + result = buildImage(name, build.WithDockerfile(dockerfile)) + c.Assert(result.Error, checker.NotNil) + c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 1/2 : FROM busybox") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 2/2 : RUN nobody") +} + +func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) { + dockerfile := ` + FROM busybox + RUN echo "test" + ENTRYPOINT ["sh"]` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, "build1") + + // rebuild with cache-from + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) +} + +func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch bax` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, "build1") + + // rebuild with cache-from + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + cli.DockerCmd(c, "rmi", "build2") + + // no cache match with unknown source + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=nosuchtag"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0) + cli.DockerCmd(c, "rmi", "build2") + + // clear parent images + tempDir, err := ioutil.TempDir("", "test-build-cache-from-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, "img.tar") + 
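+	// save/rmi/load round-trips build1 through a tarball, which is expected to
+	// drop the local parent chain ({{.Parent}} comes back empty); the checks
+	// below verify that --cache-from still matches layers without parents.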
cli.DockerCmd(c, "save", "-o", tempFile, "build1") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) + parentID := cli.DockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1").Combined() + c.Assert(strings.TrimSpace(parentID), checker.Equals, "") + + // cache still applies without parents + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + history1 := cli.DockerCmd(c, "history", "-q", "build2").Combined() + + // Retry, no new intermediate images + result = cli.BuildCmd(c, "build3", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, "build3") + c.Assert(id1, checker.Equals, id3) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + history2 := cli.DockerCmd(c, "history", "-q", "build3").Combined() + + c.Assert(history1, checker.Equals, history2) + cli.DockerCmd(c, "rmi", "build2") + cli.DockerCmd(c, "rmi", "build3") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) + + // Modify file, everything up to last command and layers are reused + dockerfile = ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch newfile` + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) + c.Assert(err, checker.IsNil) + + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) + + layers1Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1").Combined() + layers2Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2").Combined() + + var layers1 []string + var layers2 []string + c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil) + c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil) + + c.Assert(len(layers1), checker.Equals, len(layers2)) + for i := 0; i < len(layers1)-1; i++ { + c.Assert(layers1[i], checker.Equals, layers2[i]) + } + c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1]) +} + +func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ADD baz / + FROM busybox + ADD baz /` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) + defer ctx.Close() + + result := cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + // second part of dockerfile was a repeat of first so should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) + + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + // now both parts of dockerfile should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) +} + +func (s *DockerSuite) TestBuildNetNone(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildnetnone" + buildImage(name, cli.WithFlags("--network=none"), build.WithDockerfile(` + FROM busybox + RUN ping -c 1 8.8.8.8 + `)).Assert(c, icmd.Expected{ + 
ExitCode: 1, + Out: "unreachable", + }) +} + +func (s *DockerSuite) TestBuildNetContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname") + + name := "testbuildnetcontainer" + buildImageSuccessfully(c, name, cli.WithFlags("--network=container:"+strings.TrimSpace(id)), + build.WithDockerfile(` + FROM busybox + RUN nc localhost 1234 > /otherhost + `)) + + host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost") + c.Assert(strings.TrimSpace(host), check.Equals, "foobar") +} + +func (s *DockerSuite) TestBuildWithExtraHost(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "testbuildwithextrahost" + buildImageSuccessfully(c, name, + cli.WithFlags( + "--add-host", "foo:127.0.0.1", + "--add-host", "bar:127.0.0.1", + ), + build.WithDockerfile(` + FROM busybox + RUN ping -c 1 foo + RUN ping -c 1 bar + `)) +} + +func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerfile := ` + FROM busybox + RUN ping -c 1 foo` + + testCases := []struct { + testName string + dockerfile string + buildFlag string + }{ + {"extra_host_missing_ip", dockerfile, "--add-host=foo"}, + {"extra_host_missing_ip_with_delimiter", dockerfile, "--add-host=foo:"}, + {"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"}, + {"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"}, + {"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"}, + } + + for _, tc := range testCases { + result := buildImage(tc.testName, cli.WithFlags(tc.buildFlag), build.WithDockerfile(tc.dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + }) + } +} + +func (s *DockerSuite) TestBuildSquashParent(c *check.C) { + testRequires(c, ExperimentalDaemon) + dockerFile := ` + FROM busybox + RUN echo hello > /hello + RUN echo world >> /hello + RUN echo hello > /remove_me + ENV HELLO world + RUN rm /remove_me + ` + // build and get the ID that we can use later for history comparison + name := "test" + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + origID := getIDByName(c, name) + + // build with squash + buildImageSuccessfully(c, name, cli.WithFlags("--squash"), build.WithDockerfile(dockerFile)) + id := getIDByName(c, name) + + out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld") + + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! 
-f /remove_me ]") + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`) + + // make sure the ID produced is the ID of the tag we specified + inspectID := inspectImage(c, "test", ".ID") + c.Assert(inspectID, checker.Equals, id) + + origHistory, _ := dockerCmd(c, "history", origID) + testHistory, _ := dockerCmd(c, "history", "test") + + splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n") + splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n") + c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) + + out = inspectImage(c, id, "len .RootFS.Layers") + c.Assert(strings.TrimSpace(out), checker.Equals, "2") +} + +func (s *DockerSuite) TestBuildContChar(c *check.C) { + name := "testbuildcontchar" + + buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{ + Out: "Step 1/1 : FROM busybox", + }) + + result := buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi\n") + + result = buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \\`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\n") + + result = buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \\\`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") +} + +func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) { + dockerfile := ` + FROM busybox AS first + COPY foo bar + + FROM busybox + %s + COPY baz baz + RUN echo mno > baz/cc + + FROM busybox + COPY bar / + COPY --from=1 baz sub/ + COPY --from=0 bar baz + COPY --from=first bar bay` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, "")), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + "bar": "def", + "baz/aa": "ghi", + "baz/bb": "jkl", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + cli.DockerCmd(c, "run", "build1", "cat", "bar").Assert(c, icmd.Expected{Out: "def"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/aa").Assert(c, icmd.Expected{Out: "ghi"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/cc").Assert(c, icmd.Expected{Out: "mno"}) + cli.DockerCmd(c, "run", "build1", "cat", "baz").Assert(c, icmd.Expected{Out: "abc"}) + cli.DockerCmd(c, "run", "build1", "cat", "bay").Assert(c, icmd.Expected{Out: "abc"}) + + result := cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + // all commands should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7) + c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2")) + + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644) + c.Assert(err, checker.IsNil) + + // changing file in parent block should not affect last block + result = cli.BuildCmd(c, "build3", build.WithExternalBuildContext(ctx)) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644) + c.Assert(err, checker.IsNil) + + // changing file in 
parent block should affect both first and last block + result = cli.BuildCmd(c, "build4", build.WithExternalBuildContext(ctx)) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + cli.DockerCmd(c, "run", "build4", "cat", "bay").Assert(c, icmd.Expected{Out: "pqr"}) + cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"}) +} + +func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) { + testCases := []struct { + dockerfile string + expectedError string + }{ + { + dockerfile: ` + FROM busybox + COPY --from=foo foo bar`, + expectedError: "invalid from flag value foo", + }, + { + dockerfile: ` + FROM busybox + COPY --from=0 foo bar`, + expectedError: "invalid from flag value 0: refers to current build stage", + }, + { + dockerfile: ` + FROM busybox AS foo + COPY --from=bar foo bar`, + expectedError: "invalid from flag value bar", + }, + { + dockerfile: ` + FROM busybox AS 1 + COPY --from=1 foo bar`, + expectedError: "invalid name for build stage", + }, + } + + for _, tc := range testCases { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(tc.dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + + cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedError, + }) + + ctx.Close() + } +} + +func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) { + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + dockerfile = ` + FROM build1:latest AS foo + FROM busybox + COPY --from=foo bar / + COPY foo /` + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "build2", "cat", "bar").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out = cli.DockerCmd(c, "run", "build2", "cat", "foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + +func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) { + dockerfile := ` + FROM busybox + COPY --from=busybox /etc/passwd /mypasswd + RUN cmp /etc/passwd /mypasswd` + + if DaemonIsWindows() { + dockerfile = ` + FROM busybox + COPY --from=busybox License.txt foo` + } + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + if DaemonIsWindows() { + out := cli.DockerCmd(c, "run", "build1", "cat", "License.txt").Combined() + c.Assert(len(out), checker.GreaterThan, 10) + out2 := cli.DockerCmd(c, "run", "build1", "cat", "foo").Combined() + c.Assert(out, check.Equals, out2) + } +} + +func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, repoName, build.WithExternalBuildContext(ctx)) + + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + dockerfile = ` + FROM busybox + 
COPY --from=%s bar baz` + + ctx = fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, repoName))) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"}) +} + +func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) { + dockerfile := ` + FROM busybox as foo + COPY foo / + FROM foo as foo1 + RUN echo 1 >> foo + FROM foo as foO2 + RUN echo 2 >> foo + FROM foo + COPY --from=foo1 foo f1 + COPY --from=FOo2 foo f2 + ` // the foO2 case also tests that stage names are case-insensitive + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + cli.Docker(cli.Args("run", "build1", "cat", "foo")).Assert(c, icmd.Expected{Out: "bar"}) + cli.Docker(cli.Args("run", "build1", "cat", "f1")).Assert(c, icmd.Expected{Out: "bar1"}) + cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"}) +} + +func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) { + img1 := s.setupTrustedImage(c, "trusted-build1") + img2 := s.setupTrustedImage(c, "trusted-build2") + dockerFile := fmt.Sprintf(` + FROM %s AS build-base + RUN echo ok > /foo + FROM %s + COPY --from=build-base foo bar`, img1, img2) + + name := "testcopyfromtrustedbuild" + + r := buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)) + r.Assert(c, icmd.Expected{ + Out: fmt.Sprintf("FROM %s@sha", img1[:len(img1)-7]), + }) + r.Assert(c, icmd.Expected{ + Out: fmt.Sprintf("FROM %s@sha", img2[:len(img2)-7]), + }) + + dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"}) +} + +func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY foo c:\\bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + dockerfile = ` + FROM build1:latest + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 c:\\bar / + COPY foo /` + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out = cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + +func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 %s c:\\oscopy + ` + exp := icmd.Expected{ + ExitCode: 1, + Err: "copy from c:\\ or c:\\windows is not allowed on windows", + } + buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "C:\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, 
"c:\\\\windows"))).Assert(c, exp) + buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\wInDows"))).Assert(c, exp) +} + +func (s *DockerSuite) TestBuildCopyFromForbidWindowsRelativePaths(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 %s c:\\oscopy + ` + exp := icmd.Expected{ + ExitCode: 1, + Err: "copy from c:\\ or c:\\windows is not allowed on windows", + } + buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:"))).Assert(c, exp) + buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "."))).Assert(c, exp) + buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "..\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, ".\\\\windows"))).Assert(c, exp) + buildImage("testforbidsystempaths5", build.WithDockerfile(fmt.Sprintf(dockerfile, "\\\\windows"))).Assert(c, exp) +} + +func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.MinimalBaseImage() + ` + COPY foo / + FROM ` + testEnv.MinimalBaseImage() + ` + COPY --from=0 c:\\fOo c:\\copied + RUN type c:\\copied + ` + cli.Docker(cli.Build("copyfrom-windows-insensitive"), build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "hello world"), + )).Assert(c, icmd.Expected{ + ExitCode: 0, + Out: "hello world", + }) +} + +// #33176 +func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerfile := ` + FROM busybox + WORKDIR /foo/bar + FROM scratch + ENV FOO=bar + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + res := cli.InspectCmd(c, "build1", cli.Format(".Config.WorkingDir")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, "") +} + +func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) { + dockerfile := ` + FROM busybox AS build-env + CMD ["/dev"] + FROM busybox + CMD ["/dist"] + ` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "build-env")) + + //res := inspectFieldJSON(c, "build1", "Config.Cmd") + res := cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`) + + result := cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "nosuchtarget")) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "failed to reach build target", + }) +} + +// TestBuildOpaqueDirectory tests that a build succeeds which +// creates opaque directories. 
+// See https://github.com/docker/docker/issues/25244 +func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerFile := ` + FROM busybox + RUN mkdir /dir1 && touch /dir1/f1 + RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2 + RUN touch /dir1/f3 + RUN [ -f /dir1/f2 ] + ` + // Test that the build succeeds; the last command fails if the opaque directory + // was not handled correctly + buildImageSuccessfully(c, "testopaquedirectory", build.WithDockerfile(dockerFile)) +} + +// Windows test for USER in dockerfile +func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsuser" + buildImage(name, build.WithDockerfile(`FROM `+testEnv.MinimalBaseImage()+` + RUN net user user /add + USER user + RUN set username + `)).Assert(c, icmd.Expected{ + Out: "USERNAME=user", + }) +} + +// Verifies that when WORKDIR is set to a non-existing directory and COPY file . is used, +// the directory is created and the file is copied into it, as opposed to the file being +// copied as a file with the name of the directory. +// Fix for 27545 (found on Windows, but the regression test is valid for Linux too). +// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. +func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { + name := "testbuildcopyfiledotwithworkdir" + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +WORKDIR /foo +COPY file . +RUN ["cat", "/foo/file"] +`), + build.WithFile("file", "content"), + )) +} + +// Case-insensitive environment variables on Windows +func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsenvcaseinsensitive" + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+testEnv.MinimalBaseImage()+` + ENV FOO=bar foo=baz + `)) + res := inspectFieldJSON(c, name, "Config.Env") + if res != `["foo=baz"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. + c.Fatalf("Case insensitive environment variables on Windows failed. 
Got %s", res) + } +} + +// Test case for 29667 +func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) { + image := "testworkdirimagecmd" + buildImageSuccessfully(c, image, build.WithDockerfile(` +FROM busybox +WORKDIR /foo/bar +`)) + out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + + // The Windows busybox image has a blank `cmd` + lookingFor := `["sh"]` + if testEnv.DaemonPlatform() == "windows" { + lookingFor = "null" + } + c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor) + + image = "testworkdirlabelimagecmd" + buildImageSuccessfully(c, image, build.WithDockerfile(` +FROM busybox +WORKDIR /foo/bar +LABEL a=b +`)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor) +} + +// Test case for 28902/28909 +func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildworkdircmd" + dockerFile := ` + FROM busybox + WORKDIR / + ` + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + result := buildImage(name, build.WithDockerfile(dockerFile)) + result.Assert(c, icmd.Success) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) { + name := "test_build_line_error_onbuild" + buildImage(name, build.WithDockerfile(`FROM busybox + ONBUILD + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *check.C) { + name := "test_build_line_error_unknown_instruction" + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox + RUN echo hello world + NOINSTRUCTION echo ba + RUN echo hello + ERROR + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) { + name := "test_build_line_error_with_empty_lines" + cli.Docker(cli.Build(name), build.WithDockerfile(` + FROM busybox + + RUN echo hello world + + NOINSTRUCTION echo ba + + CMD ["/bin/init"] + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) { + name := "test_build_line_error_with_comments" + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox + # This will print hello world + # and then ba + RUN echo hello world + NOINSTRUCTION echo ba + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION", + }) +} + +// #31957 +func (s *DockerSuite) TestBuildSetCommandWithDefinedShell(c *check.C) { + buildImageSuccessfully(c, "build1", build.WithDockerfile(` +FROM busybox +SHELL ["/bin/sh", "-c"] +`)) + buildImageSuccessfully(c, "build2", build.WithDockerfile(` +FROM build1 +CMD echo foo +`)) + + out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", "build2") + c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`) +} + +func (s *DockerSuite) TestBuildIidFile(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFile") + if err != nil { + c.Fatal(err) + } 
+ defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + name := "testbuildiidfile" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + build.WithDockerfile(`FROM `+minimalBaseImage()+` AS stage1 +ENV FOO FOO +FROM `+minimalBaseImage()+` +ENV BAR BAZ`), + cli.WithFlags("--iidfile", tmpIidFile)) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} + +func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + err = ioutil.WriteFile(tmpIidFile, []byte("Dummy"), 0666) + c.Assert(err, check.IsNil) + + cli.Docker(cli.Build("testbuildiidfilecleanuponfail"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`), + cli.WithFlags("--iidfile", tmpIidFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + _, err = os.Stat(tmpIidFile) + c.Assert(err, check.NotNil) + c.Assert(os.IsNotExist(err), check.Equals, true) +} + +func (s *DockerSuite) TestBuildIidFileSquash(c *check.C) { + testRequires(c, ExperimentalDaemon) + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileSquash") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iidsquash") + + name := "testbuildiidfilesquash" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + // This could be minimalBaseImage except + // https://github.com/moby/moby/issues/33823 requires + // `touch` to work around. 
+ build.WithDockerfile(`FROM busybox +ENV FOO FOO +ENV BAR BAR +RUN touch /foop +`), + cli.WithFlags("--iidfile", tmpIidFile, "--squash")) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} diff --git a/integration-cli/docker_cli_build_unix_test.go b/integration-cli/docker_cli_build_unix_test.go new file mode 100644 index 0000000..11c6823 --- /dev/null +++ b/integration-cli/docker_cli_build_unix_test.go @@ -0,0 +1,204 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-units" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { + testRequires(c, cpuCfsQuota) + name := "testbuildresourceconstraints" + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(` + FROM hello-world:frozen + RUN ["/hello"] + `)) + cli.Docker( + cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, "."), + cli.InDir(ctx.Dir), + ).Assert(c, icmd.Success) + + out := cli.DockerCmd(c, "ps", "-lq").Combined() + cID := strings.TrimSpace(out) + + type hostConfig struct { + Memory int64 + MemorySwap int64 + CpusetCpus string + CpusetMems string + CPUShares int64 + CPUQuota int64 + Ulimits []*units.Ulimit + } + + cfg := inspectFieldJSON(c, cID, "HostConfig") + + var c1 hostConfig + err := json.Unmarshal([]byte(cfg), &c1) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) + c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource constraints not set properly for MemorySwap")) + c.Assert(c1.CpusetCpus, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetCpus")) + c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) + c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) + c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) + c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) + c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) + + // Make sure constraints aren't saved to image + cli.DockerCmd(c, "run", "--name=test", name) + + cfg = inspectFieldJSON(c, "test", "HostConfig") + + var c2 hostConfig + err = json.Unmarshal([]byte(cfg), &c2) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) + c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource 
leaked from build for MemorySwap")) + c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) + c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) + c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) + c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) + c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) +} + +func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddown" + + ctx := func() *fakecontext.Fake { + dockerfile := ` + FROM busybox + ADD foo /bar/ + RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] + RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] + ` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testFile, err := os.Create(filepath.Join(tmpDir, "foo")) + if err != nil { + c.Fatalf("failed to create foo file: %v", err) + } + defer testFile.Close() + + icmd.RunCmd(icmd.Cmd{ + Command: []string{"chown", "daemon:daemon", "foo"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakecontext.New(c, tmpDir) + }() + + defer ctx.Close() + + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) +} + +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. 
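+// (The buffered start/die channels below let the event matcher signal without blocking the observer.) 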
+func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcancellation" + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // (Note: one year, will never finish) + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nRUN sleep 31536000")) + defer ctx.Close() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + + stdoutBuild, err := buildCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) + } + + matchCID := regexp.MustCompile("Running in (.+)") + scanner := bufio.NewScanner(stdoutBuild) + + outputBuffer := new(bytes.Buffer) + var buildID string + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line) + outputBuffer.WriteString("\n") + if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { + buildID = matches[1] + break + } + } + + if buildID == "" { + c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) + } + + testActions := map[string]chan bool{ + "start": make(chan bool, 1), + "die": make(chan bool, 1), + } + + matcher := matchEventLine(buildID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check it exited because killed. + if err := buildCmd.Wait(); err != nil && !testutil.IsKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } +} diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go new file mode 100644 index 0000000..c7115c8 --- /dev/null +++ b/integration-cli/docker_cli_by_digest_test.go @@ -0,0 +1,690 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +var ( + remoteRepoName = "dockercli/busybox-by-dgst" + repoName = fmt.Sprintf("%s/%s", privateRegistryURL, remoteRepoName) + pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") + digestRegex = regexp.MustCompile("Digest: ([\\S]+)") +) + +func setupImage(c *check.C) (digest.Digest, error) { + return setupImageWithTag(c, "latest") +} + +func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { + containerName := "busyboxbydigest" + + // new file is committed because this layer is used for detecting malicious + // changes. 
If it were committed as an empty layer it would be skipped on pull + and malicious changes would never be detected. + cli.DockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox", "touch", "anewfile") + + // tag the image to upload it to the private registry + repoAndTag := repoName + ":" + tag + cli.DockerCmd(c, "commit", containerName, repoAndTag) + + // delete the container as we don't need it any more + cli.DockerCmd(c, "rm", "-fv", containerName) + + // push the image + out := cli.DockerCmd(c, "push", repoAndTag).Combined() + + // delete our local repo that we previously tagged + cli.DockerCmd(c, "rmi", repoAndTag) + + matches := pushDigestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) + pushDigest := matches[1] + + return digest.Digest(pushDigest), nil +} + +func testPullByTagDisplaysDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the tag + out, _ := dockerCmd(c, "pull", repoName) + + // the pull output includes "Digest: <digest>", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pulled digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func testPullByDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the <name>@<digest> reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + out, _ := dockerCmd(c, "pull", imageReference) + + // the pull output includes "Digest: <digest>", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pulled digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func testPullByDigestNoFallback(c *check.C) { + testRequires(c, DaemonIsLinux) + // pull from the registry using the <name>@<digest> reference + imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) + out, _, err := dockerCmdWithError("pull", imageReference) + c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("manifest for %s not found", imageReference), check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) +} + +func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerRegistrySuite) TestCreateByDigest(c 
*check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "createByDigest" + dockerCmd(c, "create", "--name", containerName, imageReference) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "runByDigest" + out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") + + foundRegex := regexp.MustCompile("found=([^\n]+)") + matches := foundRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the <name>@<digest> reference + dockerCmd(c, "pull", imageReference) + + // make sure inspect runs ok + inspectField(c, imageReference, "Id") + + // do the delete + err = deleteImages(imageReference) + c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) + + // try to inspect again - it should error this time + _, err = inspectFieldWithError(imageReference, "Id") + // unexpected nil err trying to inspect what should be a non-existent image + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "No such object") +} + +func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the <name>@<digest> reference + dockerCmd(c, "pull", imageReference) + + // get the image id + imageID := inspectField(c, imageReference, "Id") + + // do the build + name := "buildbydigest" + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf( + `FROM %s + CMD ["/bin/echo", "Hello World"]`, imageReference))) + + // get the build's image id + res := inspectField(c, name, "Config.Image") + // make sure they match + c.Assert(res, checker.Equals, imageID) +} + +func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the <name>@<digest> reference + dockerCmd(c, "pull", imageReference) + + // tag it + tag := "tagbydigest" + dockerCmd(c, "tag", imageReference, tag) + + expectedID := inspectField(c, imageReference, "Id") + + tagID := inspectField(c, tag, "Id") + c.Assert(tagID, checker.Equals, expectedID) +} + +func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using 
the <name>@<digest> reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "images") + c.Assert(out, checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) +} + +func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { + + // setup image1 + digest1, err := setupImageWithTag(c, "tag1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=<none>, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "tag2") + // error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=<none>, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=<none>, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag1 + dockerCmd(c, "pull", repoName+":tag1") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, <tag>, AND repo, <none>, digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, <none>, digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag 2 + dockerCmd(c, "pull", repoName+":tag2") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + // make sure busybox has tag, but not digest + busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*<none>\s`) + c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) +} + +func (s *DockerRegistrySuite) TestListDanglingImagesWithDigests(c *check.C) 
{ + // setup image1 + digest1, err := setupImageWithTag(c, "dangle1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=<none>, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "dangle2") + // error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure repo shown, tag=<none>, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=<none>, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle1 tag + dockerCmd(c, "pull", repoName+":dangle1") + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, <tag>, AND repo, <none>, digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*dangle1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, <none>, digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle2 tag + dockerCmd(c, "pull", repoName+":dangle2") + + // list images, show tagged images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*dangle2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images, no longer dangling, should not match + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest2.String(), out)) +} + +func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, check.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the <name>@<digest> reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "inspect", imageReference) + + var imageJSON 
[]types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + c.Assert(imageJSON, checker.HasLen, 1) + c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) + c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) +} + +func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the <name>@<digest> reference + dockerCmd(c, "pull", imageReference) + + // build an image from it + imageName1 := "images_ps_filter_test" + buildImageSuccessfully(c, imageName1, build.WithDockerfile(fmt.Sprintf( + `FROM %s + LABEL match me 1`, imageReference))) + + // run a container based on that + dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") + expectedID := getIDByName(c, "test1") + + // run a container based on a descendant of it too + dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") + expectedID1 := getIDByName(c, "test2") + + expectedIDs := []string{expectedID, expectedID1} + + // Invalid imageReference + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) + // the ancestor filter should match no containers for this invalid reference + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Valid imageReference + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) + checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) +} + +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the <name>@<digest> reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + // just in case... 
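+ // (the extra tag gives the image a tag reference alongside the digest reference before deleting by ID) 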
+ + dockerCmd(c, "tag", imageReference, repoName+":sometag") + + imageID := inspectField(c, imageReference, "Id") + + dockerCmd(c, "rmi", imageID) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repoName + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted only repoTag2, because there's another tag + inspectField(c, repoTag, "Id") + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndMultiRepoTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + repo2 := fmt.Sprintf("%s/%s", repoName, "repo2") + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repo2 + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted repoTag and image reference, but left repoTag2 + inspectField(c, repoTag2, "Id") + _, err = inspectFieldWithError(imageReference, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image digest reference should have been removed")) + + _, err = inspectFieldWithError(repoTag, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image tag reference should have been removed")) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. 
+ undo := s.reg.TempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the <name>@<digest> reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.FSLayers[0] = schema1.FSLayer{ + BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + } + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.TempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the <name>@<digest> reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.Layers[0].Digest + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. 
+ undo := s.reg.TempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(testEnv.DockerBasePath(), "image", s.d.StorageDriver(), "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the <name>@<digest> reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.FSLayers[0].BlobSum + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.TempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(testEnv.DockerBasePath(), "image", s.d.StorageDriver(), "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the <name>@<digest> reference. 
+ imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
+ out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
+ c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status"))
+
+ expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
+ c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
+}
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
new file mode 100644
index 0000000..549b35a
--- /dev/null
+++ b/integration-cli/docker_cli_commit_test.go
@@ -0,0 +1,157 @@
+package main
+
+import (
+ "strings"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/cli"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) {
+ out := cli.DockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo").Combined()
+
+ cleanedContainerID := strings.TrimSpace(out)
+
+ cli.DockerCmd(c, "wait", cleanedContainerID)
+
+ out = cli.DockerCmd(c, "commit", cleanedContainerID).Combined()
+
+ cleanedImageID := strings.TrimSpace(out)
+
+ cli.DockerCmd(c, "inspect", cleanedImageID)
+}
+
+func (s *DockerSuite) TestCommitWithoutPause(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo")
+
+ cleanedContainerID := strings.TrimSpace(out)
+
+ dockerCmd(c, "wait", cleanedContainerID)
+
+ out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID)
+
+ cleanedImageID := strings.TrimSpace(out)
+
+ dockerCmd(c, "inspect", cleanedImageID)
+}
+
+// Test that committing a paused container does not unpause it after the commit.
+func (s *DockerSuite) TestCommitPausedContainer(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ out, _ := dockerCmd(c, "run", "-i", "-d", "busybox")
+
+ cleanedContainerID := strings.TrimSpace(out)
+
+ dockerCmd(c, "pause", cleanedContainerID)
+
+ out, _ = dockerCmd(c, "commit", cleanedContainerID)
+
+ out = inspectField(c, cleanedContainerID, "State.Paused")
+ // commit should not unpause a paused container
+ c.Assert(out, checker.Contains, "true")
+}
+
+func (s *DockerSuite) TestCommitNewFile(c *check.C) {
+ dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo")
+
+ imageID, _ := dockerCmd(c, "commit", "commiter")
+ imageID = strings.TrimSpace(imageID)
+
+ out, _ := dockerCmd(c, "run", imageID, "cat", "/foo")
+ actual := strings.TrimSpace(out)
+ c.Assert(actual, checker.Equals, "koye")
+}
+
+func (s *DockerSuite) TestCommitHardlink(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2")
+
+ chunks := strings.Split(strings.TrimSpace(firstOutput), " ")
+ inode := chunks[0]
+ chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2)
+ c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container.
Expected to find %q in %q", inode, chunks[1:])) + + imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") + imageID = strings.TrimSpace(imageID) + + secondOutput, _ := dockerCmd(c, "run", "-t", imageID, "ls", "-di", "file1", "file2") + + chunks = strings.Split(strings.TrimSpace(secondOutput), " ") + inode = chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) +} + +func (s *DockerSuite) TestCommitTTY(c *check.C) { + dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", imageID, "/bin/ls") +} + +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", imageID, "true") +} + +func (s *DockerSuite) TestCommitChange(c *check.C) { + dockerCmd(c, "run", "--name", "test", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "EXPOSE 8080", + "--change", "ENV DEBUG true", + "--change", "ENV test 1", + "--change", "ENV PATH /foo", + "--change", "LABEL foo bar", + "--change", "CMD [\"/bin/sh\"]", + "--change", "WORKDIR /opt", + "--change", "ENTRYPOINT [\"/bin/sh\"]", + "--change", "USER testuser", + "--change", "VOLUME /var/lib/docker", + "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalised on Windows + expected := map[string]string{ + "Config.ExposedPorts": "map[8080/tcp:{}]", + "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Labels": "map[foo:bar]", + "Config.Cmd": "[/bin/sh]", + "Config.WorkingDir": prefix + slash + "opt", + "Config.Entrypoint": "[/bin/sh]", + "Config.User": "testuser", + "Config.Volumes": "map[/var/lib/docker:{}]", + "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", + } + + for conf, value := range expected { + res := inspectField(c, imageID, conf) + if res != value { + c.Errorf("%s('%s'), expected %s", conf, res, value) + } + } +} + +func (s *DockerSuite) TestCommitChangeLabels(c *check.C) { + dockerCmd(c, "run", "--name", "test", "--label", "some=label", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "LABEL some=label2", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + c.Assert(inspectField(c, imageID, "Config.Labels"), checker.Equals, "map[some:label2]") + // check that container labels didn't change + c.Assert(inspectField(c, "test", "Config.Labels"), checker.Equals, "map[some:label]") +} diff --git a/integration-cli/docker_cli_config_create_test.go b/integration-cli/docker_cli_config_create_test.go new file mode 100644 index 0000000..b823254 --- /dev/null +++ b/integration-cli/docker_cli_config_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigCreate(c 
*check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + c.Assert(len(config.Spec.Labels), checker.Equals, 2) + c.Assert(config.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(config.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestConfigCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + Data: []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + fake := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: id, + }, + Data: []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("configs: %s", fake)) + + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("config", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("config", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("config", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake[:5]) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "configCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_config" + out, err := d.Cmd("config", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} diff --git a/integration-cli/docker_cli_config_inspect_test.go b/integration-cli/docker_cli_config_inspect_test.go new file mode 100644 index 0000000..ba4e80f --- /dev/null +++ b/integration-cli/docker_cli_config_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("config", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var configs []swarm.Config + c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) + c.Assert(configs, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestConfigInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: n, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "config", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) 
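+ // `docker config inspect` with several names should emit a single JSON
+ // array with one element per config, which the asserts below decode.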
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + var configs []swarm.Config + c.Assert(json.Unmarshal([]byte(out), &configs), checker.IsNil) + c.Assert(configs, checker.HasLen, 2) +} diff --git a/integration-cli/docker_cli_config_ls_test.go b/integration-cli/docker_cli_config_ls_test.go new file mode 100644 index 0000000..5c07012 --- /dev/null +++ b/integration-cli/docker_cli_config_ls_test.go @@ -0,0 +1,125 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigList(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName0 := "test0" + testName1 := "test1" + + // create config test0 + id0 := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName0, + Labels: map[string]string{"type": "test"}, + }, + Data: []byte("TESTINGDATA0"), + }) + c.Assert(id0, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id0)) + + config := d.GetConfig(c, id0) + c.Assert(config.Spec.Name, checker.Equals, testName0) + + // create config test1 + id1 := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName1, + Labels: map[string]string{"type": "production"}, + }, + Data: []byte("TESTINGDATA1"), + }) + c.Assert(id1, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id1)) + + config = d.GetConfig(c, id1) + c.Assert(config.Spec.Name, checker.Equals, testName1) + + // test by command `docker config ls` + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by name `docker config ls --filter name=xxx` + args := []string{ + "config", + "ls", + "--filter", + "name=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + // test filter by id `docker config ls --filter id=xxx` + args = []string{ + "config", + "ls", + "--filter", + "id=" + id1, + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by label `docker config ls --filter label=xxx` + args = []string{ + "config", + "ls", + "--filter", + "label=type", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + args = []string{ + "config", + "ls", + "--filter", + "label=type=test", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + args = []string{ + "config", + "ls", + "--filter", + "label=type=production", + } + out, err = d.Cmd(args...) 
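+ // Only test1 carries the label type=production, so the listing should
+ // now include test1 and exclude test0.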
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test invalid filter `docker config ls --filter noexisttype=xxx` + args = []string{ + "config", + "ls", + "--filter", + "noexisttype=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, "Error response from daemon: Invalid filter 'noexisttype'") +} diff --git a/integration-cli/docker_cli_config_test.go b/integration-cli/docker_cli_config_test.go new file mode 100644 index 0000000..46fe456 --- /dev/null +++ b/integration-cli/docker_cli_config_test.go @@ -0,0 +1,150 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/homedir" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + result := icmd.RunCommand(dockerBinary, "-H="+server.URL[7:], "ps") + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) + + c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) + + c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+os.Getenv("DOCKER_CLI_VERSION")+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", result.Combined())) + + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", result.Combined())) + +} + +func (s *DockerSuite) TestConfigDir(c *check.C) { + cDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(cDir) + + // First make sure pointing to empty dir doesn't generate an error + dockerCmd(c, "--config", cDir, "ps") + + // Test with env var too + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "ps"}, + Env: appendBaseEnv(true, "DOCKER_CONFIG="+cDir), + }).Assert(c, icmd.Success) + + // Start a server so we can check to see if the config file was + // loaded properly + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + // Create a 
dummy config file in our new config dir
+ data := `{
+ "HttpHeaders": { "MyHeader": "MyValue" }
+ }`
+
+ tmpCfg := filepath.Join(cDir, "config.json")
+ err = ioutil.WriteFile(tmpCfg, []byte(data), 0600)
+ c.Assert(err, checker.IsNil, check.Commentf("Err creating file"))
+
+ env := appendBaseEnv(false)
+
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "--config", cDir, "-H=" + server.URL[7:], "ps"},
+ Env: env,
+ }).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ Error: "exit status 1",
+ })
+ c.Assert(headers["Myheader"], checker.NotNil)
+ c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header"))
+
+ // Reset headers and try again using env var this time
+ headers = map[string][]string{}
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "--config", cDir, "-H=" + server.URL[7:], "ps"},
+ Env: append(env, "DOCKER_CONFIG="+cDir),
+ }).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ })
+ c.Assert(headers["Myheader"], checker.NotNil)
+ c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header"))
+
+ // FIXME(vdemeester) should be a unit test
+ // Reset headers and make sure flag overrides the env var
+ headers = map[string][]string{}
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "--config", cDir, "-H=" + server.URL[7:], "ps"},
+ Env: append(env, "DOCKER_CONFIG=MissingDir"),
+ }).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ })
+ c.Assert(headers["Myheader"], checker.NotNil)
+ c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header"))
+
+ // FIXME(vdemeester) should be a unit test
+ // Reset headers and make sure flag overrides the env var.
+ // Almost the same as the previous case, but make sure "MissingDir" isn't
+ // ignored - we don't want to default back to the env var.
+ headers = map[string][]string{}
+ icmd.RunCmd(icmd.Cmd{
+ Command: []string{dockerBinary, "--config", "MissingDir", "-H=" + server.URL[7:], "ps"},
+ Env: append(env, "DOCKER_CONFIG="+cDir),
+ }).Assert(c, icmd.Expected{
+ ExitCode: 1,
+ Error: "exit status 1",
+ })
+
+ c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value"))
+}
diff --git a/integration-cli/docker_cli_cp_from_container_test.go b/integration-cli/docker_cli_cp_from_container_test.go
new file mode 100644
index 0000000..116f246
--- /dev/null
+++ b/integration-cli/docker_cli_cp_from_container_test.go
@@ -0,0 +1,488 @@
+package main
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/go-check/check"
+)
+
+// docker cp CONTAINER:PATH LOCALPATH
+
+// Try all of the test cases from the archive package which implements the
+// internals of `docker cp` and ensure that the behavior matches when actually
+// copying to and from containers.
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
+
+// Test for error when SRC does not exist.
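+// (runDockerCp, isCpNotExist, isCpNotDir and friends are helpers from the
+// integration-cli test utilities; going by their use here, they wrap
+// `docker cp` and classify the error text it returns.)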
+func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) {
+ containerID := makeTestContainer(c, testContainerOptions{})
+
+ tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists")
+ defer os.RemoveAll(tmpDir)
+
+ err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir, nil)
+ c.Assert(err, checker.NotNil)
+
+ c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+}
+
+// Test for error when SRC ends in a trailing
+// path separator but it exists as a file.
+func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+ tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir")
+ defer os.RemoveAll(tmpDir)
+
+ err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir, nil)
+ c.Assert(err, checker.NotNil)
+
+ c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+ tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists")
+ defer os.RemoveAll(tmpDir)
+
+ makeTestContentInDir(c, tmpDir)
+
+ // Try with a file source.
+ srcPath := containerCpPath(containerID, "/file1")
+ dstPath := cpPath(tmpDir, "notExists", "file1")
+
+ err := runDockerCp(c, srcPath, dstPath, nil)
+ c.Assert(err, checker.NotNil)
+
+ c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+
+ // Try with a directory source.
+ srcPath = containerCpPath(containerID, "/dir1")
+
+ err = runDockerCp(c, srcPath, dstPath, nil)
+ c.Assert(err, checker.NotNil)
+
+ c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+}
+
+// Test for error when DST ends in a trailing
+// path separator but exists as a file.
+func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+ tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir")
+ defer os.RemoveAll(tmpDir)
+
+ makeTestContentInDir(c, tmpDir)
+
+ // Try with a file source.
+ srcPath := containerCpPath(containerID, "/file1")
+ dstPath := cpPathTrailingSep(tmpDir, "file1")
+
+ err := runDockerCp(c, srcPath, dstPath, nil)
+ c.Assert(err, checker.NotNil)
+
+ c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+
+ // Try with a directory source.
+ srcPath = containerCpPath(containerID, "/dir1")
+
+ err = runDockerCp(c, srcPath, dstPath, nil)
+ c.Assert(err, checker.NotNil)
+
+ c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+}
+
+// Check that copying from a container to a local symlink copies to the symlink
+// target and does not overwrite the local symlink itself.
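+// For example (illustrative paths, not the fixtures used below):
+//   ln -s real.txt link.txt
+//   docker cp <container>:/file2 link.txt
+// should leave link.txt pointing at real.txt and write the contents of
+// /file2 into real.txt.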
+func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // First, copy a file from the container to a symlink to a file. This + // should overwrite the symlink target contents with the source contents. + srcPath := containerCpPath(containerID, "/file2") + dstPath := cpPath(tmpDir, "symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a directory. This + // should copy the file into the symlink target directory. + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a file that does + // not exist (a broken symlink). This should create the target file with + // the contents of the source file. + dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = containerCpPath(containerID, "/dir2") + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory that does not exist (a broken symlink). This should create + // the target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". 
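+ // (That is, the broken symlink was materialized as a real directory
+ // tree rather than being left dangling.)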
+ c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpFromCaseA(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-a") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpFromCaseB(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-b") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPathTrailingSep(tmpDir, "testDir") + + err := runDockerCp(c, srcPath, dstDir, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpFromCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "file2") + + // Ensure the local file starts with different content. + c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. 
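+// For example, copying <container>:/file1 into an existing local dir1/
+// should produce dir1/file1.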
+func (s *DockerSuite) TestCpFromCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPath(tmpDir, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + // Ensure that dstPath doesn't exist. + _, err := os.Stat(dstPath) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir1") + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpFromCaseE(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-e") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPath(containerID, "dir1") + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpFromCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. 
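+// For example, copying <container>:/root/dir1 into an existing local dir2/
+// should produce dir2/dir1/file1-1.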
+func (s *DockerSuite) TestCpFromCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstDir := cpPath(tmpDir, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + dstPath := filepath.Join(resultDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseH(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-h") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove resultDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpFromCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. 
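+// The trailing "/." on SRC is what selects contents-only copying, so dir2/
+// should gain file1-1 directly rather than a nested dir1/.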
+func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstDir := cpPath(tmpDir, "dir2") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go new file mode 100644 index 0000000..f7ed459 --- /dev/null +++ b/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,665 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Ensure that an all-local path case returns an error. 
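+// (Neither "foo" nor "bar" names a container, so the CLI has no container
+// side to copy to or from.)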
+func (s *DockerSuite) TestCpLocalOnly(c *check.C) { + err := runDockerCp(c, "foo", "bar", nil) + c.Assert(err, checker.NotNil) + + c.Assert(err.Error(), checker.Contains, "must specify at least one container source") +} + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("../../../../../../../../../../../../", cpFullPath) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- garbage path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for garbage path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that relative paths are relative to the container's rootfs +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + var relPath string + if path.IsAbs(cpFullPath) { + // normally this is `filepath.Rel("/", cpFullPath)` but we cannot + // get this unix-path manipulation on windows with filepath. 
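+ // Dropping the leading "/" turns "/some/path/test" into
+ // "some/path/test"; the daemon should still resolve it against the
+ // container's rootfs.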
+ relPath = cpFullPath[1:] + } + c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) + + dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- relative path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for relative path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that absolute paths are relative to the container's rootfs +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- absolute path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for absolute path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, "container_path") + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path") + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + // We should have copied a symlink *NOT* the file itself! + linkTarget, err := os.Readlink(tmpname) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) +} + +// Check that symlinks to a directory behave as expected when copying one from +// a container. 
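+// Copying the link itself (no trailing separator) should preserve it as a
+// symlink; adding a trailing separator should dereference it and copy the
+// target directory under the link's name.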
+func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + linkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) + + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testDir, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) +} + +// Check that symlinks to a directory behave as expected when copying one to a +// container. +func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testVol) + + // Create a test container with a local volume. We will test by copying + // to the volume path in the container which we can then verify locally. + out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") + + containerID := strings.TrimSpace(out) + + // Create a temp directory to hold a test file nested in a directory. + testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This file will be at "/testDir/some/path/test" and will be copied into + // the test volume later. + hostTestFilename := filepath.Join(testDir, cpFullPath) + c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) + c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) + + // Now create another temp directory to hold a symlink to the + // "/testDir/some" directory. + linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(linkDir) + + // Then symlink "/linkDir/dir_link" to "/testdir/some". 
+ linkTarget := filepath.Join(testDir, cpTestPathParent) + localLink := filepath.Join(linkDir, "dir_link") + c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) + + // Now copy that symlink into the test volume in the container. + dockerCmd(c, "cp", localLink, containerID+":/testVol") + + // This copy command should have copied the symlink *not* the target. + expectedPath := filepath.Join(testVol, "dir_link") + actualLinkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) + + c.Assert(actualLinkTarget, checker.Equals, linkTarget) + + // Good, now remove that copied link for the next test. + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the test volume directory in the + // container. + dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testVol, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". + stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) + + // And this directory should contain the file copied from the host at the + // expected location: "/testVol/dir_link/path/test" + expectedFilepath := filepath.Join(testVol, "dir_link/path/test") + fileContents, err := ioutil.ReadFile(expectedFilepath) + c.Assert(err, checker.IsNil) + + c.Assert(string(fileContents), checker.Equals, cpHostContents) +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path", cpTestName) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- symlink path component can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for symlink path component + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that cp with unprivileged 
user doesn't return any error
+func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ testRequires(c, UnixCli) // uses chmod/su: not available on windows
+
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
+
+ containerID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "wait", containerID)
+ // failed to set up container
+ c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+ tmpdir, err := ioutil.TempDir("", "docker-integration")
+ c.Assert(err, checker.IsNil)
+
+ defer os.RemoveAll(tmpdir)
+
+ c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil)
+
+ result := icmd.RunCommand("su", "unprivilegeduser", "-c",
+ fmt.Sprintf("%s cp %s:%s %s", dockerBinary, containerID, cpTestName, tmpdir))
+ result.Assert(c, icmd.Expected{})
+}
+
+func (s *DockerSuite) TestCpSpecialFiles(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ testRequires(c, SameHostDaemon)
+
+ outDir, err := ioutil.TempDir("", "cp-test-special-files")
+ c.Assert(err, checker.IsNil)
+ defer os.RemoveAll(outDir)
+
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo")
+
+ containerID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "wait", containerID)
+ // failed to set up container
+ c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+ // Copy actual /etc/resolv.conf
+ dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir)
+
+ expected := readContainerFile(c, containerID, "resolv.conf")
+ actual, err := ioutil.ReadFile(outDir + "/resolv.conf")
+ c.Assert(err, checker.IsNil)
+
+ // Expected copied file to be duplicate of the container resolvconf
+ c.Assert(bytes.Equal(actual, expected), checker.True)
+
+ // Copy actual /etc/hosts
+ dockerCmd(c, "cp", containerID+":/etc/hosts", outDir)
+
+ expected = readContainerFile(c, containerID, "hosts")
+ actual, err = ioutil.ReadFile(outDir + "/hosts")
+ c.Assert(err, checker.IsNil)
+
+ // Expected copied file to be duplicate of the container hosts
+ c.Assert(bytes.Equal(actual, expected), checker.True)
+
+ // Copy actual /etc/hostname
+ dockerCmd(c, "cp", containerID+":/etc/hostname", outDir)
+
+ expected = readContainerFile(c, containerID, "hostname")
+ actual, err = ioutil.ReadFile(outDir + "/hostname")
+ c.Assert(err, checker.IsNil)
+
+ // Expected copied file to be duplicate of the container hostname
+ c.Assert(bytes.Equal(actual, expected), checker.True)
+}
+
+func (s *DockerSuite) TestCpVolumePath(c *check.C) {
+ // stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user
+ testRequires(c, NotUserNamespace)
+ testRequires(c, DaemonIsLinux)
+ testRequires(c, SameHostDaemon)
+
+ tmpDir, err := ioutil.TempDir("", "cp-test-volumepath")
+ c.Assert(err, checker.IsNil)
+ defer os.RemoveAll(tmpDir)
+ outDir, err := ioutil.TempDir("", "cp-test-volumepath-out")
+ c.Assert(err, checker.IsNil)
+ defer os.RemoveAll(outDir)
+ _, err = os.Create(tmpDir + "/test")
+ c.Assert(err, checker.IsNil)
+
+ out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
+
+ containerID := strings.TrimSpace(out)
+
+ out, _ = dockerCmd(c, "wait", containerID)
+ // failed to set up container
+ c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+ // Copy actual volume path
+ dockerCmd(c, "cp", containerID+":/foo", outDir)
+
+ stat, err := os.Stat(outDir + "/foo")
+ c.Assert(err, checker.IsNil)
+ // expected copied content to be dir
+ c.Assert(stat.IsDir(), checker.True)
+ stat, err = os.Stat(outDir + "/foo/bar")
+ c.Assert(err, checker.IsNil)
+ //
Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy file nested in volume + dockerCmd(c, "cp", containerID+":/foo/bar", outDir) + + stat, err = os.Stat(outDir + "/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy Bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz", outDir) + stat, err = os.Stat(outDir + "/baz") + c.Assert(err, checker.IsNil) + // Expected `baz` to be a dir + c.Assert(stat.IsDir(), checker.True) + + // Copy file nested in bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + c.Assert(err, checker.IsNil) + fb2, err := ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) + + // Copy bind-mounted file + dockerCmd(c, "cp", containerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + c.Assert(err, checker.IsNil) + fb2, err = ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) +} + +func (s *DockerSuite) TestCpToDot(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + c.Assert(err, checker.IsNil) + defer os.Chdir(cwd) + c.Assert(os.Chdir(tmpdir), checker.IsNil) + dockerCmd(c, "cp", containerID+":/test", ".") + content, err := ioutil.ReadFile("./test") + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCpToStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "cp", containerID+":/test", "-"), + exec.Command("tar", "-vtf", "-")) + + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, "test") + c.Assert(out, checker.Contains, "-rw") +} + +func (s *DockerSuite) TestCpNameHasColon(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) + content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCopyAndRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + expectedMsg := "hello" + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", 
containerID)
+    // failed to set up container
+    c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+    tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-")
+    c.Assert(err, checker.IsNil)
+    defer os.RemoveAll(tmpDir)
+
+    dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir)
+
+    out, _ = dockerCmd(c, "start", "-a", containerID)
+
+    c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg)
+}
+
+func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) {
+    testRequires(c, DaemonIsLinux)
+    dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox")
+
+    tmpDir, err := ioutil.TempDir("", "test")
+    c.Assert(err, checker.IsNil)
+    defer os.RemoveAll(tmpDir)
+    dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir)
+}
+
+// Test copy with option `-L`: follow symbolic links.
+// Check that symlinks to a file behave as expected when copying one from
+// a container to the host while following symbolic links.
+func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) {
+    testRequires(c, DaemonIsLinux)
+    out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link")
+    if exitCode != 0 {
+        c.Fatal("failed to create a container", out)
+    }
+
+    cleanedContainerID := strings.TrimSpace(out)
+
+    out, _ = dockerCmd(c, "wait", cleanedContainerID)
+    if strings.TrimSpace(out) != "0" {
+        c.Fatal("failed to set up container", out)
+    }
+
+    testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink")
+    if err != nil {
+        c.Fatal(err)
+    }
+    defer os.RemoveAll(testDir)
+
+    // With -L, this copy command should follow the symlink and copy the
+    // target's contents, not the link itself, into the temporary directory.
+    dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir)
+
+    expectedPath := filepath.Join(testDir, "dir_link")
+
+    expected := []byte(cpContainerContents)
+    actual, err := ioutil.ReadFile(expectedPath)
+    c.Assert(err, checker.IsNil)
+
+    if !bytes.Equal(actual, expected) {
+        c.Fatalf("Expected copied file to be duplicate of the container symbolic link target")
+    }
+    os.Remove(expectedPath)
+
+    // Now test copying a symbolic link to a nonexistent file on the host.
+    expectedPath = filepath.Join(testDir, "somefile_host")
+    // expectedPath shouldn't exist; if it does, remove it
+    if _, err := os.Lstat(expectedPath); err == nil {
+        os.Remove(expectedPath)
+    }
+
+    dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath)
+
+    actual, err = ioutil.ReadFile(expectedPath)
+    c.Assert(err, checker.IsNil)
+
+    if !bytes.Equal(actual, expected) {
+        c.Fatalf("Expected copied file to be duplicate of the container symbolic link target")
+    }
+    defer os.Remove(expectedPath)
+}
diff --git a/integration-cli/docker_cli_cp_to_container_test.go b/integration-cli/docker_cli_cp_to_container_test.go
new file mode 100644
index 0000000..97e9aa1
--- /dev/null
+++ b/integration-cli/docker_cli_cp_to_container_test.go
@@ -0,0 +1,600 @@
+package main
+
+import (
+    "os"
+
+    "github.com/docker/docker/integration-cli/checker"
+    "github.com/go-check/check"
+)
+
+// docker cp LOCALPATH CONTAINER:PATH
+
+// Try all of the test cases from the archive package which implements the
+// internals of `docker cp` and ensure that the behavior matches when actually
+// copying to and from containers.
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. 
If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
+
+// Test for error when SRC does not exist.
+func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) {
+    containerID := makeTestContainer(c, testContainerOptions{})
+
+    tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists")
+    defer os.RemoveAll(tmpDir)
+
+    srcPath := cpPath(tmpDir, "file1")
+    dstPath := containerCpPath(containerID, "file1")
+
+    err := runDockerCp(c, srcPath, dstPath, nil)
+    c.Assert(err, checker.NotNil)
+
+    c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+}
+
+// Test for error when SRC ends in a trailing path separator
+// but exists as a file.
+func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) {
+    containerID := makeTestContainer(c, testContainerOptions{})
+
+    tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir")
+    defer os.RemoveAll(tmpDir)
+
+    makeTestContentInDir(c, tmpDir)
+
+    srcPath := cpPathTrailingSep(tmpDir, "file1")
+    dstPath := containerCpPath(containerID, "testDir")
+
+    err := runDockerCp(c, srcPath, dstPath, nil)
+    c.Assert(err, checker.NotNil)
+
+    c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) {
+    testRequires(c, DaemonIsLinux)
+    containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+    tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists")
+    defer os.RemoveAll(tmpDir)
+
+    makeTestContentInDir(c, tmpDir)
+
+    // Try with a file source.
+    srcPath := cpPath(tmpDir, "file1")
+    dstPath := containerCpPath(containerID, "/notExists", "file1")
+
+    err := runDockerCp(c, srcPath, dstPath, nil)
+    c.Assert(err, checker.NotNil)
+
+    c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+
+    // Try with a directory source.
+    srcPath = cpPath(tmpDir, "dir1")
+
+    err = runDockerCp(c, srcPath, dstPath, nil)
+    c.Assert(err, checker.NotNil)
+
+    c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err))
+}
+
+// Test for error when DST ends in a trailing path separator but exists as a
+// file. Also test that we cannot overwrite an existing directory with a
+// non-directory and cannot overwrite an existing non-directory with a
+// directory.
+func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) {
+    testRequires(c, DaemonIsLinux)
+    containerID := makeTestContainer(c, testContainerOptions{addContent: true})
+
+    tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir")
+    defer os.RemoveAll(tmpDir)
+
+    makeTestContentInDir(c, tmpDir)
+
+    // Try with a file source.
+    srcPath := cpPath(tmpDir, "dir1/file1-1")
+    dstPath := containerCpPathTrailingSep(containerID, "file1")
+
+    // The client should encounter an error trying to stat the destination
+    // and then be unable to copy since the destination is asserted to be a
+    // directory but does not exist.
+    err := runDockerCp(c, srcPath, dstPath, nil)
+    c.Assert(err, checker.NotNil)
+
+    c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err))
+
+    // Try with a directory source.
+ srcPath = cpPath(tmpDir, "dir1") + + // The client should encounter an error trying to stat the destination and + // then decide to extract to the parent directory instead with a rebased + // name in the source archive, but this directory would overwrite the + // existing file with the same name. + err = runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) +} + +// Check that copying from a local path to a symlink in a container copies to +// the symlink target and does not overwrite the container symlink itself. +func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { + // stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol := getTestDir(c, "test-cp-to-symlink-destination-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // First, copy a local file to a symlink to a file in the container. This + // should overwrite the symlink target contents with the source contents. + srcPath := cpPath(testVol, "file2") + dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a local file to a symlink to a directory in the container. + // This should copy the file into the symlink target directory. + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file to a symlink to a file that does not exist (a broken + // symlink) in the container. This should create the target file with the + // contents of the source file. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a directory in the + // container. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = cpPath(testVol, "/dir2") + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. 
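+ // (That is, /vol2/symlinkToDir1 must still point at "dir1"; only files
+ // beneath the link target may change.)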
+ c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a local directory that does + // not exist (a broken symlink) in the container. This should create the + // target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpToCaseA(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + workDir: "/root", command: makeCatFileCommand("itWorks.txt"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-a") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpToCaseB(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("testDir/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-b") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPathTrailingSep(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstDir, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. 
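+// Like the rest of the cases here, verification relies on makeCatFileCommand:
+// the test container's command cats the expected destination file when the
+// container is started, so containerStartOutputEquals can observe what the
+// copy actually wrote. For case C the pattern reduces to the following sketch
+// (helpers as defined in docker_cli_cp_utils_test.go; paths illustrative):
+//
+//     id := makeTestContainer(c, testContainerOptions{
+//         addContent: true, workDir: "/root",
+//         command: makeCatFileCommand("file2"),
+//     })
+//     c.Assert(runDockerCp(c, srcPath, containerCpPath(id, "/root/file2"), nil), checker.IsNil)
+//     c.Assert(containerStartOutputEquals(c, id, "file1\n"), checker.IsNil)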
+func (s *DockerSuite) TestCpToCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("file2"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/file2") + + // Ensure the container's file starts with the original content. + c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPath(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func (s *DockerSuite) TestCpToCaseE(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-e") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. 
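+    // (The trailing-separator variant must yield the identical result.)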
+ c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpToCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("dir2/dir1/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "/root/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir2/dir1/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseH(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-h") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. 
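+    // (srcDir still ends in "/.", so the contents-only semantics are exercised
+    // against a trailing-separator destination as well.)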
+ containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpToCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpToCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("/dir2/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/dir2/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// The `docker cp` command should also ensure that you cannot +// write to a container rootfs that is marked as read-only. 
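+// The daemon reports this as a "marked read-only" error, which
+// isCpCannotCopyReadOnly in docker_cli_cp_utils_test.go matches on. In sketch
+// form (id and path illustrative):
+//
+//     err := runDockerCp(c, srcPath, containerCpPath(id, "/root/x"), nil)
+//     c.Assert(isCpCannotCopyReadOnly(err), checker.True)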
+func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + readOnly: true, workDir: "/root", + command: makeCatFileCommand("shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} + +// The `docker cp` command should also ensure that you +// cannot write to a volume that is mounted as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(tmpDir), workDir: "/root", + command: makeCatFileCommand("/vol_ro/shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. 
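+    // (The container's command cats /vol_ro/shouldNotExist on start, so empty
+    // output proves the copy never landed.)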
+ c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} diff --git a/integration-cli/docker_cli_cp_to_container_unix_test.go b/integration-cli/docker_cli_cp_to_container_unix_test.go new file mode 100644 index 0000000..fa55b6e --- /dev/null +++ b/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -0,0 +1,81 @@ +// +build !windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCpToContainerWithPermissions(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + tmpDir := getTestDir(c, "test-cp-to-host-with-permissions") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerName := "permtest" + + _, exc := dockerCmd(c, "create", "--name", containerName, "debian:jessie", "/bin/bash", "-c", "stat -c '%u %g %a' /permdirtest /permdirtest/permtest") + c.Assert(exc, checker.Equals, 0) + defer dockerCmd(c, "rm", "-f", containerName) + + srcPath := cpPath(tmpDir, "permdirtest") + dstPath := containerCpPath(containerName, "/") + c.Assert(runDockerCp(c, srcPath, dstPath, []string{"-a"}), checker.IsNil) + + out, err := startContainerGetOutput(c, containerName) + c.Assert(err, checker.IsNil, check.Commentf("output: %v", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "2 2 700\n65534 65534 400", check.Commentf("output: %v", out)) +} + +// Check ownership is root, both in non-userns and userns enabled modes +func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + tmpVolDir := getTestDir(c, "test-cp-tmpvol") + containerID := makeTestContainer(c, + testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) + + tmpDir := getTestDir(c, "test-cp-to-check-ownership") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/tmpvol", "file1") + + err := runDockerCp(c, srcPath, dstPath, nil) + c.Assert(err, checker.IsNil) + + stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) + c.Assert(err, checker.IsNil) + uid, gid, err := getRootUIDGID() + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) +} + +func getRootUIDGID() (int, int, error) { + uidgid := strings.Split(filepath.Base(testEnv.DockerBasePath()), ".") + if len(uidgid) == 1 { + //user namespace remapping is not turned on; return 0 + return 0, 0, nil + } + uid, err := strconv.Atoi(uidgid[0]) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(uidgid[1]) + if err != nil { + return 0, 0, err + } + return uid, gid, nil +} diff --git a/integration-cli/docker_cli_cp_utils_test.go b/integration-cli/docker_cli_cp_utils_test.go new file mode 100644 index 0000000..48aff90 --- /dev/null +++ b/integration-cli/docker_cli_cp_utils_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/archive" + "github.com/go-check/check" +) + +type fileType uint32 + +const ( + ftRegular fileType = iota + ftDir + ftSymlink +) + +type fileData struct { + 
filetype fileType + path string + contents string + uid int + gid int + mode int +} + +func (fd fileData) creationCommand() string { + var command string + + switch fd.filetype { + case ftRegular: + // Don't overwrite the file if it already exists! + command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) + case ftDir: + command = fmt.Sprintf("mkdir -p %s", fd.path) + case ftSymlink: + command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) + } + + return command +} + +func mkFilesCommand(fds []fileData) string { + commands := make([]string, len(fds)) + + for i, fd := range fds { + commands[i] = fd.creationCommand() + } + + return strings.Join(commands, " && ") +} + +var defaultFileData = []fileData{ + {ftRegular, "file1", "file1", 0, 0, 0666}, + {ftRegular, "file2", "file2", 0, 0, 0666}, + {ftRegular, "file3", "file3", 0, 0, 0666}, + {ftRegular, "file4", "file4", 0, 0, 0666}, + {ftRegular, "file5", "file5", 0, 0, 0666}, + {ftRegular, "file6", "file6", 0, 0, 0666}, + {ftRegular, "file7", "file7", 0, 0, 0666}, + {ftDir, "dir1", "", 0, 0, 0777}, + {ftRegular, "dir1/file1-1", "file1-1", 0, 0, 0666}, + {ftRegular, "dir1/file1-2", "file1-2", 0, 0, 0666}, + {ftDir, "dir2", "", 0, 0, 0666}, + {ftRegular, "dir2/file2-1", "file2-1", 0, 0, 0666}, + {ftRegular, "dir2/file2-2", "file2-2", 0, 0, 0666}, + {ftDir, "dir3", "", 0, 0, 0666}, + {ftRegular, "dir3/file3-1", "file3-1", 0, 0, 0666}, + {ftRegular, "dir3/file3-2", "file3-2", 0, 0, 0666}, + {ftDir, "dir4", "", 0, 0, 0666}, + {ftRegular, "dir4/file3-1", "file4-1", 0, 0, 0666}, + {ftRegular, "dir4/file3-2", "file4-2", 0, 0, 0666}, + {ftDir, "dir5", "", 0, 0, 0666}, + {ftSymlink, "symlinkToFile1", "file1", 0, 0, 0666}, + {ftSymlink, "symlinkToDir1", "dir1", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToFileX", "fileX", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToDirX", "dirX", 0, 0, 0666}, + {ftSymlink, "symlinkToAbsDir", "/root", 0, 0, 0666}, + {ftDir, "permdirtest", "", 2, 2, 0700}, + {ftRegular, "permdirtest/permtest", "perm_test", 65534, 65534, 0400}, +} + +func defaultMkContentCommand() string { + return mkFilesCommand(defaultFileData) +} + +func makeTestContentInDir(c *check.C, dir string) { + for _, fd := range defaultFileData { + path := filepath.Join(dir, filepath.FromSlash(fd.path)) + switch fd.filetype { + case ftRegular: + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(fd.mode)), checker.IsNil) + case ftDir: + c.Assert(os.Mkdir(path, os.FileMode(fd.mode)), checker.IsNil) + case ftSymlink: + c.Assert(os.Symlink(fd.contents, path), checker.IsNil) + } + + if fd.filetype != ftSymlink && runtime.GOOS != "windows" { + c.Assert(os.Chown(path, fd.uid, fd.gid), checker.IsNil) + } + } +} + +type testContainerOptions struct { + addContent bool + readOnly bool + volumes []string + workDir string + command string +} + +func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { + if options.addContent { + mkContentCmd := defaultMkContentCommand() + if options.command == "" { + options.command = mkContentCmd + } else { + options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) + } + } + + if options.command == "" { + options.command = "#(nop)" + } + + args := []string{"run", "-d"} + + for _, volume := range options.volumes { + args = append(args, "-v", volume) + } + + if options.workDir != "" { + args = append(args, "-w", options.workDir) + } + + if options.readOnly { + args = append(args, "--read-only") + } + + args = append(args, 
"busybox", "/bin/sh", "-c", options.command) + + out, _ := dockerCmd(c, args...) + + containerID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + + exitCode := strings.TrimSpace(out) + if exitCode != "0" { + out, _ = dockerCmd(c, "logs", containerID) + } + c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) + + return +} + +func makeCatFileCommand(path string) string { + return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) +} + +func cpPath(pathElements ...string) string { + localizedPathElements := make([]string, len(pathElements)) + for i, path := range pathElements { + localizedPathElements[i] = filepath.FromSlash(path) + } + return strings.Join(localizedPathElements, string(filepath.Separator)) +} + +func cpPathTrailingSep(pathElements ...string) string { + return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) +} + +func containerCpPath(containerID string, pathElements ...string) string { + joined := strings.Join(pathElements, "/") + return fmt.Sprintf("%s:%s", containerID, joined) +} + +func containerCpPathTrailingSep(containerID string, pathElements ...string) string { + return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) +} + +func runDockerCp(c *check.C, src, dst string, params []string) (err error) { + c.Logf("running `docker cp %s %s %s`", strings.Join(params, " "), src, dst) + + args := []string{"cp"} + + for _, param := range params { + args = append(args, param) + } + + args = append(args, src, dst) + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) + } + + return +} + +func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { + c.Logf("running `docker start -a %s`", containerID) + + args := []string{"start", "-a", containerID} + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) + } + + return +} + +func getTestDir(c *check.C, label string) (tmpDir string) { + var err error + + tmpDir, err = ioutil.TempDir("", label) + // unable to make temporary directory + c.Assert(err, checker.IsNil) + + return +} + +func isCpNotExist(err error) bool { + return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") +} + +func isCpDirNotExist(err error) bool { + return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) +} + +func isCpNotDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") +} + +func isCpCannotCopyDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) +} + +func isCpCannotCopyReadOnly(err error) bool { + return strings.Contains(err.Error(), "marked read-only") +} + +func isCannotOverwriteNonDirWithDir(err error) bool { + return strings.Contains(err.Error(), "cannot overwrite non-directory") +} + +func fileContentEquals(c *check.C, filename, contents string) (err error) { + c.Logf("checking that file %q contains %q\n", filename, contents) + + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return + } + + expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) + if err != nil { + return + } + + if 
!bytes.Equal(fileBytes, expectedBytes) { + err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) + } + + return +} + +func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) { + c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) + + actualTarget, err := os.Readlink(symlink) + if err != nil { + return + } + + if actualTarget != expectedTarget { + err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) + } + + return +} + +func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { + c.Logf("checking that container %q start output contains %q\n", containerID, contents) + + out, err := startContainerGetOutput(c, containerID) + if err != nil { + return + } + + if out != contents { + err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out) + } + + return +} + +func defaultVolumes(tmpDir string) []string { + if SameHostDaemon() { + return []string{ + "/vol1", + fmt.Sprintf("%s:/vol2", tmpDir), + fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), + fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), + } + } + + // Can't bind-mount volumes with separate host daemon. + return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} +} diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go new file mode 100644 index 0000000..d4eb985 --- /dev/null +++ b/integration-cli/docker_cli_create_test.go @@ -0,0 +1,443 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-connections/nat" + "github.com/go-check/check" +) + +// Make sure we can create a simple container with some args +func (s *DockerSuite) TestCreateArgs(c *check.C) { + // Intentionally clear entrypoint, as the Windows busybox image needs an entrypoint, which breaks this test + out, _ := dockerCmd(c, "create", "--entrypoint=", "busybox", "command", "arg1", "arg2", "arg with space", "-c", "flags") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) + + b := false + expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"} + for i, arg := range expected { + if arg != cont.Args[i] { + b = true + break + } + } + if len(cont.Args) != len(expected) || b { + c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args) + } + +} + +// Make sure we can grow the container's rootfs at creation time. 
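+// The size is expressed via --storage-opt size=...; on devicemapper the
+// default base device is 10GB, hence the 120G grow below and the rejected 5G
+// shrink in the next test.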
+func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { + // Windows and Devicemapper support growing the rootfs + if testEnv.DaemonPlatform() != "windows" { + testRequires(c, Devicemapper) + } + out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, cleanedContainerID, "HostConfig.StorageOpt") + c.Assert(inspectOut, checker.Equals, "map[size:120G]") +} + +// Make sure we cannot shrink the container's rootfs at creation time. +func (s *DockerSuite) TestCreateShrinkRootfs(c *check.C) { + testRequires(c, Devicemapper) + + // Ensure this fails because of the defaultBaseFsSize is 10G + out, _, err := dockerCmdWithError("create", "--storage-opt", "size=5G", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Container size cannot be smaller than") +} + +// Make sure we can set hostconfig options too +func (s *DockerSuite) TestCreateHostConfig(c *check.C) { + out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) +} + +func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings))) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 ports binding, for the port %s but found %s", k, v)) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + + } + +} + +func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) + + for k, v 
:= range cont.HostConfig.PortBindings {
+        c.Assert(v, checker.HasLen, 1)
+        c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort))
+    }
+}
+
+// "test123" should be printed by docker create + start
+func (s *DockerSuite) TestCreateEchoStdout(c *check.C) {
+    out, _ := dockerCmd(c, "create", "busybox", "echo", "test123")
+
+    cleanedContainerID := strings.TrimSpace(out)
+
+    out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID)
+    c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out))
+}
+
+func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) {
+    testRequires(c, SameHostDaemon)
+    prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+
+    name := "test_create_volume"
+    dockerCmd(c, "create", "--name", name, "-v", prefix+slash+"foo", "busybox")
+
+    dir, err := inspectMountSourceField(name, prefix+slash+"foo")
+    c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err))
+
+    _, err = os.Stat(dir)
+    if os.IsNotExist(err) {
+        c.Fatalf("Volume was not created")
+    }
+    if err != nil {
+        c.Fatalf("Error statting volume host path: %q", err)
+    }
+}
+
+func (s *DockerSuite) TestCreateLabels(c *check.C) {
+    name := "test_create_labels"
+    expected := map[string]string{"k1": "v1", "k2": "v2"}
+    dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox")
+
+    actual := make(map[string]string)
+    inspectFieldAndUnmarshall(c, name, "Config.Labels", &actual)
+
+    if !reflect.DeepEqual(expected, actual) {
+        c.Fatalf("Expected %s got %s", expected, actual)
+    }
+}
+
+func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) {
+    imageName := "testcreatebuildlabel"
+    buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox
+        LABEL k1=v1 k2=v2`))
+
+    name := "test_create_labels_from_image"
+    expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"}
+    dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName)
+
+    actual := make(map[string]string)
+    inspectFieldAndUnmarshall(c, name, "Config.Labels", &actual)
+
+    if !reflect.DeepEqual(expected, actual) {
+        c.Fatalf("Expected %s got %s", expected, actual)
+    }
+}
+
+func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) {
+    image := "busybox"
+    // Busybox on Windows does not implement hostname command
+    if testEnv.DaemonPlatform() == "windows" {
+        image = testEnv.MinimalBaseImage()
+    }
+    out, _ := dockerCmd(c, "run", "-h", "web.0", image, "hostname")
+    c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out))
+}
+
+func (s *DockerSuite) TestCreateRM(c *check.C) {
+    // Test to make sure we can 'rm' a new container that is in
+    // "Created" state, and has never been run. Test "rm -f" too.
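+    // ("rm -f" is exercised as well, on a second never-started container.)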
+ + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + dockerCmd(c, "rm", cID) + + // Now do it again so we can "rm -f" this time + out, _ = dockerCmd(c, "create", "busybox") + + cID = strings.TrimSpace(out) + dockerCmd(c, "rm", "-f", cID) +} + +func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { + // Uses Linux specific functionality (--ipc) + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + id := strings.TrimSpace(out) + + dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") +} + +func (s *DockerSuite) TestCreateByImageID(c *check.C) { + imageName := "testcreatebyimageid" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + MAINTAINER dockerio`)) + imageID := getIDByName(c, imageName) + truncatedImageID := stringid.TruncateID(imageID) + + dockerCmd(c, "create", imageID) + dockerCmd(c, "create", truncatedImageID) + dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) + + // Ensure this fails + out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "invalid reference format"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } + + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Unable to find image"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } +} + +func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-create") + + // Try create + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Try untrusted create to ensure we pushed the tag to the registry + cli.Docker(cli.Args("create", "--disable-content-trust=true", repoName)).Assert(c, SuccessDownloadedOnStderr) +} + +func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) + withTagName := fmt.Sprintf("%s:latest", repoName) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", withTagName) + cli.DockerCmd(c, "push", withTagName) + cli.DockerCmd(c, "rmi", withTagName) + + // Try trusted create on untrusted tag + cli.Docker(cli.Args("create", withTagName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("does not have trust data for %s", repoName), + }) +} + +func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-create") + + // Try create + cli.Docker(cli.Args("--config", "/tmp/docker-isolated-create", "create", repoName), trustedCmd).Assert(c, SuccessTagging) + defer os.RemoveAll("/tmp/docker-isolated-create") + + cli.DockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") + c.Assert(err, check.IsNil) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.Docker(cli.Args("push", 
repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try create + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + c.Assert(err, check.IsNil) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. + cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "could not rotate trust to a new trusted root", + }) +} + +func (s *DockerSuite) TestCreateStopSignal(c *check.C) { + name := "test_create_stop_signal" + dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") + + res := inspectFieldJSON(c, name, "Config.StopSignal") + c.Assert(res, checker.Contains, "9") + +} + +func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { + name := "foo" + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + dir := prefix + slash + "home" + slash + "foo" + slash + "bar" + + dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") + // Windows does not create the workdir until the container is started + if testEnv.DaemonPlatform() == "windows" { + dockerCmd(c, "start", name) + } + dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") +} + +func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *check.C) { + name := "test-invalidate-log-opts" + out, _, err := dockerCmdWithError("create", "--name", name, "--log-opt", "invalid=true", "busybox") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown log opt") + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Not(checker.Contains), name) +} + +// #20972 +func (s *DockerSuite) TestCreate64ByteHexID(c *check.C) { + out := inspectField(c, "busybox", "Id") + imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:") + + dockerCmd(c, "create", imageID) +} + +// Test case for #23498 +func (s *DockerSuite) TestCreateUnsetEntrypoint(c *check.C) { + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "create", "--entrypoint=", name, "echo", "foo").Combined() + id := strings.TrimSpace(out) + c.Assert(id, check.Not(check.Equals), "") + out = cli.DockerCmd(c, "start", "-a", id).Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "foo") +} + +// #22471 +func (s *DockerSuite) TestCreateStopTimeout(c *check.C) { + name1 := "test_create_stop_timeout_1" + dockerCmd(c, "create", "--name", name1, "--stop-timeout", "15", "busybox") + + res := inspectFieldJSON(c, name1, "Config.StopTimeout") + c.Assert(res, checker.Contains, "15") + + name2 := 
"test_create_stop_timeout_2" + dockerCmd(c, "create", "--name", name2, "busybox") + + res = inspectFieldJSON(c, name2, "Config.StopTimeout") + c.Assert(res, checker.Contains, "null") +} diff --git a/integration-cli/docker_cli_create_unix_test.go b/integration-cli/docker_cli_create_unix_test.go new file mode 100644 index 0000000..1b0bb4a --- /dev/null +++ b/integration-cli/docker_cli_create_unix_test.go @@ -0,0 +1,43 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/go-check/check" +) + +// Test case for #30166 (target was not validated) +func (s *DockerSuite) TestCreateTmpfsMountsTarget(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + target string + expectedError string + } + cases := []testCase{ + { + target: ".", + expectedError: "mount path must be absolute", + }, + { + target: "foo", + expectedError: "mount path must be absolute", + }, + { + target: "/", + expectedError: "destination can't be '/'", + }, + { + target: "//", + expectedError: "destination can't be '/'", + }, + } + for _, x := range cases { + out, _, _ := dockerCmdWithError("create", "--tmpfs", x.target, "busybox", "sh") + if x.expectedError != "" && !strings.Contains(out, x.expectedError) { + c.Fatalf("mounting tmpfs over %q should fail with %q, but got %q", + x.target, x.expectedError, out) + } + } +} diff --git a/integration-cli/docker_cli_daemon_plugins_test.go b/integration-cli/docker_cli_daemon_plugins_test.go new file mode 100644 index 0000000..614cddc --- /dev/null +++ b/integration-cli/docker_cli_daemon_plugins_test.go @@ -0,0 +1,391 @@ +// +build linux + +package main + +import ( + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/mount" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + s.d.Restart(c) + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "true") +} + +// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + s.d.Restart(c) + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, 
checker.Contains, "false") +} + +// TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + s.d.Start(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + s.d.Restart(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. +// Plugins should continue to run. +func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + s.d.Start(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + s.d.Restart(c, "--live-restore") + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Interrupt(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonShutdownWithPlugins shuts down running plugins. +func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network, SameHostDaemon) + + s.d.Start(c) + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + s.d.Restart(c) + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Interrupt(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + for { + if err := syscall.Kill(s.d.Pid(), 0); err == syscall.ESRCH { + break + } + } + + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) + + s.d.Start(c) + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonKillWithPlugins leaves plugins running. 
+func (s *DockerDaemonSuite) TestDaemonKillWithPlugins(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network, SameHostDaemon) + + s.d.Start(c) + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + s.d.Restart(c) + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + // assert that plugins are running. + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestVolumePlugin tests volume creation using a plugin. +func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + volName := "plugin-volume" + destDir := "/tmp/data/" + destFile := "foo" + + s.d.Start(c) + out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions") + if err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + pluginID, err := s.d.Cmd("plugin", "inspect", "-f", "{{.Id}}", pName) + pluginID = strings.TrimSpace(pluginID) + if err != nil { + c.Fatalf("Could not retrieve plugin ID: %v %s", err, pluginID) + } + mountpointPrefix := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs") + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, false) + + }() + + out, err = s.d.Cmd("volume", "create", "-d", pName, volName) + if err != nil { + c.Fatalf("Could not create volume: %v %s", err, out) + } + defer func() { + if out, err := s.d.Cmd("volume", "remove", volName); err != nil { + c.Fatalf("Could not remove volume: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("volume", "ls") + if err != nil { + c.Fatalf("Could not list volume: %v %s", err, out) + } + c.Assert(out, checker.Contains, volName) + c.Assert(out, checker.Contains, pName) + + mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}") + if err != nil { + c.Fatalf("Could not inspect volume: %v %s", err, mountPoint) + } + mountPoint = strings.TrimSpace(mountPoint) + + out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile) + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs", mountPoint, destFile) + _, err = os.Lstat(path) + c.Assert(err, checker.IsNil) + + exists, err := existsMountpointWithPrefix(mountpointPrefix) + c.Assert(err, checker.IsNil) + c.Assert(exists, checker.Equals, true) +} + +func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, Network, IsAmd64, DaemonIsLinux, overlay2Supported, ExperimentalDaemon) + + s.d.Start(c) + + // install the plugin + plugin := "cpuguy83/docker-overlay2-graphdriver-plugin" + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin) + c.Assert(err, checker.IsNil, 
check.Commentf(out)) + + // restart the daemon with the plugin set as the storage driver + s.d.Restart(c, "-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1") + + // run a container + out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, DaemonIsLinux, Network, IsAmd64) + + s.d.Start(c, "--live-restore=true") + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, err = s.d.Cmd("volume", "create", "--driver", pName, "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + s.d.Restart(c, "--live-restore=true") + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "in use") + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "disable", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("plugin", "rm", pName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) { + mounts, err := mount.GetMounts() + if err != nil { + return false, err + } + for _, mnt := range mounts { + if strings.HasPrefix(mnt.Mountpoint, mountpointPrefix) { + return true, nil + } + } + return false, nil +} + +func (s *DockerDaemonSuite) TestPluginListFilterEnabled(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pNameWithTag, "--disable") + c.Assert(err, check.IsNil, check.Commentf(out)) + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pNameWithTag); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("plugin", "ls", "--filter", "enabled=true") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + + out, err = s.d.Cmd("plugin", "ls", "--filter", "enabled=false") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") + + out, err = s.d.Cmd("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) +} + +func (s *DockerDaemonSuite) TestPluginListFilterCapability(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pNameWithTag, "--disable") + c.Assert(err, check.IsNil, check.Commentf(out)) + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pNameWithTag); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("plugin", "ls", "--filter", "capability=volumedriver") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + + out, err = s.d.Cmd("plugin", "ls", "--filter", "capability=authz") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + + out, err = s.d.Cmd("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) +} diff --git a/integration-cli/docker_cli_daemon_test.go 
b/integration-cli/docker_cli_daemon_test.go new file mode 100644 index 0000000..fc24e50 --- /dev/null +++ b/integration-cli/docker_cli_daemon_test.go @@ -0,0 +1,2986 @@ +// +build linux + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "crypto/tls" + "crypto/x509" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + units "github.com/docker/go-units" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libtrust" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// TestLegacyDaemonCommand tests starting the docker daemon via the deprecated "docker daemon" +// subcommand. Remove this test when the subcommand is removed. +func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) { + cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug") + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'")) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { + s.d.StartWithBusybox(c) + + cli.Docker( + cli.Args("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"), + cli.Daemon(s.d), + ).Assert(c, icmd.Success) + + cli.Docker( + cli.Args("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"), + cli.Daemon(s.d), + ).Assert(c, icmd.Success) + + testRun := func(m map[string]bool, prefix string) { + var format string + for cont, shouldRun := range m { + out := cli.Docker(cli.Args("ps"), cli.Daemon(s.d)).Assert(c, icmd.Success).Combined() + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + if shouldRun != strings.Contains(out, cont) { + c.Fatalf(format, prefix, cont) + } + } + } + + testRun(map[string]bool{"top1": true, "top2": true}, "") + + s.d.Restart(c) + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + c.Fatal(err, out) + } + + s.d.Restart(c) + + if out, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + c.Fatal(err, out) + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") + c.Assert(err, check.IsNil) + + if _, err := inspectMountPointJSON(out, "/foo"); err != nil { + c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) + } +} + +// #11008 +func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + out, err = s.d.Cmd("run", "-d", "--name", "top2",
"--restart", "unless-stopped", "busybox:latest", "top") + c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) + + testRun := func(m map[string]bool, prefix string) { + var format string + for name, shouldRun := range m { + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) + if shouldRun { + format = "%scontainer %q is not running" + } else { + format = "%scontainer %q is running" + } + c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) + } + } + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "") + + out, err = s.d.Cmd("stop", "top1") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("stop", "top2") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // both stopped + testRun(map[string]bool{"top1": false, "top2": false}, "") + + s.d.Restart(c) + + // restart=always running + testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") + + out, err = s.d.Cmd("start", "top2") + c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) + + s.d.Restart(c) + + // both running + testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") + +} + +func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false") + c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) + + // wait test1 to stop + hostArgs := []string{"--host", s.d.Sock()} + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // record last start time + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + lastStartTime := out + + s.d.Restart(c) + + // test1 shouldn't restart at all + err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...) + c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) + + // make sure test1 isn't restarted when daemon restart + // if "StartAt" time updates, means test1 was once restarted. + out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Equals, lastStartTime, check.Commentf("test1 shouldn't start after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { + s.d.Start(c, "--iptables=false") +} + +// Make sure we cannot shrink base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { + testRequires(c, Devicemapper) + s.d.Start(c) + + oldBasesizeBytes := s.d.GetBaseDeviceSize(c) + var newBasesizeBytes int64 = 1073741824 //1GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.NotNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + // 'err != nil' is expected behaviour, no new daemon started, + // so no need to stop daemon. + if err != nil { + return + } + } + s.d.Stop(c) +} + +// Make sure we can grow base device at daemon restart. 
+func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { + testRequires(c, Devicemapper) + s.d.Start(c) + + oldBasesizeBytes := s.d.GetBaseDeviceSize(c) + + var newBasesizeBytes int64 = 53687091200 // 50GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) + } + + err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) + + basesizeAfterRestart := s.d.GetBaseDeviceSize(c) + newBasesize, err := convertBasesize(newBasesizeBytes) + c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) + c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("base device size after restart does not match the size that was passed")) + s.d.Stop(c) +} + +func convertBasesize(basesizeBytes int64) (int64, error) { + basesize := units.HumanSize(float64(basesizeBytes)) + basesize = strings.Trim(basesize, " ")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + if err != nil { + return 0, err + } + return int64(basesizeFloat) * 1024 * 1024 * 1024, nil +} + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + s.d.Start(c) + s.d.Stop(c) + + // now we will remove the ip from docker0 and then try starting the daemon + icmd.RunCommand("ip", "addr", "flush", "dev", "docker0").Assert(c, icmd.Success) + + if err := s.d.StartWithError(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + ipTablesSearchString := "tcp dpt:80" + + // get output from iptables with container running + verifyIPTablesContains(c, ipTablesSearchString) + + s.d.Stop(c) + + // the rule should be gone now that the daemon is stopped + verifyIPTablesDoesNotContains(c, ipTablesSearchString) +} + +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + verifyIPTablesContains(c, ipTablesSearchString) + + s.d.Restart(c) + + // make sure the container was restarted and is running + runningOut, err := s.d.Cmd("inspect", "--format={{.State.Running}}", "top") + if err != nil { + c.Fatalf("Could not inspect on container: %s, %v", runningOut, err) + } + if strings.TrimSpace(runningOut) != "true" { + c.Fatalf("Container
should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + verifyIPTablesContains(c, ipTablesSearchString) +} + +func verifyIPTablesContains(c *check.C, ipTablesSearchString string) { + result := icmd.RunCommand("iptables", "-nvL") + result.Assert(c, icmd.Success) + if !strings.Contains(result.Combined(), ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, result.Combined()) + } +} + +func verifyIPTablesDoesNotContains(c *check.C, ipTablesSearchString string) { + result := icmd.RunCommand("iptables", "-nvL") + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, result.Combined()) + } +} + +// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge +// has the fe80::1 address and that a container is assigned a link-local address
func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { + testRequires(c, IPv6) + + setupV6(c) + defer teardownV6(c) + + s.d.StartWithBusybox(c, "--ipv6") + + iface, err := net.InterfaceByName("docker0") + if err != nil { + c.Fatalf("Error getting docker0 interface: %v", err) + } + + addrs, err := iface.Addrs() + if err != nil { + c.Fatalf("Error getting addresses for docker0 interface: %v", err) + } + + var found bool + expected := "fe80::1/64" + + for i := range addrs { + if addrs[i].String() == expected { + found = true + break + } + } + + if !found { + c.Fatalf("Bridge does not have an IPv6 Address") + } + + if out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip != nil { + c.Fatalf("Container should not have a global IPv6 address: %v", out) + } +} + +// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR +// that running containers are given a link-local and global IPv6 address +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + // Delete the docker0 bridge if it's left over from a previous daemon. It has to be recreated with + // ipv6 enabled + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + out = strings.Trim(out, " \r\n'") + + c.Assert(err, checker.IsNil, check.Commentf(out)) + + ip := net.ParseIP(out) + c.Assert(ip, checker.NotNil, check.Commentf("Container should have a global IPv6 address")) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.IPv6Gateway}}", "ipv6test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:2::100", check.Commentf("Container should have a global IPv6 gateway")) +} + +// TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with ipv6 fixed CIDR +// the running containers are given an IPv6 address derived from the MAC address and the ipv6 fixed CIDR +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + // Delete the docker0 bridge if it's left over from a previous daemon. It has to be recreated with + // ipv6 enabled + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + c.Assert(err, checker.IsNil) + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") +} + +// TestDaemonIPv6HostMode checks that when running a container with +// network=host the host's ipv6 addresses are not removed +func (s *DockerDaemonSuite) TestDaemonIPv6HostMode(c *check.C) { + testRequires(c, SameHostDaemon) + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64") + out, err := s.d.Cmd("run", "-itd", "--name=hostcnt", "--network=host", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("exec", "hostcnt", "ip", "-6", "addr", "show", "docker0") + out = strings.Trim(out, " \r\n'") + + c.Assert(out, checker.Contains, "2001:db8:2::1") +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.StartWithError("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { + s.d.Start(c, "--log-level=debug") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf("Missing level=\"debug\" in log file:\n%s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { + // we create a new daemon here to get a fresh log file + s.d.Start(c, "--log-level=fatal") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if strings.Contains(string(content), `level=debug`) { + c.Fatalf("Should not have level=\"debug\" in log file:\n%s", string(content)) + } +} + +func (s *DockerDaemonSuite)
TestDaemonFlagD(c *check.C) { + s.d.Start(c, "-D") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf("Should have level=\"debug\" in log file using -D:\n%s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { + s.d.Start(c, "--debug") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf("Should have level=\"debug\" in log file using --debug:\n%s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { + s.d.Start(c, "--debug", "--log-level=fatal") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf("Should have level=\"debug\" in log file when using both --debug and --log-level=fatal:\n%s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { + listeningPorts := [][]string{ + {"0.0.0.0", "0.0.0.0", "5678"}, + {"127.0.0.1", "127.0.0.1", "1234"}, + {"localhost", "127.0.0.1", "1235"}, + } + + cmdArgs := make([]string, 0, len(listeningPorts)*2) + for _, hostDirective := range listeningPorts { + cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) + } + + s.d.StartWithBusybox(c, cmdArgs...) + + for _, hostDirective := range listeningPorts { + output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") + if err == nil { + c.Fatalf("Container should not start, expected port already allocated error: %q", output) + } else if !strings.Contains(output, "port is already allocated") { + c.Fatalf("Expected port is already allocated error: %q", output) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + s.d.Start(c) + s.d.Stop(c) + + k, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + kid := k.KeyID() + // Test Key ID is a valid fingerprint (e.g.
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) + if len(kid) != 59 { + c.Fatalf("Bad key ID: %s", kid) + } +} + +// GH#11320 - verify that the daemon exits on failure properly +// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means +// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required +func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { + //attempt to start daemon with incorrect flags (we know -b and --bip conflict) + if err := s.d.StartWithError("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + //verify we got the right error + if !strings.Contains(err.Error(), "Daemon exited") { + c.Fatalf("Expected daemon not to start, got %v", err) + } + // look in the log and make sure we got the message that daemon is shutting down + icmd.RunCommand("grep", "Error starting daemon", s.d.LogFileName()).Assert(c, icmd.Success) + } else { + //if we didn't get an error and the daemon is running, this is a failure + c.Fatal("Conflicting options should cause the daemon to error out with a failure") + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { + d := s.d + err := d.StartWithError("--bridge", "nosuchbridge") + c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) + defer d.Restart(c) + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName) + + ipTablesSearchString := bridgeIPNet.String() + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) + + _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP, err := d.FindContainerIP("ExtContainer") + c.Assert(err, checker.IsNil) + ip := net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *check.C) { + // start with bridge none + d := s.d + d.StartWithBusybox(c, "--bridge", "none") + defer d.Restart(c) + + // verify docker0 iface is not there + icmd.RunCommand("ifconfig", "docker0").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "Device not found", + }) + + // verify default "bridge" network is not there + out, err := d.Cmd("network", "inspect", "bridge") + c.Assert(err, check.NotNil, check.Commentf("\"bridge\" network should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "No such network"), check.Equals, true) +} + +func createInterface(c *check.C, ifType string, ifName string, ipNet string) { + icmd.RunCommand("ip", "link", "add", "name", ifName, "type", ifType).Assert(c, icmd.Success) + icmd.RunCommand("ifconfig", ifName, ipNet, "up").Assert(c, icmd.Success) +} + +func deleteInterface(c *check.C, ifName string) { + icmd.RunCommand("ip", "link", "delete", ifName).Assert(c, icmd.Success) + icmd.RunCommand("iptables", "-t", "nat", "--flush").Assert(c, icmd.Success) + icmd.RunCommand("iptables", "--flush").Assert(c, icmd.Success) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { + // TestDaemonBridgeIP Steps + // 1. Delete the existing docker0 Bridge + // 2. 
Set --bip daemon configuration and start the new Docker Daemon + // 3. Check if the bip config has taken effect using ifconfig and iptables commands + // 4. Launch a Container and make sure the IP-Address is in the expected subnet + // 5. Delete the docker0 Bridge + // 6. Restart the Docker Daemon (via deferred action) + // This Restart takes care of bringing docker0 interface back to auto-assigned IP + + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1/24" + ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + d.StartWithBusybox(c, "--bip", bridgeIP) + defer d.Restart(c) + + ifconfigSearchString := ip.String() + icmd.RunCommand("ifconfig", defaultNetworkBridge).Assert(c, icmd.Expected{ + Out: ifconfigSearchString, + }) + + ipTablesSearchString := bridgeIPNet.String() + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) + + _, err := d.Cmd("run", "-d", "--name", "test", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP, err := d.FindContainerIP("test") + c.Assert(err, checker.IsNil) + ip = net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { + s.d.Start(c) + defer s.d.Restart(c) + s.d.Stop(c) + + // now we will change docker0's IP and then try starting the daemon + bridgeIP := "192.169.100.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + icmd.RunCommand("ifconfig", "docker0", bridgeIP).Assert(c, icmd.Success) + + s.d.Start(c, "--bip", bridgeIP) + + // check that iptables contains the new bridge IP MASQUERADE rule + ipTablesSearchString := bridgeIPNet.String() + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} + d.StartWithBusybox(c, args...)
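+ // The /30 fixed-cidr pool holds only a couple of assignable addresses, so some of the four containers started below are expected to fail with "no available IPv4 addresses".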
+ defer d.Restart(c) + + for i := 0; i < 4; i++ { + cName := "Container" + strconv.Itoa(i) + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + if err != nil { + c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, + check.Commentf("Could not run a container: %s %s", err.Error(), out)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "10.2.2.1/16" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") + defer s.d.Restart(c) + + out, err := d.Cmd("run", "-d", "--name", "bb", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + defer d.Cmd("stop", "bb") + + out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Equals, "10.2.2.0\n") + + out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Equals, "10.2.2.2\n") +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "172.27.42.1/16" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP) + defer s.d.Restart(c) + + out, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + cid1 := strings.TrimSpace(out) + defer d.Cmd("stop", cid1) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + + d.StartWithBusybox(c, "--bip", bridgeIPNet) + defer d.Restart(c) + + expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(err, checker.IsNil) + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", + bridgeIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + gatewayIP := "192.169.1.254" + + d.StartWithBusybox(c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP) + defer d.Restart(c) + + expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(err, checker.IsNil) + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Explicit default gateway should be %s, but default route was '%s'", + gatewayIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + // Program a custom default gateway outside of the container subnet, daemon should
accept it and start + s.d.StartWithBusybox(c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") + + deleteInterface(c, defaultNetworkBridge) + s.d.Restart(c) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { + c.Skip("swarm isn't supported") + + testRequires(c, DaemonIsLinux, SameHostDaemon) + + // Start daemon without docker0 bridge + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + discoveryBackend := "consul://consuladdr:consulport/some/path" + s.d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + + // Start daemon with docker0 bridge + result := icmd.RunCommand("ifconfig", defaultNetworkBridge) + c.Assert(result, icmd.Matches, icmd.Success) + + s.d.Restart(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend)) +} + +func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { + d := s.d + + ipStr := "192.170.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + args := []string{"--ip", ip.String()} + d.StartWithBusybox(c, args...) + defer d.Restart(c) + + out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.NotNil, + check.Commentf("Running a container must fail with an invalid --ip option")) + c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) + + ifName := "dummy" + createInterface(c, "dummy", ifName, ipStr) + defer deleteInterface(c, ifName) + + _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.IsNil) + + result := icmd.RunCommand("iptables", "-t", "nat", "-nvL") + result.Assert(c, icmd.Success) + regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) + matched, _ := regexp.MatchString(regex, result.Combined()) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) +} + +func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { + testRequires(c, bridgeNfIptables) + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer d.Restart(c) + + result := icmd.RunCommand("iptables", "-nvL", "FORWARD") + result.Assert(c, icmd.Success) + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, result.Combined()) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) + + // Pinging another container must fail with --icc=false + pingContainers(c, d, true) + + ipStr := "192.171.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + ifName := "icc-dummy" + + createInterface(c, "dummy", ifName, ipStr) + + // But pinging an external or host interface must succeed + pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) + runArgs := []string{"run", "--rm", "busybox", "sh", "-c", pingCmd} + _, err := d.Cmd(runArgs...)
+ c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer d.Restart(c) + + result := icmd.RunCommand("iptables", "-nvL", "FORWARD") + result.Assert(c, icmd.Success) + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, result.Combined()) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) + + out, err := d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + s.d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer s.d.Restart(c) + + _, err := s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + c.Assert(err, check.IsNil) + + childIP, err := s.d.FindContainerIP("child") + c.Assert(err, checker.IsNil) + parentIP, err := s.d.FindContainerIP("parent") + c.Assert(err, checker.IsNil) + + sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules not found") + } + + s.d.Cmd("rm", "--link", "parent/http") + if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) 
{ + c.Fatal("Iptables rules should be removed when unlink") + } + + s.d.Cmd("kill", "child") + s.d.Cmd("kill", "parent") +} + +func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + + s.d.StartWithBusybox(c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024") + + out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") + if err != nil { + c.Fatal(out, err) + } + + outArr := strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile := strings.TrimSpace(outArr[0]) + nproc := strings.TrimSpace(outArr[1]) + + if nofile != "42" { + c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + } + + // Now restart daemon with a new default + s.d.Restart(c, "--default-ulimit", "nofile=43") + + out, err = s.d.Cmd("start", "-a", "test") + if err != nil { + c.Fatal(err) + } + + outArr = strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile = strings.TrimSpace(outArr[0]) + nproc = strings.TrimSpace(outArr[1]) + + if nofile != "43" { + c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + } +} + +// #11315 +func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { + s.d.StartWithBusybox(c) + + if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) + } + + if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { + c.Fatal(err, out) + } + + s.d.Restart(c) + + if out, err := s.d.Cmd("start", "test2"); err != nil { + c.Fatal(err, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, check.IsNil, check.Commentf(out)) + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=none") + + out, err := s.d.Cmd("run", 
"--name=test", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=none") + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.GetIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=none") + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("logs", "test") + c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) + expected := `configured logging driver does not support reading` + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverShouldBeIgnoredForBuild(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=splunk") + + out, err := s.d.Cmd("build") + out, code, err := s.d.BuildImageWithOut("busyboxs", ` + FROM busybox + RUN echo foo`, false) + comment := check.Commentf("Failed to build image. 
output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) + c.Assert(out, checker.Contains, "foo", comment) +} + +func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { + dir, err := ioutil.TempDir("", "socket-cleanup-test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + sockPath := filepath.Join(dir, "docker.sock") + s.d.Start(c, "--host", "unix://"+sockPath) + + if _, err := os.Stat(sockPath); err != nil { + c.Fatal("socket does not exist") + } + + s.d.Stop(c) + + if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { + c.Fatal("unix socket is not cleaned up") + } +} + +func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { + type Config struct { + Crv string `json:"crv"` + D string `json:"d"` + Kid string `json:"kid"` + Kty string `json:"kty"` + X string `json:"x"` + Y string `json:"y"` + } + + os.Remove("/etc/docker/key.json") + s.d.Start(c) + s.d.Stop(c) + + config := &Config{} + bytes, err := ioutil.ReadFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error reading key.json file: %s", err) + } + + // unmarshal the JSON key file into the struct + if err := json.Unmarshal(bytes, &config); err != nil { + c.Fatalf("Error Unmarshal: %s", err) + } + + // replace config.Kid with a fake value + config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" + + // marshal the modified struct back to JSON + newBytes, err := json.Marshal(&config) + if err != nil { + c.Fatalf("Error Marshal: %s", err) + } + + // write back + if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { + c.Fatalf("Error ioutil.WriteFile: %s", err) + } + + defer os.Remove("/etc/docker/key.json") + + if err := s.d.StartWithError(); err == nil { + c.Fatal("Daemon should not have started with a mismatched key") + } + + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + + if !strings.Contains(string(content), "Public Key ID does not match") { + c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") + if err != nil { + c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) + } + containerID := strings.TrimSpace(out) + + if out, err := s.d.Cmd("kill", containerID); err != nil { + c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) + } + + s.d.Restart(c) + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("wait", containerID); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on a stopped (killed) container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +// TestHTTPSInfo connects via two-way authenticated HTTPS to the info endpoint +func (s *DockerDaemonSuite) TestHTTPSInfo(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "info", + } + out, err := s.d.Cmd(args...)
+ if err != nil { + c.Fatalf("Error occurred: %s and output: %s", err, out) + } +} + +// TestHTTPSRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints. +// https://github.com/docker/docker/issues/19280 +func (s *DockerDaemonSuite) TestHTTPSRun(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + s.d.StartWithBusybox(c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr) + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "run", "busybox", "echo", "TLS response", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error occurred: %s and output: %s", err, out) + } + + if !strings.Contains(out, "TLS response") { + c.Fatalf("expected output to include `TLS response`, got %v", out) + } +} + +// TestTLSVerify verifies that even --tlsverify=false enables TLS: the daemon must +// fail to start when no certificates are supplied. +func (s *DockerDaemonSuite) TestTLSVerify(c *check.C) { + out, err := exec.Command(dockerdBinary, "--tlsverify=false").CombinedOutput() + if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") { + c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) + } +} + +// TestHTTPSInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. +func (s *DockerDaemonSuite) TestHTTPSInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "bad certificate" + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHTTPSInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHTTPSAddr = "tcp://localhost:4272" + ) + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", + "-H", testDaemonRogueHTTPSAddr) + + args := []string{ + "--host", testDaemonRogueHTTPSAddr, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...)
+ if err == nil || !strings.Contains(out, errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } +} + +func pingContainers(c *check.C, d *daemon.Daemon, expectFailure bool) { + var dargs []string + if d != nil { + dargs = []string{"--host", d.Sock()} + } + + args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, args...) + + args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") + pingCmd := "ping -c 1 %s -W 1" + args = append(args, fmt.Sprintf(pingCmd, "alias1")) + _, _, err := dockerCmdWithError(args...) + + if expectFailure { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + args = append(dargs, "rm", "-f", "container1") + dockerCmd(c, args...) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { + s.d.StartWithBusybox(c) + + socket := filepath.Join(s.d.Folder, "docker.sock") + + out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + s.d.Restart(c) +} + +// os.Kill should kill daemon ungracefully, leaving behind container mounts. +// A subsequent daemon restart should clean up said mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + d.StartWithBusybox(c) + + out, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + c.Assert(d.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // kill the container + icmd.RunCommand(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id).Assert(c, icmd.Success) + + // restart daemon. + d.Restart(c) + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) + + d.Stop(c) +} + +// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + d.StartWithBusybox(c) + + out, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + // Send SIGINT and daemon should clean up + c.Assert(d.Signal(os.Interrupt), check.IsNil) + // Wait for the daemon to stop.
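+ // Receiving from d.Wait blocks until the daemon process has exited, so the mountinfo check below observes the post-shutdown state.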
+ c.Assert(<-d.Wait, checker.IsNil) + + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + s.d.StartWithBusybox(c, "-b", "none") + + out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) + + out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) + // the extra grep and awk clean up the output of `ip` to only list the number and name of + // interfaces, allowing for different versions of ip (e.g. inside and outside the container) to + // be used while still verifying that the interface list is the exact same + cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err != nil { + c.Fatal("Failed to get host network interface") + } + out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout), + check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { + s.d.StartWithBusybox(t) + if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil { + t.Fatal(out, err) + } + + s.d.Restart(t) + // Container 'test' should be removed without error + if out, err := s.d.Cmd("rm", "test"); err != nil { + t.Fatal(out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { + s.d.StartWithBusybox(c) + out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") + if err != nil { + c.Fatal(out, err) + } + + // Get sandbox key via inspect + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns") + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + fileName := strings.Trim(out, " \r\n'") + + if out, err := s.d.Cmd("stop", "netns"); err != nil { + c.Fatal(out, err) + } + + // Test if the file still exists + icmd.RunCommand("stat", "-c", "%n", fileName).Assert(c, icmd.Expected{ + Out: fileName, + }) + + // Remove the container and restart the daemon + if out, err := s.d.Cmd("rm", "netns"); err != nil { + c.Fatal(out, err) + } + + s.d.Restart(c) + + // Test again and see now the netns file does not exist + icmd.RunCommand("stat", "-c", "%n", fileName).Assert(c, icmd.Expected{ + Err: "No such file or directory", + ExitCode: 1, + }) +} + +// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is 
ignored +func (s *DockerDaemonSuite) TestDaemonTLSVerifyIssue13964(c *check.C) { + host := "tcp://localhost:4271" + s.d.Start(c, "-H", host) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "-H", host, "info"}, + Env: []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"}, + }).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error during connect", + }) +} + +func setupV6(c *check.C) { + // Hack to get the right IPv6 address on docker0, which has already been created + result := icmd.RunCommand("ip", "addr", "add", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Success) +} + +func teardownV6(c *check.C) { + result := icmd.RunCommand("ip", "addr", "del", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Success) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil) + id := strings.TrimSpace(out) + + _, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("wait", id) + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(out, check.Equals, "") + + s.d.Restart(c) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, id[:12]) +} + +func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { + s.d.StartWithBusybox(c, "--log-opt=max-size=1k") + name := "logtest" + out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "max-size:1k") + c.Assert(out, checker.Contains, "max-file:5") + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Type }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "json-file") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { + s.d.StartWithBusybox(c) + if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + if out, err := s.d.Cmd("pause", "test"); err != nil { + c.Fatal(err, out) + } + s.d.Restart(c) + + errchan := make(chan error) + go func() { + out, err := s.d.Cmd("start", "test") + if err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + name := strings.TrimSpace(out) + if name != "test" { + errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on start a container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + s.d.Restart(c) + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart")) + c.Assert(out, checker.Contains, "in use") +} + +func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { + s.d.Start(c) + + _, err := 
s.d.Cmd("volume", "create", "test") + c.Assert(err, check.IsNil) + s.d.Restart(c) + + _, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.IsNil) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { + c.Skip("syslog log-driver isn't supported") + + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + c.Assert(d.StartWithError("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) + expected := "Failed to set log opts: syslog-address should be in form proto://address" + icmd.RunCommand("grep", expected, d.LogFileName()).Assert(c, icmd.Success) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { + c.Skip("fluentd isn't supported") + + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + c.Assert(d.StartWithError("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) + expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " + icmd.RunCommand("grep", expected, d.LogFileName()).Assert(c, icmd.Success) +} + +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one +func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { + s.d.UseDefaultHost = true + defer func() { + s.d.UseDefaultHost = false + }() + s.d.Start(c) +} + +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one +func (s *DockerDaemonSuite) TestDaemonStartWithDefaultTLSHost(c *check.C) { + s.d.UseDefaultTLSHost = true + defer func() { + s.d.UseDefaultTLSHost = false + }() + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem") + + // The client with --tlsverify should also use default host localhost:2376 + tmpHost := os.Getenv("DOCKER_HOST") + defer func() { + os.Setenv("DOCKER_HOST", tmpHost) + }() + + os.Setenv("DOCKER_HOST", "") + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + // ensure when connecting to the server that only a single acceptable CA is requested + contents, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + rootCert, err := helpers.ParseCertificatePEM(contents) + c.Assert(err, checker.IsNil) + rootPool := x509.NewCertPool() + rootPool.AddCert(rootCert) + + var certRequestInfo *tls.CertificateRequestInfo + conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort), &tls.Config{ + RootCAs: rootPool, + GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certRequestInfo = cri + cert, err := tls.LoadX509KeyPair("fixtures/https/client-cert.pem", "fixtures/https/client-key.pem") + if err != nil { + return nil, err + } + return &cert, nil + }, + }) + c.Assert(err, checker.IsNil) + conn.Close() + + c.Assert(certRequestInfo, checker.NotNil) + c.Assert(certRequestInfo.AcceptableCAs, checker.HasLen, 1) + c.Assert(certRequestInfo.AcceptableCAs[0], checker.DeepEquals, rootCert.RawSubject) +} + +func (s *DockerDaemonSuite) 
TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + bridgeIP := "192.169.1.1" + bridgeRange := bridgeIP + "/30" + + s.d.StartWithBusybox(c, "--bip", bridgeRange) + defer s.d.Restart(c) + + var cont int + for { + contName := fmt.Sprintf("container%d", cont) + _, err := s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") + if err != nil { + // pool exhausted + break + } + ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName) + c.Assert(err, check.IsNil) + + c.Assert(ip, check.Not(check.Equals), bridgeIP) + cont++ + } +} + +// Test daemon for no space left on device error +func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, Network) + + testDir, err := ioutil.TempDir("", "no-space-left-on-device-test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + c.Assert(mount.MakeRShared(testDir), checker.IsNil) + defer mount.Unmount(testDir) + + // create a 2MiB image and mount it as graph root + // Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) + dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0") + icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success) + + result := icmd.RunCommand("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) + result.Assert(c, icmd.Success) + loopname := strings.TrimSpace(string(result.Combined())) + defer exec.Command("losetup", "-d", loopname).Run() + + dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname)) + defer mount.Unmount(filepath.Join(testDir, "test-mount")) + + s.d.Start(c, "--data-root", filepath.Join(testDir, "test-mount")) + defer s.d.Stop(c) + + // pull a repository large enough to fill the mount point + pullOut, err := s.d.Cmd("pull", "registry:2") + c.Assert(err, checker.NotNil, check.Commentf(pullOut)) + c.Assert(pullOut, checker.Contains, "no space left on device") +} + +// Test daemon restart with container links + auto restart +func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { + s.d.StartWithBusybox(c) + + parent1Args := []string{} + parent2Args := []string{} + wg := sync.WaitGroup{} + maxChildren := 10 + chErr := make(chan error, maxChildren) + + for i := 0; i < maxChildren; i++ { + wg.Add(1) + name := fmt.Sprintf("test%d", i) + + if i < maxChildren/2 { + parent1Args = append(parent1Args, []string{"--link", name}...) + } else { + parent2Args = append(parent2Args, []string{"--link", name}...) + } + + go func() { + _, err := s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") + chErr <- err + wg.Done() + }() + } + + wg.Wait() + close(chErr) + for err := range chErr { + c.Assert(err, check.IsNil) + } + + parent1Args = append([]string{"run", "-d"}, parent1Args...) + parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...) + parent2Args = append([]string{"run", "-d"}, parent2Args...) + parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...) + + _, err := s.d.Cmd(parent1Args...) 
+ c.Assert(err, check.IsNil)
+ _, err = s.d.Cmd(parent2Args...)
+ c.Assert(err, check.IsNil)
+
+ s.d.Stop(c)
+ // clear the log file -- we don't need any of it now, but may for the next part
+ // can ignore the error here, this is just a cleanup
+ os.Truncate(s.d.LogFileName(), 0)
+ s.d.Start(c)
+
+ for _, num := range []string{"1", "2"} {
+ out, err := s.d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num)
+ c.Assert(err, check.IsNil)
+ if strings.TrimSpace(out) != "true" {
+ log, _ := ioutil.ReadFile(s.d.LogFileName())
+ c.Fatalf("parent container is not running\n%s", string(log))
+ }
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+
+ cgroupParent := "test"
+ name := "cgroup-test"
+
+ s.d.StartWithBusybox(c, "--cgroup-parent", cgroupParent)
+ defer s.d.Restart(c)
+
+ out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup")
+ c.Assert(err, checker.IsNil)
+ cgroupPaths := testutil.ParseCgroupPaths(string(out))
+ c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out)))
+ out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name)
+ c.Assert(err, checker.IsNil)
+ id := strings.TrimSpace(string(out))
+ expectedCgroup := path.Join(cgroupParent, id)
+ found := false
+ for _, path := range cgroupPaths {
+ if strings.HasSuffix(path, expectedCgroup) {
+ found = true
+ break
+ }
+ }
+ c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) wasn't found in cgroups file: %s", expectedCgroup, cgroupPaths))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) {
+ testRequires(c, DaemonIsLinux) // Windows does not support links
+ s.d.StartWithBusybox(c)
+
+ out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ s.d.Restart(c)
+
+ // should fail since test is not running yet
+ out, err = s.d.Cmd("start", "test2")
+ c.Assert(err, check.NotNil, check.Commentf(out))
+
+ out, err = s.d.Cmd("start", "test")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ out, err = s.d.Cmd("start", "-a", "test2")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) {
+ testRequires(c, DaemonIsLinux) // Windows does not support links
+ s.d.StartWithBusybox(c)
+
+ out, err := s.d.Cmd("create", "--name=test", "busybox")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ test2ID := strings.TrimSpace(out)
+
+ out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ test3ID := strings.TrimSpace(out)
+
+ s.d.Restart(c)
+
+ out, err = s.d.Cmd("create", "--name=test", "busybox")
+ c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name"))
+ // this one is no longer needed, removing simplifies the remainder of the test
+ out, err = s.d.Cmd("rm", "-f", "test")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ out, err = s.d.Cmd("ps", "-a", "--no-trunc")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ lines := 
strings.Split(strings.TrimSpace(out), "\n")[1:] + + test2validated := false + test3validated := false + for _, line := range lines { + fields := strings.Fields(line) + names := fields[len(fields)-1] + switch fields[0] { + case test2ID: + c.Assert(names, check.Equals, "test2,test3/abc") + test2validated = true + case test3ID: + c.Assert(names, check.Equals, "test3") + test3validated = true + } + } + + c.Assert(test2validated, check.Equals, true) + c.Assert(test3validated, check.Equals, true) +} + +// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers +func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + s.d.StartWithBusybox(t) + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop(t) + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + pid = strings.TrimSpace(pid) + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // kill the container + icmd.RunCommand(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid).Assert(t, icmd.Success) + + // Give time to containerd to process the command if we don't + // the exit event might be received after we do the inspect + result := icmd.RunCommand("kill", "-0", pid) + for result.ExitCode == 0 { + time.Sleep(1 * time.Second) + // FIXME(vdemeester) should we check it doesn't error out ? + result = icmd.RunCommand("kill", "-0", pid) + } + + // restart the daemon + s.d.Start(t) + + // Check that we've got the correct exit code + out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "143" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "143", out, cid) + } + +} + +// os.Kill should kill daemon ungracefully, leaving behind live containers. +// The live containers should be known to the restarted daemon. Stopping +// them now, should remove the mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d.StartWithBusybox(c, "--live-restore") + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + c.Assert(s.d.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // restart daemon. + s.d.Start(c, "--live-restore") + + // container should be running. + out, err = s.d.Cmd("inspect", "--format={{.State.Running}}", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + out = strings.TrimSpace(out) + if out != "true" { + c.Fatalf("Container %s expected to stay alive after daemon restart", id) + } + + // 'docker stop' should work. + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // Now, container mounts should be gone. 
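+ // The check works by scanning /proc/self/mountinfo for the container ID;
+ // while a container is up, mounts containing the ID are present there,
+ // e.g. the container's /dev/shm mount at .../containers/<id>/shm
+ // (illustrative path only).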
+ mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
+ c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+ comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
+ c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
+}
+
+// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers.
+func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) {
+ // TODO(mlaventure): Not sure what the exit code would be on windows
+ testRequires(t, DaemonIsLinux)
+ s.d.StartWithBusybox(t, "--live-restore")
+
+ cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top")
+ defer s.d.Stop(t)
+ if err != nil {
+ t.Fatal(cid, err)
+ }
+ cid = strings.TrimSpace(cid)
+
+ pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid)
+ t.Assert(err, check.IsNil)
+
+ // pause the container
+ if _, err := s.d.Cmd("pause", cid); err != nil {
+ t.Fatal(cid, err)
+ }
+
+ // Kill the daemon
+ if err := s.d.Kill(); err != nil {
+ t.Fatal(err)
+ }
+
+ // resume the container
+ result := icmd.RunCommand(
+ ctrBinary,
+ "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock",
+ "containers", "resume", cid)
+ t.Assert(result, icmd.Matches, icmd.Success)
+
+ // Give containerd time to process the command; if we don't,
+ // the resume event might be received after we do the inspect
+ waitAndAssert(t, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) {
+ result := icmd.RunCommand("kill", "-0", strings.TrimSpace(pid))
+ return result.ExitCode, nil
+ }, checker.Equals, 0)
+
+ // restart the daemon
+ s.d.Start(t, "--live-restore")
+
+ // Check that we've got the correct status
+ out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid)
+ t.Assert(err, check.IsNil)
+
+ out = strings.TrimSpace(out)
+ if out != "running" {
+ t.Fatalf("Expected status '%s' got '%s' for container '%s'\n", "running", out, cid)
+ }
+ if _, err := s.d.Cmd("kill", cid); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestRunLinksChanged checks that creating a new container with the same name does not update links;
+// this ensures that the old, pre gh#16032 functionality continues on
+func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) {
+ testRequires(c, DaemonIsLinux) // Windows does not support links
+ s.d.StartWithBusybox(c)
+
+ out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received")
+
+ out, err = s.d.Cmd("rm", "-f", "test")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ out, err = s.d.Cmd("start", "-a", "test2")
+ c.Assert(err, check.NotNil, check.Commentf(out))
+ c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received")
+
+ s.d.Restart(c)
+ out, err = s.d.Cmd("start", "-a", "test2")
+ c.Assert(err, check.NotNil, check.Commentf(out))
+ c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received")
+}
+
+func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) {
+ testRequires(c, DaemonIsLinux, NotPpc64le)
+
+ infoLog := 
"\x1b[34mINFO\x1b" + + b := bytes.NewBuffer(nil) + done := make(chan bool) + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + go func() { + io.Copy(b, p) + done <- true + }() + + // Enable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=false") + s.d.Stop(c) + // Wait for io.Copy() before checking output + <-done + c.Assert(b.String(), checker.Contains, infoLog) + + b.Reset() + + // "tty" is already closed in prev s.d.Stop(), + // we have to close the other side "p" and open another pair of + // pty for the next test. + p.Close() + p, tty, err = pty.Open() + c.Assert(err, checker.IsNil) + + go func() { + io.Copy(b, p) + done <- true + }() + + // Disable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=true") + s.d.Stop(c) + // Wait for io.Copy() before checking output + <-done + c.Assert(b.String(), check.Not(check.Equals), "") + c.Assert(b.String(), check.Not(checker.Contains), infoLog) +} + +func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + debugLog := "\x1b[37mDEBU\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + s.d.StartWithLogFile(tty, "--debug") + s.d.Stop(c) + c.Assert(b.String(), checker.Contains, debugLog) +} + +func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + daemonConfig := `{ "debug" : false }` + configFile, err := ioutil.TempFile("", "test-daemon-discovery-backend-config-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + configFilePath := configFile.Name() + defer func() { + configFile.Close() + os.RemoveAll(configFile.Name()) + }() + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + // --log-level needs to be set so that d.Start() doesn't add --debug causing + // a conflict with the config + s.d.Start(c, "--config-file", configFilePath, "--log-level=info") + + // daemon config file + daemonConfig = `{ + "debug" : false + }` + + err = configFile.Truncate(0) + c.Assert(err, checker.IsNil) + _, err = configFile.Seek(0, os.SEEK_SET) + c.Assert(err, checker.IsNil) + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + err = s.d.ReloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + _, err = s.d.Cmd("info") + c.Assert(err, checker.IsNil) + +} + +// Test for #21956 +func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { + c.Skip("Syslog and json-file aren't supported") + + s.d.StartWithBusybox(c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") + + out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("inspect", "--format='{{.HostConfig.LogConfig}}'", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "{json-file map[]}") +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { + s.d.Start(c, "--max-concurrent-uploads=6", "--max-concurrent-downloads=8") + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent 
Downloads: 8"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-downloads" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 7, "max-concurrent-downloads" : 9 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-uploads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 1, "max-concurrent-downloads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + 
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "labels":["foo=bar"] }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { + s.d.StartWithBusybox(c, "-b=none", "--iptables=false") + out, code, err := s.d.BuildImageWithOut("busyboxs", + `FROM busybox + RUN cat /etc/hosts`, false) + comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSFlagsInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + s.d.StartWithBusybox(c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3") + + expectedOutput := "nameserver 1.2.3.4" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + expectedOutput = "search example.com" + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + expectedOutput = "options timeout:3" + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { + conf, err := ioutil.TempFile("", "config-file-") + c.Assert(err, check.IsNil) + configName := conf.Name() + conf.Close() + defer os.Remove(configName) + + config := ` +{ + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + s.d.StartWithBusybox(c, "--config-file", configName) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Reset config to only have the default + config = ` +{ + "runtimes": { + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + // Run with default runtime + out, err = s.d.Cmd("run", 
"--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + config = ` +{ + "runtimes": { + "runc": { + "path": "my-runc" + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) + + // Check that we can select a default runtime + config = ` +{ + "default-runtime": "vm", + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { + s.d.StartWithBusybox(c, "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Start a daemon without any extra runtimes + s.d.Stop(c) + s.d.StartWithBusybox(c) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + s.d.Stop(c) + c.Assert(s.d.StartWithError("--add-runtime", "runc=my-runc"), checker.NotNil) + + 
content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) + + // Check that we can select a default runtime + s.d.Stop(c) + s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) { + s.d.StartWithBusybox(c) + + // top1 will exist after daemon restarts + out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out)) + // top2 will be removed after daemon restarts + out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out)) + + out, err = s.d.Cmd("ps") + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running")) + c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running")) + + // now restart daemon gracefully + s.d.Restart(c) + + out, err = s.d.Cmd("ps", "-a") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts")) + c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { + s.d.StartWithBusybox(c) + + containerName := "error-values" + // Make a container with both a non 0 exit code and an error message + // We explicitly disable `--init` for this test, because `--init` is enabled by default + // on "experimental". Enabling `--init` results in a different behavior; because the "init" + // process itself is PID1, the container does not fail on _startup_ (i.e., `docker-init` starting), + // but directly after. The exit code of the container is still 127, but the Error Message is not + // captured, so `.State.Error` is empty. + // See the discussion on https://github.com/docker/docker/pull/30227#issuecomment-274161426, + // and https://github.com/docker/docker/pull/26061#r78054578 for more information. 
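+ // Note: exit code 127 is the shell convention for "command not found";
+ // the nonexistent command ("toto") below is what triggers it, together
+ // with the "executable file not found" error message asserted further down.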
+ out, err := s.d.Cmd("run", "--name", containerName, "--init=false", "busybox", "toto") + c.Assert(err, checker.NotNil) + + // Check that those values were saved on disk + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + errMsg1, err := s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + errMsg1 = strings.TrimSpace(errMsg1) + c.Assert(err, checker.IsNil) + c.Assert(errMsg1, checker.Contains, "executable file not found") + + // now restart daemon + s.d.Restart(c) + + // Check that those values are still around + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, errMsg1) +} + +func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { + testRequires(c, SameHostDaemon) + d := s.d + d.StartWithBusybox(c) + + // hack to be able to side-load a container config + out, err := d.Cmd("create", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.Stop(c) + <-d.Wait + + imageID := strings.TrimSpace(out) + volumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(d.Root, "vfs", "dir", volumeID) + c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) + + config := []byte(` + { + "ID": "` + id + `", + "Name": "hello", + "Driver": "` + d.StorageDriver() + `", + "Image": "` + imageID + `", + "Config": {"Image": "busybox:latest"}, + "NetworkSettings": {}, + "Volumes": { + "/bar":"/foo", + "/foo": "` + vfsPath + `", + "/quux":"/quux" + }, + "VolumesRW": { + "/bar": true, + "/foo": true, + "/quux": false + } + } + `) + + configPath := filepath.Join(d.Root, "containers", id, "config.v2.json") + c.Assert(ioutil.WriteFile(configPath, config, 600), checker.IsNil) + d.Start(c) + + out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + type mount struct { + Name string + Source string + Destination string + Driver string + RW bool + } + + ls := []mount{} + err = json.NewDecoder(strings.NewReader(out)).Decode(&ls) + c.Assert(err, checker.IsNil) + + expected := []mount{ + {Source: "/foo", Destination: "/bar", RW: true}, + {Name: volumeID, Destination: "/foo", RW: true}, + {Source: "/quux", Destination: "/quux", RW: false}, + } + c.Assert(ls, checker.HasLen, len(expected)) + + for _, m := range ls { + var matched bool + for _, x := range expected { + if m.Source == x.Source && m.Destination == x.Destination && m.RW == x.RW || m.Name != x.Name { + matched = true + break + } + } + c.Assert(matched, checker.True, check.Commentf("did find match for %+v", m)) + } +} + +func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + dockerProxyPath, err := exec.LookPath("docker-proxy") + c.Assert(err, checker.IsNil) + tmpDir, err := ioutil.TempDir("", "test-docker-proxy") + c.Assert(err, checker.IsNil) + + newProxyPath := filepath.Join(tmpDir, "docker-proxy") + cmd := exec.Command("cp", dockerProxyPath, newProxyPath) + c.Assert(cmd.Run(), 
checker.IsNil) + + // custom one + s.d.StartWithBusybox(c, "--userland-proxy-path", newProxyPath) + out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // try with the original one + s.d.Restart(c, "--userland-proxy-path", dockerProxyPath) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // not exist + s.d.Restart(c, "--userland-proxy-path", "/does/not/exist") + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "driver failed programming external connectivity on endpoint") + c.Assert(out, checker.Contains, "/does/not/exist: no such file or directory") +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { + testRequires(c, SameHostDaemon) + s.d.StartWithBusybox(c, "--shutdown-timeout=3") + + _, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + c.Assert(s.d.Signal(syscall.SIGINT), checker.IsNil) + + select { + case <-s.d.Wait: + case <-time.After(5 * time.Second): + } + + expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "shutdown-timeout" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "shutdown-timeout" : 5 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + + select { + case <-s.d.Wait: + case <-time.After(3 * time.Second): + } + + expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for 29342 +func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d.StartWithBusybox(c, "--live-restore") + + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && touch /adduser_end && top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.WaitRun("top") + + // Wait for shell command to be completed + _, err = s.d.Cmd("exec", "top", "sh", "-c", `for i in $(seq 1 5); do if [ -e /adduser_end ]; then rm -f /adduser_end && break; else sleep 1 && false; fi; done`) + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for shell command to be completed")) + + out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") + // uid=100(test) gid=101(test) groups=101(test) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) + + // restart daemon. 
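+ // With --live-restore the container keeps running across the daemon
+ // restart, so the same in-container user database should yield identical
+ // `id` output before and after.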
+ s.d.Restart(c, "--live-restore") + + out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) + c.Assert(out2, check.Equals, out1, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, overlayFSSupported, SameHostDaemon) + s.d.StartWithBusybox(c, "--live-restore", "--storage-driver", "overlay") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.WaitRun("top") + + // restart daemon. + s.d.Restart(c, "--live-restore", "--storage-driver", "overlay") + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // test if the rootfs mountpoint still exist + mountpoint, err := s.d.InspectField("top", ".GraphDriver.Data.MergedDir") + c.Assert(err, check.IsNil) + f, err := os.Open("/proc/self/mountinfo") + c.Assert(err, check.IsNil) + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + line := sc.Text() + if strings.Contains(line, mountpoint) { + c.Fatalf("mountinfo should not include the mountpoint of stop container") + } + } + + out, err = s.d.Cmd("rm", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +// #29598 +func (s *DockerDaemonSuite) TestRestartPolicyWithLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d.StartWithBusybox(c, "--live-restore") + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("output: %s", out)) + id := strings.TrimSpace(out) + + type state struct { + Running bool + StartedAt time.Time + } + out, err = s.d.Cmd("inspect", "-f", "{{json .State}}", id) + c.Assert(err, checker.IsNil, check.Commentf("output: %s", out)) + + var origState state + err = json.Unmarshal([]byte(strings.TrimSpace(out)), &origState) + c.Assert(err, checker.IsNil) + + s.d.Restart(c, "--live-restore") + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", id) + c.Assert(err, check.IsNil) + pidint, err := strconv.Atoi(strings.TrimSpace(pid)) + c.Assert(err, check.IsNil) + c.Assert(pidint, checker.GreaterThan, 0) + c.Assert(syscall.Kill(pidint, syscall.SIGKILL), check.IsNil) + + ticker := time.NewTicker(50 * time.Millisecond) + timeout := time.After(10 * time.Second) + + for range ticker.C { + select { + case <-timeout: + c.Fatal("timeout waiting for container restart") + default: + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .State}}", id) + c.Assert(err, checker.IsNil, check.Commentf("output: %s", out)) + + var newState state + err = json.Unmarshal([]byte(strings.TrimSpace(out)), &newState) + c.Assert(err, checker.IsNil) + + if !newState.Running { + continue + } + if newState.StartedAt.After(origState.StartedAt) { + break + } + } + + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("output: %s", out)) +} + +func (s *DockerDaemonSuite) TestShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + size := 67108864 * 2 + pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + s.d.StartWithBusybox(c, "--default-shm-size", fmt.Sprintf("%v", size)) + + name := "shm1" + out, err := s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, 
check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) +} + +func (s *DockerDaemonSuite) TestShmSizeReload(c *check.C) { + testRequires(c, DaemonIsLinux) + + configPath, err := ioutil.TempDir("", "test-daemon-shm-size-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + defer os.RemoveAll(configPath) // clean up + configFile := filepath.Join(configPath, "config.json") + + size := 67108864 * 2 + configData := []byte(fmt.Sprintf(`{"default-shm-size": "%dM"}`, size/1024/1024)) + c.Assert(ioutil.WriteFile(configFile, configData, 0666), checker.IsNil, check.Commentf("could not write temp file for config reload")) + pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + s.d.StartWithBusybox(c, "--config-file", configFile) + + name := "shm1" + out, err := s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) + + size = 67108864 * 3 + configData = []byte(fmt.Sprintf(`{"default-shm-size": "%dM"}`, size/1024/1024)) + c.Assert(ioutil.WriteFile(configFile, configData, 0666), checker.IsNil, check.Commentf("could not write temp file for config reload")) + pattern = regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + err = s.d.ReloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + name = "shm2" + out, err = s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) +} diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go new file mode 100644 index 0000000..3e95a73 --- /dev/null +++ b/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,98 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +// ensure that an added file shows up in docker diff +func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { + containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd).Combined() + + // Wait for it to exit as cannot diff a running container on Windows, and + // it will take a few seconds to exit. Also there's no way in Windows to + // differentiate between an Add or a Modify, and all files are under + // a "Files/" prefix. 
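+ // docker diff prefixes each path with the kind of change: "A" (added),
+ // "C" (changed) or "D" (deleted); the expectations below rely on that
+ // one-letter prefix format.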
+ containerID := strings.TrimSpace(out) + lookingFor := "A /foo/bar" + if testEnv.DaemonPlatform() == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) + lookingFor = "C Files/foo/bar" + } + + cleanCID := strings.TrimSpace(out) + out = cli.DockerCmd(c, "diff", cleanCID).Combined() + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains(line, lookingFor) { + found = true + break + } + } + c.Assert(found, checker.True) +} + +// test to ensure GH #3840 doesn't occur any more +func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) { + testRequires(c, DaemonIsLinux) + // this is a list of files which shouldn't show up in `docker diff` + initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} + containerCount := 5 + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < containerCount; i++ { + containerCmd := `echo foo > /root/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + for _, filename := range initLayerFiles { + c.Assert(out, checker.Not(checker.Contains), filename) + } + } +} + +func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/mqueue": true, + "A /dev/kmsg": true, + "A /dev/fd": true, + "A /dev/ptmx": true, + "A /dev/null": true, + "A /dev/random": true, + "A /dev/stdout": true, + "A /dev/stderr": true, + "A /dev/tty1": true, + "A /dev/stdin": true, + "A /dev/tty": true, + "A /dev/urandom": true, + "A /dev/zero": true, + } + + for _, line := range strings.Split(out, "\n") { + c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) + } +} + +// https://github.com/docker/docker/pull/14381#discussion_r33859347 +func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { + out, _, err := dockerCmdWithError("diff", "") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty") +} diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go new file mode 100644 index 0000000..74c5fe9 --- /dev/null +++ b/integration-cli/docker_cli_events_test.go @@ -0,0 +1,801 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strings" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { + name := "events-time-format-test" + + // Start stopwatch, generate an event + start := daemonTime(c) + time.Sleep(1100 * time.Millisecond) // so that first event occur in different second from since (just for the case) + dockerCmd(c, "run", "--rm", "--name", name, "busybox", "true") + 
+	time.Sleep(1100 * time.Millisecond) // so that until > since
+	end := daemonTime(c)
+
+	// List of time formats accepted by --since/--until
+	unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) }
+	rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) }
+	duration := func(t time.Time) string { return time.Since(t).String() }
+
+	// The window --since=$start .. --until=$end must contain the container's five lifecycle events
+	for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} {
+		since, until := f(start), f(end)
+		out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until)
+		events := strings.Split(out, "\n")
+		events = events[:len(events)-1]
+
+		nEvents := len(events)
+		c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event
+		containerEvents := eventActionsByIDAndType(c, events, name, "container")
+		c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events))
+
+		c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out))
+		c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out))
+		c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out))
+		c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out))
+		c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out))
+	}
+}
+
+func (s *DockerSuite) TestEventsUntag(c *check.C) {
+	image := "busybox"
+	dockerCmd(c, "tag", image, "utest:tag1")
+	dockerCmd(c, "tag", image, "utest:tag2")
+	dockerCmd(c, "rmi", "utest:tag1")
+	dockerCmd(c, "rmi", "utest:tag2")
+
+	result := icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "events", "--since=1"},
+		Timeout: time.Millisecond * 2500,
+	})
+	c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true})
+
+	events := strings.Split(result.Stdout(), "\n")
+	nEvents := len(events)
+	// The last element after the split above will be an empty string, so we
+	// get the two elements before the last, which are the untags we're
+	// looking for.
+	for _, v := range events[nEvents-3 : nEvents-1] {
+		c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag"))
+	}
+}
+
+func (s *DockerSuite) TestEventsLimit(c *check.C) {
+	// Windows: Limit to 4 goroutines creating containers in order to prevent
+	// timeouts creating so many containers simultaneously. This is due to
+	// a bug in the Windows platform. It will be fixed in a Windows Update.
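+	// The buffered channel below is a counting semaphore: each send reserves
+	// a slot, the deferred receive releases it, and the final loop that sends
+	// cap(sem) more tokens can only complete once every goroutine has finished.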
+ numContainers := 17 + numConcurrentContainers := numContainers + if testEnv.DaemonPlatform() == "windows" { + numConcurrentContainers = 4 + } + sem := make(chan bool, numConcurrentContainers) + errChan := make(chan error, numContainers) + + args := []string{"run", "--rm", "busybox", "true"} + for i := 0; i < numContainers; i++ { + sem <- true + go func() { + defer func() { <-sem }() + out, err := exec.Command(dockerBinary, args...).CombinedOutput() + if err != nil { + err = fmt.Errorf("%v: %s", err, string(out)) + } + errChan <- err + }() + } + + // Wait for all goroutines to finish + for i := 0; i < cap(sem); i++ { + sem <- true + } + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " "))) + } + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + nEvents := len(events) - 1 + c.Assert(nEvents, checker.Equals, 256, check.Commentf("events should be limited to 256, but received %d", nEvents)) +} + +func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--filter", "container=container-events-test", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 3) //Missing expected event + matchedEvents := 0 + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if matches["eventType"] == "container" && matches["action"] == "create" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } else if matches["eventType"] == "container" && matches["action"] == "start" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } + } + c.Assert(matchedEvents, checker.Equals, 2, check.Commentf("missing events for container container-events-test:\n%s", out)) +} + +func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true") + timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) + timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) + out, _ := dockerCmd(c, "events", "--since", timeBeginning, "--until", daemonUnixTime(c)) + events 
:= strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsImageTag(c *check.C) { + time.Sleep(1 * time.Second) // because API has seconds granularity + since := daemonUnixTime(c) + image := "testimageevents:tag" + dockerCmd(c, "tag", "busybox", image) + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out)) + event := strings.TrimSpace(events[0]) + + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) + c.Assert(matches["action"], checker.Equals, "tag") +} + +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + // TODO Windows: Enable this test once pull and reliable image names are available + testRequires(c, DaemonIsLinux) + since := daemonUnixTime(c) + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + matches := eventstestutils.ScanMap(event) + c.Assert(matches["id"], checker.Equals, "hello-world:latest") + c.Assert(matches["action"], checker.Equals, "pull") + +} + +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + // TODO Windows CI. 
This should be portable once export/import are + // more reliable (@swernli) + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + since := daemonUnixTime(c) + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil, check.Commentf("import failed with output: %q", out)) + imageRef := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=import") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsImageLoad(c *check.C) { + testRequires(c, DaemonIsLinux) + myImageName := "footest:v1" + dockerCmd(c, "tag", "busybox", myImageName) + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + longImageID := strings.TrimSpace(out) + c.Assert(longImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty")) + + dockerCmd(c, "save", "-o", "saveimg.tar", myImageName) + dockerCmd(c, "rmi", myImageName) + out, _ = dockerCmd(c, "images", "-q", myImageName) + noImageID := strings.TrimSpace(out) + c.Assert(noImageID, checker.Equals, "", check.Commentf("Should not have any image")) + dockerCmd(c, "load", "-i", "saveimg.tar") + + result := icmd.RunCommand("rm", "-rf", "saveimg.tar") + c.Assert(result, icmd.Matches, icmd.Success) + + out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + imageID := strings.TrimSpace(out) + c.Assert(imageID, checker.Equals, longImageID, check.Commentf("Should have same image id as before")) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=load") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "load", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=save") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches = eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "save", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsPluginOps(c *check.C) { + c.Skip("Plugins aren't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + since := daemonUnixTime(c) + + dockerCmd(c, "plugin", "install", pNameWithTag, "--grant-all-permissions") + dockerCmd(c, "plugin", "disable", pNameWithTag) + dockerCmd(c, "plugin", "remove", pNameWithTag) + + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, 
checker.GreaterOrEqualThan, 4) + + pluginEvents := eventActionsByIDAndType(c, events, pNameWithTag, "plugin") + c.Assert(pluginEvents, checker.HasLen, 4, check.Commentf("events: %v", events)) + + c.Assert(pluginEvents[0], checker.Equals, "pull", check.Commentf(out)) + c.Assert(pluginEvents[1], checker.Equals, "enable", check.Commentf(out)) + c.Assert(pluginEvents[2], checker.Equals, "disable", check.Commentf(out)) + c.Assert(pluginEvents[3], checker.Equals, "remove", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilters(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die") + parseEvents(c, out, "die") + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die", "--filter", "event=start") + parseEvents(c, out, "die|start") + + // make sure we at least got 2 start events + count := strings.Count(out, "start") + c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) + +} + +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + name := "busybox" + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("image=%s", name)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest + count1 := 0 + count2 := 0 + + for _, e := range events { + if strings.Contains(e, container1) { + count1++ + } else if strings.Contains(e, container2) { + count2++ + } + } + c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) + c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) + +} + +func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { + since := daemonUnixTime(c) + label := "io.docker.testing=foo" + + out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 3) + + for _, e := range events { + c.Assert(e, checker.Contains, container1) + c.Assert(e, checker.Not(checker.Contains), container2) + } +} + +func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. 
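+	// Only tag1/tag2 point at the labelled image built below; tag3 points at
+	// plain busybox, so the label filter must exclude its tag event.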
+	buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`
+	FROM busybox:latest
+	LABEL %s`, label)))
+	dockerCmd(c, "tag", name, "labelfiltertest:tag1")
+	dockerCmd(c, "tag", name, "labelfiltertest:tag2")
+	dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3")
+
+	out, _ := dockerCmd(
+		c,
+		"events",
+		"--since", since,
+		"--until", daemonUnixTime(c),
+		"--filter", fmt.Sprintf("label=%s", label),
+		"--filter", "type=image")
+
+	events := strings.Split(strings.TrimSpace(out), "\n")
+
+	// 2 events from the "docker tag" commands, and one more from "docker build"
+	c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events))
+	for _, e := range events {
+		c.Assert(e, checker.Contains, "labelfiltertest")
+	}
+}
+
+func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
+	since := daemonUnixTime(c)
+	nameID := make(map[string]string)
+
+	for _, name := range []string{"container_1", "container_2"} {
+		dockerCmd(c, "run", "--name", name, "busybox", "true")
+		id := inspectField(c, name, "Id")
+		nameID[name] = id
+	}
+
+	until := daemonUnixTime(c)
+
+	checkEvents := func(id string, events []string) error {
+		if len(events) != 4 { // create, attach, start, die
+			return fmt.Errorf("expected 4 events, got %v", events)
+		}
+		for _, event := range events {
+			matches := eventstestutils.ScanMap(event)
+			if !matchEventID(matches, id) {
+				return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"])
+			}
+		}
+		return nil
+	}
+
+	for name, ID := range nameID {
+		// filter by names
+		out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name)
+		events := strings.Split(strings.TrimSuffix(out, "\n"), "\n")
+		c.Assert(checkEvents(ID, events), checker.IsNil)
+
+		// filter by IDs
+		out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID)
+		events = strings.Split(strings.TrimSuffix(out, "\n"), "\n")
+		c.Assert(checkEvents(ID, events), checker.IsNil)
+	}
+}
+
+func (s *DockerSuite) TestEventsCommit(c *check.C) {
+	// Problematic on Windows as we cannot commit a running container
+	testRequires(c, DaemonIsLinux)
+
+	out := runSleepingContainer(c)
+	cID := strings.TrimSpace(out)
+	cli.WaitRun(c, cID)
+
+	cli.DockerCmd(c, "commit", "-m", "test", cID)
+	cli.DockerCmd(c, "stop", cID)
+	cli.WaitExited(c, cID, 5*time.Second)
+
+	until := daemonUnixTime(c)
+	out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined()
+	c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event"))
+}
+
+func (s *DockerSuite) TestEventsCopy(c *check.C) {
+	// Build a test image.
+	buildImageSuccessfully(c, "cpimg", build.WithDockerfile(`
+	FROM busybox
+	RUN echo HI > /file`))
+	id := getIDByName(c, "cpimg")
+
+	// Create an empty test file.
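+	// Copying out of the container should emit an 'archive-path' event and
+	// copying back in an 'extract-to-dir' event; the temp file is just the
+	// host-side endpoint for both directions.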
+ tempFile, err := ioutil.TempFile("", "test-events-copy-") + c.Assert(err, checker.IsNil) + defer os.Remove(tempFile.Name()) + + c.Assert(tempFile.Close(), checker.IsNil) + + dockerCmd(c, "create", "--name=cptest", id) + + dockerCmd(c, "cp", "cptest:/file", tempFile.Name()) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n")) + + dockerCmd(c, "cp", tempFile.Name(), "cptest:/filecopy") + + until = daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event")) +} + +func (s *DockerSuite) TestEventsResize(c *check.C) { + out := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + endpoint := "/containers/" + cID + "/resize?h=80&w=24" + status, _, err := request.SockRequest("POST", endpoint, nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event")) +} + +func (s *DockerSuite) TestEventsAttach(c *check.C) { + // TODO Windows CI: Figure out why this test fails intermittently (TP5). + testRequires(c, DaemonIsLinux) + + out := cli.DockerCmd(c, "run", "-di", "busybox", "cat").Combined() + cID := strings.TrimSpace(out) + cli.WaitRun(c, cID) + + cmd := exec.Command(dockerBinary, "attach", cID) + stdin, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + // Make sure we're done attaching by writing/reading some stuff + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'")) + + c.Assert(stdin.Close(), checker.IsNil) + + cli.DockerCmd(c, "kill", cID) + cli.WaitExited(c, cID, 5*time.Second) + + until := daemonUnixTime(c) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() + c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) +} + +func (s *DockerSuite) TestEventsRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "oldName", "busybox", "true") + cID := strings.TrimSpace(out) + dockerCmd(c, "rename", "oldName", "newName") + + until := daemonUnixTime(c) + // filter by the container id because the name in the event will be the new name. 
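+	// (-f container= accepts a name or an ID; the ID stays stable across the rename)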
+ out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until", until) + c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) +} + +func (s *DockerSuite) TestEventsTop(c *check.C) { + // Problematic on Windows as Windows does not support top + testRequires(c, DaemonIsLinux) + + out := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "top", cID) + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) +} + +// #14316 +func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { + // Problematic to port for Windows CI during TP5 timeframe until + // supporting push + testRequires(c, DaemonIsLinux) + testRequires(c, Network) + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", cID, repoName) + dockerCmd(c, "stop", cID) + dockerCmd(c, "push", repoName) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "image="+repoName, "-f", "event=push", "--until", until) + c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) +} + +func (s *DockerSuite) TestEventsFilterType(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label))) + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=container") + events = strings.Split(strings.TrimSpace(out), "\n") + + // Events generated by the container that builds the image + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", "type=network") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) +} + +// #25798 +func (s *DockerSuite) TestEventsSpecialFiltersWithExecCreate(c *check.C) { + since := daemonUnixTime(c) + runSleepingContainer(c, "--name", "test-container", "-d") + waitRun("test-container") + + dockerCmd(c, "exec", "test-container", "echo", "hello-world") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event='exec_create: echo hello-world'", + ) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 1, 
check.Commentf(out))
+
+	out, _ = dockerCmd(
+		c,
+		"events",
+		"--since", since,
+		"--until", daemonUnixTime(c),
+		"--filter",
+		"event=exec_create",
+	)
+	c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 1, check.Commentf(out))
+}
+
+func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) {
+	since := daemonUnixTime(c)
+	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
+	waitRun("test-container")
+
+	out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", daemonUnixTime(c))
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out))
+}
+
+func (s *DockerSuite) TestEventsContainerRestart(c *check.C) {
+	dockerCmd(c, "run", "-d", "--name=testEvent", "--restart=on-failure:3", "busybox", "false")
+
+	// wait until testEvent has stopped restarting and exited.
+	waitTime := 10 * time.Second
+	if testEnv.DaemonPlatform() == "windows" {
+		// Windows takes longer...
+		waitTime = 90 * time.Second
+	}
+
+	err := waitInspect("testEvent", "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTime)
+	c.Assert(err, checker.IsNil)
+
+	var (
+		createCount int
+		startCount  int
+		dieCount    int
+	)
+	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container=testEvent")
+	events := strings.Split(strings.TrimSpace(out), "\n")
+
+	nEvents := len(events)
+	c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event
+	actions := eventActionsByIDAndType(c, events, "testEvent", "container")
+
+	for _, a := range actions {
+		switch a {
+		case "create":
+			createCount++
+		case "start":
+			startCount++
+		case "die":
+			dieCount++
+		}
+	}
+	c.Assert(createCount, checker.Equals, 1, check.Commentf("testEvent should be created 1 time: %v", actions))
+	c.Assert(startCount, checker.Equals, 4, check.Commentf("testEvent should start 4 times: %v", actions))
+	c.Assert(dieCount, checker.Equals, 4, check.Commentf("testEvent should die 4 times: %v", actions))
+}
+
+func (s *DockerSuite) TestEventsSinceInTheFuture(c *check.C) {
+	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
+	waitRun("test-container")
+
+	since := daemonTime(c)
+	until := since.Add(time.Duration(-24) * time.Hour)
+	out, _, err := dockerCmdWithError("events", "--filter", "image=busybox", "--since", parseEventTime(since), "--until", parseEventTime(until))
+
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "cannot be after `until`")
+}
+
+func (s *DockerSuite) TestEventsUntilInThePast(c *check.C) {
+	since := daemonUnixTime(c)
+
+	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
+	waitRun("test-container")
+
+	until := daemonUnixTime(c)
+
+	dockerCmd(c, "run", "--name", "test-container2", "-d", "busybox", "true")
+	waitRun("test-container2")
+
+	out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", until)
+
+	c.Assert(out, checker.Not(checker.Contains), "test-container2")
+	c.Assert(out, checker.Contains, "test-container")
+}
+
+func (s *DockerSuite) TestEventsFormat(c *check.C) {
+	since := daemonUnixTime(c)
+	dockerCmd(c, "run", "--rm", "busybox", "true")
+	dockerCmd(c, "run", "--rm", "busybox", "true")
+	out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--format", "{{json .}}")
+	dec := json.NewDecoder(strings.NewReader(out))
+	// make sure we got 2 start events
+	startCount := 0
+	for {
+		var err error
+		var ev eventtypes.Message
+		if err = dec.Decode(&ev); err
== io.EOF { + break + } + c.Assert(err, checker.IsNil) + if ev.Status == "start" { + startCount++ + } + } + + c.Assert(startCount, checker.Equals, 2, check.Commentf("should have had 2 start events but had %d, out: %s", startCount, out)) +} + +func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{badFuncString .}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1: function \"badFuncString\" not defined", + }) +} + +func (s *DockerSuite) TestEventsFormatBadField(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{.badFieldString}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1:2: executing \"\" at <.badFieldString>: can't evaluate field badFieldString in type *events.Message", + }) +} diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go new file mode 100644 index 0000000..0bd6f54 --- /dev/null +++ b/integration-cli/docker_cli_events_unix_test.go @@ -0,0 +1,485 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "syscall" + "time" + "unicode" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #5979 +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "busybox", "true") + + file, err := ioutil.TempFile("", "") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file")) + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%s --until=%s > %s", dockerBinary, since, daemonUnixTime(c), file.Name()) + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command)) + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, ch := range scanner.Text() { + c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) + } + } + c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) + +} + +func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + nEvents := 
len(events)
+
+	c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event
+	c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create")
+	c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach")
+	c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start")
+	c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom")
+	c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die")
+}
+
+func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) {
+	testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport)
+
+	errChan := make(chan error)
+	observer, err := newEventObserver(c)
+	c.Assert(err, checker.IsNil)
+	err = observer.Start()
+	c.Assert(err, checker.IsNil)
+	defer observer.Stop()
+
+	go func() {
+		defer close(errChan)
+		out, exitCode, _ := dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done")
+		if expected := 137; exitCode != expected {
+			errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out)
+		}
+	}()
+
+	c.Assert(waitRun("oomTrue"), checker.IsNil)
+	defer dockerCmd(c, "kill", "oomTrue")
+	containerID := inspectField(c, "oomTrue", "Id")
+
+	testActions := map[string]chan bool{
+		"oom": make(chan bool),
+	}
+
+	matcher := matchEventLine(containerID, "container", testActions)
+	processor := processEventMatch(testActions)
+	go observer.Match(matcher, processor)
+
+	select {
+	case <-time.After(20 * time.Second):
+		observer.CheckEventError(c, containerID, "oom", matcher)
+	case <-testActions["oom"]:
+		// ignore, done
+	case errRun := <-errChan:
+		if errRun != nil {
+			c.Fatalf("%v", errRun)
+		} else {
+			c.Fatalf("container should still be running, but it is not")
+		}
+	}
+
+	status := inspectField(c, "oomTrue", "State.Status")
+	c.Assert(strings.TrimSpace(status), checker.Equals, "running", check.Commentf("container should still be running"))
+}
+
+// #18453
+func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top")
+	c1 := strings.TrimSpace(cOut)
+	waitRun("foo")
+	cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top")
+	c2 := strings.TrimSpace(cOut)
+	waitRun("bar")
+	out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", daemonUnixTime(c))
+	c.Assert(out, checker.Contains, c1, check.Commentf(out))
+	c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out))
+}
+
+// #18453
+func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	buf := &bytes.Buffer{}
+	cmd := exec.Command(dockerBinary, "events", "-f", "container=foo", "--since=0")
+	cmd.Stdout = buf
+	c.Assert(cmd.Start(), check.IsNil)
+	defer cmd.Wait()
+	defer cmd.Process.Kill()
+
+	// Sleep for a second to make sure we are testing the case where events are listened to before the container starts.
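+	// The polling loop below then gives the already-attached `docker events`
+	// client up to ~15s (30 * 500ms) to report the container's events.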
+ time.Sleep(time.Second) + id, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + cID := strings.TrimSpace(id) + for i := 0; ; i++ { + out := buf.String() + if strings.Contains(out, cID) { + break + } + if i > 30 { + c.Fatalf("Missing event of container (foo, %v), got %q", cID, out) + } + time.Sleep(500 * time.Millisecond) + } +} + +func (s *DockerSuite) TestVolumeEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/mount volume actions + dockerCmd(c, "volume", "create", "test-event-volume-local") + dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true") + waitRun("test-volume-container") + + // Observe unmount/destroy volume actions + dockerCmd(c, "rm", "-f", "test-volume-container") + dockerCmd(c, "volume", "rm", "test-event-volume-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") + c.Assert(volumeEvents, checker.HasLen, 4) + c.Assert(volumeEvents[0], checker.Equals, "create") + c.Assert(volumeEvents[1], checker.Equals, "mount") + c.Assert(volumeEvents[2], checker.Equals, "unmount") + c.Assert(volumeEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestNetworkEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", "busybox", "true") + waitRun("test-network-container") + + // Observe disconnect/destroy network actions + dockerCmd(c, "rm", "-f", "test-network-container") + dockerCmd(c, "network", "rm", "test-event-network-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network") + c.Assert(netEvents, checker.HasLen, 4) + c.Assert(netEvents[0], checker.Equals, "create") + c.Assert(netEvents[1], checker.Equals, "connect") + c.Assert(netEvents[2], checker.Equals, "disconnect") + c.Assert(netEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local-1") + dockerCmd(c, "network", "create", "test-event-network-local-2") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local-1", "-td", "busybox", "sh") + waitRun("test-network-container") + dockerCmd(c, "network", "connect", "test-event-network-local-2", "test-network-container") + + since := daemonUnixTime(c) + + dockerCmd(c, "stop", "-t", "1", "test-network-container") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "-f", "type=network") + netEvents := strings.Split(strings.TrimSpace(out), "\n") + + // received two network disconnect events + c.Assert(len(netEvents), checker.Equals, 2) + c.Assert(netEvents[0], checker.Contains, "disconnect") + c.Assert(netEvents[1], 
checker.Contains, "disconnect") + + //both networks appeared in the network event output + c.Assert(out, checker.Contains, "test-event-network-local-1") + c.Assert(out, checker.Contains, "test-event-network-local-2") +} + +func (s *DockerSuite) TestEventsStreaming(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true") + containerID := strings.TrimSpace(out) + + testActions := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + "die": make(chan bool, 1), + "destroy": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "create", matcher) + case <-testActions["create"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } + + dockerCmd(c, "rm", containerID) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "destroy", matcher) + case <-testActions["destroy"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + name := "testimageevents" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + MAINTAINER "docker"`)) + imageID := getIDByName(c, name) + c.Assert(deleteImages(name), checker.IsNil) + + testActions := map[string]chan bool{ + "untag": make(chan bool, 1), + "delete": make(chan bool, 1), + } + + matcher := matchEventLine(imageID, "image", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "untag", matcher) + case <-testActions["untag"]: + // ignore, done + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "delete", matcher) + case <-testActions["delete"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-type") + dockerCmd(c, "volume", "create", "test-event-volume-type") + + out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out)) + + networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network") + volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume") + + c.Assert(volumeActions[0], checker.Equals, "create") + c.Assert(networkActions[0], checker.Equals, "create") +} + +func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) { + 
testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "volume", "create", "test-event-volume-id") + out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-volume-id") + c.Assert(events[0], checker.Contains, "driver=local") +} + +func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-local") + out, _ := dockerCmd(c, "events", "--filter", "network=test-event-network-local", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-network-local") + c.Assert(events[0], checker.Contains, "type=bridge") +} + +func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"], "shutdown-timeout": 10}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (allow-nondistributable-artifacts=[], cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, default-shm-size=67108864, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, registry-mirrors=[], runtimes=runc:{docker-runc []} bare:{ []}, shutdown-timeout=10)", daemonID, daemonName)) +} + +func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") 
{ + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) +} diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go new file mode 100644 index 0000000..be228ab --- /dev/null +++ b/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,602 @@ +// +build !test_no_exec + +package main + +import ( + "bufio" + "fmt" + "net/http" + "os" + "os/exec" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExec(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "cat", "/tmp/file") + out = strings.Trim(out, "\r\n") + c.Assert(out, checker.Equals, "test") + +} + +func (s *DockerSuite) TestExecInteractive(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + c.Assert(err, checker.IsNil) + stdout, err := execCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + err = execCmd.Start() + c.Assert(err, checker.IsNil) + _, err = stdin.Write([]byte("cat /tmp/file\n")) + c.Assert(err, checker.IsNil) + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + c.Assert(err, checker.IsNil) + line = strings.TrimSpace(line) + c.Assert(line, checker.Equals, "test") + err = stdin.Close() + c.Assert(err, checker.IsNil) + errChan := make(chan error) + go func() { + errChan <- execCmd.Wait() + close(errChan) + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker exec failed to exit on stdin close") + } + +} + +func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { + out := runSleepingContainer(c) + 
cleanedContainerID := strings.TrimSpace(out)
+	c.Assert(waitRun(cleanedContainerID), check.IsNil)
+	dockerCmd(c, "restart", cleanedContainerID)
+	c.Assert(waitRun(cleanedContainerID), check.IsNil)
+
+	out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello")
+	outStr := strings.TrimSpace(out)
+	c.Assert(outStr, checker.Equals, "hello")
+}
+
+func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) {
+	// TODO Windows CI: Requires a little work to get this ported.
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+	s.d.StartWithBusybox(c)
+
+	out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top")
+	c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out))
+
+	s.d.Restart(c)
+
+	out, err = s.d.Cmd("start", "top")
+	c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out))
+
+	out, err = s.d.Cmd("exec", "top", "echo", "hello")
+	c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out))
+
+	outStr := strings.TrimSpace(string(out))
+	c.Assert(outStr, checker.Equals, "hello")
+}
+
+// Regression test for #9155, #9044
+func (s *DockerSuite) TestExecEnv(c *check.C) {
+	// TODO Windows CI: This one is interesting and may just end up being a feature
+	// difference between Windows and Linux. On Windows, the environment is passed
+	// into the process that is launched, not into the machine environment. Hence
+	// a subsequent exec will not have LALA set.
+	testRequires(c, DaemonIsLinux)
+	runSleepingContainer(c, "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing")
+	c.Assert(waitRun("testing"), check.IsNil)
+
+	out, _ := dockerCmd(c, "exec", "testing", "env")
+	c.Assert(out, checker.Not(checker.Contains), "LALA=value1")
+	c.Assert(out, checker.Contains, "LALA=value2")
+	c.Assert(out, checker.Contains, "HOME=/root")
+}
+
+func (s *DockerSuite) TestExecSetEnv(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	runSleepingContainer(c, "-e", "HOME=/root", "-d", "--name", "testing")
+	c.Assert(waitRun("testing"), check.IsNil)
+
+	out, _ := dockerCmd(c, "exec", "-e", "HOME=/another", "-e", "ABC=xyz", "testing", "env")
+	c.Assert(out, checker.Not(checker.Contains), "HOME=/root")
+	c.Assert(out, checker.Contains, "HOME=/another")
+	c.Assert(out, checker.Contains, "ABC=xyz")
+}
+
+func (s *DockerSuite) TestExecExitStatus(c *check.C) {
+	runSleepingContainer(c, "-d", "--name", "top")
+
+	result := icmd.RunCommand(dockerBinary, "exec", "top", "sh", "-c", "exit 23")
+	c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 23, Error: "exit status 23"})
+}
+
+func (s *DockerSuite) TestExecPausedContainer(c *check.C) {
+	testRequires(c, IsPausable)
+
+	out := runSleepingContainer(c, "-d", "--name", "testing")
+	containerID := strings.TrimSpace(out)
+
+	dockerCmd(c, "pause", "testing")
+	out, _, err := dockerCmdWithError("exec", containerID, "echo", "hello")
+	c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new command if it is paused"))
+
+	expected := containerID + " is paused, unpause the container before exec"
+	c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused"))
+}
+
+// regression test for #9476
+func (s *DockerSuite) TestExecTTYCloseStdin(c *check.C) {
+	// TODO Windows CI: This requires some work to port to Windows.
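+	// Closing the exec's stdin must tear the exec process down: once cat
+	// exits, `docker top` below should only list the container's own process.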
+ testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + + cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, _ = dockerCmd(c, "top", "exec_tty_stdin") + outArr := strings.Split(out, "\n") + c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) + c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") +} + +func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("exec should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("exec is running but should have failed") + } +} + +// FIXME(vdemeester) this should be a unit tests on cli/command/container package +func (s *DockerSuite) TestExecParseError(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + icmd.RunCommand(dockerBinary, "exec", "top").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "See 'docker exec --help'", + }) +} + +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. 
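+	// A long-running exec ("top") must not block `docker stop`; the 3s
+	// select below acts as the hang detector.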
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
+
+	err := exec.Command(dockerBinary, "exec", "testing", "top").Start()
+	c.Assert(err, checker.IsNil)
+
+	type dstop struct {
+		out []byte
+		err error
+	}
+
+	ch := make(chan dstop)
+	go func() {
+		out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput()
+		ch <- dstop{out, err}
+		close(ch)
+	}()
+	select {
+	case <-time.After(3 * time.Second):
+		c.Fatal("Container stop timed out")
+	case s := <-ch:
+		c.Assert(s.err, check.IsNil)
+	}
+}
+
+func (s *DockerSuite) TestExecCgroup(c *check.C) {
+	// Not applicable on Windows - using Linux specific functionality
+	testRequires(c, NotUserNamespace)
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
+
+	out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup")
+	containerCgroups := sort.StringSlice(strings.Split(out, "\n"))
+
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	execCgroups := []sort.StringSlice{}
+	errChan := make(chan error, 5) // buffered so a failing exec can report its error without blocking wg.Wait
+	// exec a few times concurrently to get consistent failure
+	for i := 0; i < 5; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup")
+			if err != nil {
+				errChan <- err
+				return
+			}
+			cg := sort.StringSlice(strings.Split(out, "\n"))
+
+			mu.Lock()
+			execCgroups = append(execCgroups, cg)
+			mu.Unlock()
+		}()
+	}
+	wg.Wait()
+	close(errChan)
+
+	for err := range errChan {
+		c.Assert(err, checker.IsNil)
+	}
+
+	for _, cg := range execCgroups {
+		if !reflect.DeepEqual(cg, containerCgroups) {
+			fmt.Println("exec cgroups:")
+			for _, name := range cg {
+				fmt.Printf(" %s\n", name)
+			}
+
+			fmt.Println("container cgroups:")
+			for _, name := range containerCgroups {
+				fmt.Printf(" %s\n", name)
+			}
+			c.Fatal("cgroups mismatched")
+		}
+	}
+}
+
+func (s *DockerSuite) TestExecInspectID(c *check.C) {
+	out := runSleepingContainer(c, "-d")
+	id := strings.TrimSuffix(out, "\n")
+
+	out = inspectField(c, id, "ExecIDs")
+	c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out))
+
+	// Start an exec, have it block waiting so we can do some checking
+	cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c",
+		"while ! test -e /execid1; do sleep 1; done")
+
+	err := cmd.Start()
+	c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd"))
+
+	// Give the exec 10 chances/seconds to start then give up and stop the test
+	tries := 10
+	for i := 0; i < tries; i++ {
+		// Since it's still running we should see exec as part of the container
+		out = strings.TrimSpace(inspectField(c, id, "ExecIDs"))
+
+		if out != "[]" && out != "" {
+			break
+		}
+		c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still empty after 10 seconds"))
+		time.Sleep(1 * time.Second)
+	}
+
+	// Save execID for later
+	execID, err := inspectFilter(id, "index .ExecIDs 0")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id"))
+
+	// End the exec by creating the missing file
+	err = exec.Command(dockerBinary, "exec", id,
+		"sh", "-c", "touch /execid1").Run()
+
+	c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd"))
+
+	// Wait for 1st exec to complete
+	cmd.Wait()
+
+	// Give the exec 10 chances/seconds to stop then give up and stop the test
+	for i := 0; i < tries; i++ {
+		// Since it's still running we should see exec as part of the container
+		out = strings.TrimSpace(inspectField(c, id, "ExecIDs"))
+
+		if out == "[]" {
+			break
+		}
+		c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still not empty after 10 seconds"))
+		time.Sleep(1 * time.Second)
+	}
+
+	// But we should still be able to query the execID
+	sc, body, _ := request.SockRequest("GET", "/exec/"+execID+"/json", nil, daemonHost())
+
+	c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body))
+
+	// Now delete the container and then an 'inspect' on the exec should
+	// result in a 404 (not 'container not running')
+	out, ec := dockerCmd(c, "rm", "-f", id)
+	c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out))
+	sc, body, _ = request.SockRequest("GET", "/exec/"+execID+"/json", nil, daemonHost())
+	c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body))
+}
+
+func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) {
+	// Problematic on Windows as Windows does not support links
+	testRequires(c, DaemonIsLinux)
+	var out string
+	out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
+	idA := strings.TrimSpace(out)
+	c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be empty", out))
+	out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top")
+	idB := strings.TrimSpace(out)
+	c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be empty", out))
+
+	dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
+	dockerCmd(c, "rename", "container1", "container_new")
+	dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
+}
+
+func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) {
+	// Not applicable to Windows CI.
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+	for _, fn := range []string{"resolv.conf", "hosts"} {
+		containers := cli.DockerCmd(c, "ps", "-q", "-a").Combined()
+		if containers != "" {
+			cli.DockerCmd(c, append([]string{"rm", "-fv"}, strings.Split(strings.TrimSpace(containers), "\n")...)...)
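+			// (the append above flattens "rm -fv" plus every listed container
+			// ID into DockerCmd's variadic argument list)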
+		}
+
+		content := runCommandAndReadContainerFile(c, fn, dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))
+
+		c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container: %s", string(content)))
+
+		out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top")
+		contID := strings.TrimSpace(out)
+		netFilePath := containerStorageFile(contID, fn)
+
+		f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644)
+		c.Assert(err, checker.IsNil)
+
+		if _, err := f.Seek(0, 0); err != nil {
+			f.Close()
+			c.Fatal(err)
+		}
+
+		if err := f.Truncate(0); err != nil {
+			f.Close()
+			c.Fatal(err)
+		}
+
+		if _, err := f.Write([]byte("success2\n")); err != nil {
+			f.Close()
+			c.Fatal(err)
+		}
+		f.Close()
+
+		res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn)
+		c.Assert(res, checker.Equals, "success2\n")
+	}
+}
+
+func (s *DockerSuite) TestExecWithUser(c *check.C) {
+	// TODO Windows CI: This may be fixable in the future once Windows
+	// supports users
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
+
+	out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id")
+	c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)")
+
+	out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id")
+	c.Assert(out, checker.Contains, "uid=0(root) gid=0(root)", check.Commentf("exec as root expected root user, got %s", out))
+}
+
+func (s *DockerSuite) TestExecWithPrivileged(c *check.C) {
+	// Not applicable on Windows
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	// Start main loop which attempts mknod repeatedly
+	dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`)
+
+	// Check exec mknod doesn't work
+	icmd.RunCommand(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+
+	// Check exec mknod does work with --privileged
+	result := icmd.RunCommand(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`)
+	result.Assert(c, icmd.Success)
+
+	actual := strings.TrimSpace(result.Combined())
+	c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", result.Combined()))
+
+	// Check subsequent unprivileged exec cannot mknod
+	icmd.RunCommand(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// Confirm at no point was mknod allowed
+	result = icmd.RunCommand(dockerBinary, "logs", "parent")
+	result.Assert(c, icmd.Success)
+	c.Assert(result.Combined(), checker.Not(checker.Contains), "Success")
+}
+
+func (s *DockerSuite) TestExecWithImageUser(c *check.C) {
+	// Not applicable on Windows
+	testRequires(c, DaemonIsLinux)
+	name := "testbuilduser"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+	RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+	USER dockerio`))
+	dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top")
+
+	out, _ := 
dockerCmd(c, "exec", "dockerioexec", "whoami") + c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) +} + +func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { + // Windows does not support read-only + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") + dockerCmd(c, "exec", "parent", "true") +} + +func (s *DockerSuite) TestExecUlimits(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testexeculimits" + runSleepingContainer(c, "-d", "--ulimit", "nofile=511:511", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -n") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "511") +} + +// #15750 +func (s *DockerSuite) TestExecStartFails(c *check.C) { + // TODO Windows CI. This test should be portable. Figure out why it fails + // currently. + testRequires(c, DaemonIsLinux) + name := "exec-15750" + runSleepingContainer(c, "-d", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "executable file not found") +} + +// Fix regression in https://github.com/docker/docker/pull/26461#issuecomment-250287297 +func (s *DockerSuite) TestExecWindowsPathNotWiped(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", minimalBaseImage(), "powershell", "start-sleep", "60") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "powershell", "write-host", "$env:PATH") + out = strings.ToLower(strings.Trim(out, "\r\n")) + c.Assert(out, checker.Contains, `windowspowershell\v1.0`) +} + +func (s *DockerSuite) TestExecEnvLinksHost(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-d", "--name", "foo") + runSleepingContainer(c, "-d", "--link", "foo:db", "--hostname", "myhost", "--name", "bar") + out, _ := dockerCmd(c, "exec", "bar", "env") + c.Assert(out, checker.Contains, "HOSTNAME=myhost") + c.Assert(out, checker.Contains, "DB_NAME=/bar/db") +} + +func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { + testRequires(c, DaemonIsWindows) + runSleepingContainer(c, "-d", "--name", "test") + exec := make(chan bool) + go func() { + dockerCmd(c, "exec", "test", "cmd", "/c", "start sleep 10") + exec <- true + }() + + count := 0 + for { + top := make(chan string) + var out string + go func() { + out, _ := dockerCmd(c, "top", "test") + top <- out + }() + + select { + case <-time.After(time.Second * 5): + c.Fatal("timed out waiting for top while exec is exiting") + case out = <-top: + break + } + + if strings.Count(out, "busybox.exe") == 2 && !strings.Contains(out, "cmd.exe") { + // The initial exec process (cmd.exe) has exited, and both sleeps are currently running + break + } + count++ + if count >= 30 { + c.Fatal("too many retries") + } + time.Sleep(1 * time.Second) + } + + inspect := make(chan bool) + go func() { + dockerCmd(c, "inspect", "test") + inspect <- true + }() + + select { + case <-time.After(time.Second * 5): + c.Fatal("timed out waiting for inspect while exec is exiting") + case <-inspect: + break + } + + // Ensure the background sleep is still running + out, _ := dockerCmd(c, "top", "test") + 
c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 2) + + // The exec should exit when the background sleep exits + select { + case <-time.After(time.Second * 15): + c.Fatal("timed out waiting for async exec to exit") + case <-exec: + // Ensure the background sleep has actually exited + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 1) + break + } +} diff --git a/integration-cli/docker_cli_exec_unix_test.go b/integration-cli/docker_cli_exec_unix_test.go new file mode 100644 index 0000000..5d8efc7 --- /dev/null +++ b/integration-cli/docker_cli_exec_unix_test.go @@ -0,0 +1,93 @@ +// +build !windows,!test_no_exec + +package main + +import ( + "bytes" + "io" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// regression test for #12546 +func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + ch := make(chan error) + go func() { ch <- cmd.Wait() }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil) + output := b.String() + c.Assert(strings.TrimSpace(output), checker.Equals, "hello") + case <-time.After(5 * time.Second): + c.Fatal("timed out running docker exec") + } +} + +func (s *DockerSuite) TestExecTTY(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top") + + cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + defer p.Close() + + _, err = p.Write([]byte("cat /foo && exit\n")) + c.Assert(err, checker.IsNil) + + chErr := make(chan error) + go func() { + chErr <- cmd.Wait() + }() + select { + case err := <-chErr: + c.Assert(err, checker.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for exec to exit") + } + + buf := make([]byte, 256) + read, err := p.Read(buf) + c.Assert(err, checker.IsNil) + c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) +} + +// Test the TERM env var is set when -t is provided on exec +func (s *DockerSuite) TestExecWithTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-id", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", "-t", contID, "sh", "-c", "if [ -z $TERM ]; then exit 1; else exit 0; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} + +// Test that the TERM env var is not set on exec when -t is not provided, even if it was set +// on run +func (s *DockerSuite) TestExecWithNoTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", contID, "sh", "-c", "if [ -z $TERM ]; then exit 0; else exit 1; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} diff --git a/integration-cli/docker_cli_experimental_test.go b/integration-cli/docker_cli_experimental_test.go new file mode 100644 index 0000000..0a496fd --- /dev/null 
+++ b/integration-cli/docker_cli_experimental_test.go @@ -0,0 +1,29 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) { + testExperimentalInVersion(c, ExperimentalDaemon, "*true") +} + +func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) { + testExperimentalInVersion(c, NotExperimentalDaemon, "*false") +} + +func testExperimentalInVersion(c *check.C, requirement func() bool, expectedValue string) { + testRequires(c, requirement) + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, expectedValue) + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 0000000..fe117b9 --- /dev/null +++ b/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,51 @@ +package main + +import ( + "os" + "strings" + + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// export an image and try to import it into a new one +func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + + out, _ := dockerCmd(c, "export", containerID) + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "import", "-", "repo/testexp:v1"}, + Stdin: strings.NewReader(out), + }) + result.Assert(c, icmd.Success) + + cleanedImageID := strings.TrimSpace(result.Combined()) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} + +// Used to test output flag in the export command +func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerwithoutputandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + dockerCmd(c, "export", "--output=testexp.tar", containerID) + defer os.Remove("testexp.tar") + + resultCat := icmd.RunCommand("cat", "testexp.tar") + resultCat.Assert(c, icmd.Success) + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "import", "-", "repo/testexp:v1"}, + Stdin: strings.NewReader(resultCat.Combined()), + }) + result.Assert(c, icmd.Success) + + cleanedImageID := strings.TrimSpace(result.Combined()) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} diff --git a/integration-cli/docker_cli_external_graphdriver_unix_test.go b/integration-cli/docker_cli_external_graphdriver_unix_test.go new file mode 100644 index 0000000..d736c4b --- /dev/null +++ b/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -0,0 +1,406 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strings" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +func init() { + 
check.Suite(&DockerExternalGraphdriverSuite{
+		ds: &DockerSuite{},
+	})
+}
+
+type DockerExternalGraphdriverSuite struct {
+	server  *httptest.Server
+	jserver *httptest.Server
+	ds      *DockerSuite
+	d       *daemon.Daemon
+	ec      map[string]*graphEventsCounter
+}
+
+type graphEventsCounter struct {
+	activations int
+	creations   int
+	removals    int
+	gets        int
+	puts        int
+	stats       int
+	cleanups    int
+	exists      int
+	init        int
+	metadata    int
+	diff        int
+	applydiff   int
+	changes     int
+	diffsize    int
+}
+
+func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) {
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+}
+
+func (s *DockerExternalGraphdriverSuite) OnTimeout(c *check.C) {
+	s.d.DumpStackAndQuit()
+}
+
+func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) {
+	if s.d != nil {
+		s.d.Stop(c)
+		s.ds.TearDownTest(c)
+	}
+}
+
+func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) {
+	s.ec = make(map[string]*graphEventsCounter)
+	s.setUpPluginViaSpecFile(c)
+	s.setUpPluginViaJSONFile(c)
+}
+
+func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) {
+	mux := http.NewServeMux()
+	s.server = httptest.NewServer(mux)
+
+	s.setUpPlugin(c, "test-external-graph-driver", "spec", mux, []byte(s.server.URL))
+}
+
+func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) {
+	mux := http.NewServeMux()
+	s.jserver = httptest.NewServer(mux)
+
+	p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL)
+	b, err := json.Marshal(p)
+	c.Assert(err, check.IsNil)
+
+	s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b)
+}
+
+func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) {
+	type graphDriverRequest struct {
+		ID         string `json:",omitempty"`
+		Parent     string `json:",omitempty"`
+		MountLabel string `json:",omitempty"`
+		ReadOnly   bool   `json:",omitempty"`
+	}
+
+	type graphDriverResponse struct {
+		Err      error             `json:",omitempty"`
+		Dir      string            `json:",omitempty"`
+		Exists   bool              `json:",omitempty"`
+		Status   [][2]string       `json:",omitempty"`
+		Metadata map[string]string `json:",omitempty"`
+		Changes  []archive.Change  `json:",omitempty"`
+		Size     int64             `json:",omitempty"`
+	}
+
+	respond := func(w http.ResponseWriter, data interface{}) {
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		switch t := data.(type) {
+		case error:
+			fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error()))
+		case string:
+			fmt.Fprintln(w, t)
+		default:
+			json.NewEncoder(w).Encode(&data)
+		}
+	}
+
+	decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error {
+		defer b.Close()
+		if err := json.NewDecoder(b).Decode(&out); err != nil {
+			http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500)
+			// Return the decode error so handlers actually bail out.
+			return err
+		}
+		return nil
+	}
+
+	base, err := ioutil.TempDir("", name)
+	c.Assert(err, check.IsNil)
+	vfsProto, err := vfs.Init(base, []string{}, nil, nil)
+	c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver"))
+	driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil)
+
+	s.ec[ext] = &graphEventsCounter{}
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		s.ec[ext].activations++
+		respond(w, `{"Implements": ["GraphDriver"]}`)
+	})
+
+	mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) {
+		s.ec[ext].init++
+		respond(w, "{}")
+	})
+
+	mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r 
*http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.CreateReadWrite(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.Create(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].removals++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Remove(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].gets++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + dir, err := driver.Get(req.ID, req.MountLabel) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Dir: dir}) + }) + + mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].puts++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Put(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].exists++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) + }) + + mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].stats++ + respond(w, &graphDriverResponse{Status: driver.Status()}) + }) + + mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].cleanups++ + err := driver.Cleanup() + if err != nil { + respond(w, err) + return + } + respond(w, `{}`) + }) + + mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].metadata++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + data, err := driver.GetMetadata(req.ID) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Metadata: data}) + }) + + mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diff++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + diff, err := driver.Diff(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + io.Copy(w, diff) + }) + + mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].changes++ + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + changes, err := driver.Changes(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Changes: changes}) + }) + + mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].applydiff++ + diff := r.Body + defer r.Body.Close() + + id := r.URL.Query().Get("id") + parent := r.URL.Query().Get("parent") + + if id == "" { + http.Error(w, fmt.Sprintf("missing id"), 
409)
+			// Bail out instead of calling ApplyDiff with an empty ID.
+			return
+		}
+
+		size, err := driver.ApplyDiff(id, parent, diff)
+		if err != nil {
+			respond(w, err)
+			return
+		}
+		respond(w, &graphDriverResponse{Size: size})
+	})
+
+	mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) {
+		s.ec[ext].diffsize++
+
+		var req graphDriverRequest
+		if err := decReq(r.Body, &req, w); err != nil {
+			return
+		}
+
+		size, err := driver.DiffSize(req.ID, req.Parent)
+		if err != nil {
+			respond(w, err)
+			return
+		}
+		respond(w, &graphDriverResponse{Size: size})
+	})
+
+	err = os.MkdirAll("/etc/docker/plugins", 0755)
+	c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins"))
+
+	specFile := "/etc/docker/plugins/" + name + "." + ext
+	err = ioutil.WriteFile(specFile, b, 0644)
+	c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile))
+}
+
+func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) {
+	s.server.Close()
+	s.jserver.Close()
+
+	err := os.RemoveAll("/etc/docker/plugins")
+	c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins"))
+}
+
+func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+
+	s.testExternalGraphDriver("test-external-graph-driver", "spec", c)
+	s.testExternalGraphDriver("json-external-graph-driver", "json", c)
+}
+
+func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) {
+	s.d.StartWithBusybox(c, "-s", name)
+
+	out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	s.d.Restart(c, "-s", name)
+
+	out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(strings.TrimSpace(out), check.Equals, name)
+
+	out, err = s.d.Cmd("diff", "graphtest")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out))
+
+	out, err = s.d.Cmd("rm", "-f", "graphtest")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("info")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	s.d.Stop(c)
+
+	// Don't check s.ec.exists, because the daemon no longer calls the
+	// Exists function.
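+	// Rough accounting for the exact equalities below (inferred from the
+	// flow above, not from the plugin protocol spec): the daemon is started
+	// twice (StartWithBusybox, then Restart), and each start activates the
+	// plugin and calls Init once, so activations == init == 2. The
+	// layer-level counters (creations, gets, puts, applydiff) depend on how
+	// many layers the run/restart/rm sequence touches, hence the >= checks
+	// rather than exact equality.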
+ c.Assert(s.ec[ext].activations, check.Equals, 2) + c.Assert(s.ec[ext].init, check.Equals, 2) + c.Assert(s.ec[ext].creations >= 1, check.Equals, true) + c.Assert(s.ec[ext].removals >= 1, check.Equals, true) + c.Assert(s.ec[ext].gets >= 1, check.Equals, true) + c.Assert(s.ec[ext].puts >= 1, check.Equals, true) + c.Assert(s.ec[ext].stats, check.Equals, 5) + c.Assert(s.ec[ext].cleanups, check.Equals, 2) + c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) + c.Assert(s.ec[ext].changes, check.Equals, 1) + c.Assert(s.ec[ext].diffsize, check.Equals, 0) + c.Assert(s.ec[ext].diff, check.Equals, 0) + c.Assert(s.ec[ext].metadata, check.Equals, 1) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { + testRequires(c, Network, ExperimentalDaemon) + + s.d.Start(c) + + out, err := s.d.Cmd("pull", "busybox:latest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) +} diff --git a/integration-cli/docker_cli_external_volume_driver_unix_test.go b/integration-cli/docker_cli_external_volume_driver_unix_test.go new file mode 100644 index 0000000..5fe417c --- /dev/null +++ b/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -0,0 +1,633 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +const volumePluginName = "test-external-volume-driver" + +func init() { + check.Suite(&DockerExternalVolumeSuite{ + ds: &DockerSuite{}, + }) +} + +type eventCounter struct { + activations int + creations int + removals int + mounts int + unmounts int + paths int + lists int + gets int + caps int +} + +type DockerExternalVolumeSuite struct { + ds *DockerSuite + d *daemon.Daemon + *volumePlugin +} + +func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } +} + +func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { + s.volumePlugin = newVolumePlugin(c, volumePluginName) +} + +type volumePlugin struct { + ec *eventCounter + *httptest.Server + vols map[string]vol +} + +type vol struct { + Name string + Mountpoint string + Ninja bool // hack used to trigger a null volume return on `Get` + Status map[string]interface{} + Options map[string]string +} + +func (p *volumePlugin) Close() { + p.Server.Close() +} + +func newVolumePlugin(c *check.C, name string) *volumePlugin { + mux := http.NewServeMux() + s := &volumePlugin{Server: httptest.NewServer(mux), ec: &eventCounter{}, vols: make(map[string]vol)} + + type pluginRequest struct { + Name string + Opts map[string]string + ID string + } + + type pluginResp struct { + Mountpoint string `json:",omitempty"` + Err string `json:",omitempty"` + } + + read := func(b io.ReadCloser) (pluginRequest, error) { + defer b.Close() + var pr pluginRequest + err := json.NewDecoder(b).Decode(&pr) + return pr, err + } + + send := func(w http.ResponseWriter, data interface{}) { + 
switch t := data.(type) {
+		case error:
+			http.Error(w, t.Error(), 500)
+		case string:
+			w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+			fmt.Fprintln(w, t)
+		default:
+			w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+			json.NewEncoder(w).Encode(&data)
+		}
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.activations++
+		send(w, `{"Implements": ["VolumeDriver"]}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.creations++
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+		_, isNinja := pr.Opts["ninja"]
+		status := map[string]interface{}{"Hello": "world"}
+		s.vols[pr.Name] = vol{Name: pr.Name, Ninja: isNinja, Status: status, Options: pr.Opts}
+		send(w, nil)
+	})
+
+	mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.lists++
+		vols := make([]vol, 0, len(s.vols))
+		for _, v := range s.vols {
+			if v.Ninja {
+				continue
+			}
+			vols = append(vols, v)
+		}
+		send(w, map[string][]vol{"Volumes": vols})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.gets++
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		v, exists := s.vols[pr.Name]
+		if !exists {
+			send(w, `{"Err": "no such volume"}`)
+			// Stop here so a second response isn't written below.
+			return
+		}
+
+		if v.Ninja {
+			send(w, map[string]vol{})
+			return
+		}
+
+		v.Mountpoint = hostVolumePath(pr.Name)
+		send(w, map[string]vol{"Volume": v})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.removals++
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		v, ok := s.vols[pr.Name]
+		if !ok {
+			send(w, nil)
+			return
+		}
+
+		if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil {
+			send(w, &pluginResp{Err: err.Error()})
+			return
+		}
+		delete(s.vols, v.Name)
+		send(w, nil)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.paths++
+
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+		p := hostVolumePath(pr.Name)
+		send(w, &pluginResp{Mountpoint: p})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.mounts++
+
+		pr, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		if v, exists := s.vols[pr.Name]; exists {
+			// Use this to simulate a mount failure
+			if _, exists := v.Options["invalidOption"]; exists {
+				send(w, fmt.Errorf("invalid argument"))
+				return
+			}
+		}
+
+		p := hostVolumePath(pr.Name)
+		if err := os.MkdirAll(p, 0755); err != nil {
+			send(w, &pluginResp{Err: err.Error()})
+			return
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.Server.URL), 0644); err != nil {
+			send(w, err)
+			return
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0644); err != nil {
+			send(w, err)
+			return
+		}
+
+		send(w, &pluginResp{Mountpoint: p})
+	})
+
+	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.unmounts++
+
+		_, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		send(w, nil)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.caps++
+
+		_, err := read(r.Body)
+		if err != nil {
+			send(w, err)
+			return
+		}
+
+		send(w, `{"Capabilities": { "Scope": "global" }}`)
+	})
+
+	err := os.MkdirAll("/etc/docker/plugins", 0755)
+	c.Assert(err, checker.IsNil)
+
+	err 
= ioutil.WriteFile("/etc/docker/plugins/"+name+".spec", []byte(s.Server.URL), 0644) + c.Assert(err, checker.IsNil) + return s +} + +func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { + s.volumePlugin.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *check.C) { + dockerCmd(c, "volume", "create", "test") + + out, _, err := dockerCmdWithError("volume", "create", "test", "--driver", volumePluginName) + c.Assert(err, check.NotNil, check.Commentf("volume create exception name already in use with another driver")) + c.Assert(out, checker.Contains, "must be unique") + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test") + _, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(out)) + c.Assert(err, check.IsNil) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + p := hostVolumePath("external-volume-test") + _, err = os.Lstat(p) + c.Assert(err, checker.NotNil) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, err)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 2) + c.Assert(s.ec.unmounts, checker.Equals, 2) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + 
c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func hostVolumePath(name string) string { + return fmt.Sprintf("/var/lib/docker/volumes/%s", name) +} + +// Make sure a request to use a down driver doesn't block other requests +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { + specPath := "/etc/docker/plugins/down-driver.spec" + err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) + c.Assert(err, check.IsNil) + defer os.RemoveAll(specPath) + + chCmd1 := make(chan struct{}) + chCmd2 := make(chan error) + cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") + cmd2 := exec.Command(dockerBinary, "volume", "create") + + c.Assert(cmd1.Start(), checker.IsNil) + defer cmd1.Process.Kill() + time.Sleep(100 * time.Millisecond) // ensure API has been called + c.Assert(cmd2.Start(), checker.IsNil) + + go func() { + cmd1.Wait() + close(chCmd1) + }() + go func() { + chCmd2 <- cmd2.Wait() + }() + + select { + case <-chCmd1: + cmd2.Process.Kill() + c.Fatalf("volume create with down driver finished unexpectedly") + case err := <-chCmd2: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + cmd2.Process.Kill() + c.Fatal("volume creates are blocked by previous create requests when previous driver is down") + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { + s.d.StartWithBusybox(c) + driverName := "test-external-volume-driver-retry" + + errchan := make(chan error) + started := make(chan struct{}) + go func() { + close(started) + if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", driverName, "busybox:latest"); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + <-started + // wait for a retry to occur, then create spec to allow plugin to register + time.Sleep(2 * time.Second) + p := newVolumePlugin(c, driverName) + defer p.Close() + + select { + case err := <-errchan: + c.Assert(err, checker.IsNil) + case <-time.After(8 * time.Second): + c.Fatal("volume creates fail when plugin not immediately available") + } + + _, err := s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + c.Assert(p.ec.activations, checker.Equals, 1) + c.Assert(p.ec.creations, checker.Equals, 1) + c.Assert(p.ec.removals, checker.Equals, 1) + c.Assert(p.ec.mounts, checker.Equals, 1) + c.Assert(p.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "foo") + dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") + + var mounts []struct { + Name string + Driver string + } + out := inspectFieldJSON(c, "testing", "Mounts") + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +func (s *DockerExternalVolumeSuite) 
TestExternalVolumeDriverList(c *check.C) {
+	dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc3")
+	out, _ := dockerCmd(c, "volume", "ls")
+	ls := strings.Split(strings.TrimSpace(out), "\n")
+	c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out))
+
+	vol := strings.Fields(ls[len(ls)-1])
+	c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol))
+	c.Assert(vol[0], check.Equals, volumePluginName)
+	c.Assert(vol[1], check.Equals, "abc3")
+
+	c.Assert(s.ec.lists, check.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) {
+	out, _, err := dockerCmdWithError("volume", "inspect", "dummy")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "No such volume")
+	c.Assert(s.ec.gets, check.Equals, 1)
+
+	dockerCmd(c, "volume", "create", "test", "-d", volumePluginName)
+	out, _ = dockerCmd(c, "volume", "inspect", "test")
+
+	type vol struct {
+		Status map[string]string
+	}
+	var st []vol
+
+	c.Assert(json.Unmarshal([]byte(out), &st), checker.IsNil)
+	c.Assert(st, checker.HasLen, 1)
+	c.Assert(st[0].Status, checker.HasLen, 1, check.Commentf("%v", st[0]))
+	c.Assert(st[0].Status["Hello"], checker.Equals, "world", check.Commentf("%v", st[0].Status))
+}
+
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) {
+	dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1")
+	s.d.Restart(c)
+
+	dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true")
+	var mounts []types.MountPoint
+	inspectFieldAndUnmarshall(c, "test", "Mounts", &mounts)
+	c.Assert(mounts, checker.HasLen, 1)
+	c.Assert(mounts[0].Driver, checker.Equals, volumePluginName)
+}
+
+// Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error.
+// Previously, the daemon would panic in this scenario.
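+//
+// A "ninja" volume makes the test plugin reply to Get with an empty JSON
+// object (see the /VolumeDriver.Get handler above), which the daemon decodes
+// as roughly:
+//
+//	{"Volume": null, "Err": null}
+//
+// i.e. neither a volume nor an error.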
+func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { + s.d.Start(c) + + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "abc2") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") +} + +// Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { + s.d.Start(c) + c.Assert(s.ec.paths, checker.Equals, 0) + + out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + c.Assert(s.ec.paths, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +// Check that VolumeDriver.Capabilities gets called, and only called once +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { + s.d.Start(c) + c.Assert(s.ec.caps, checker.Equals, 0) + + for i := 0; i < 3; i++ { + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.caps, checker.Equals, 1) + out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, volume.GlobalScope) + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) { + driverName := stringid.GenerateNonCryptoID() + p := newVolumePlugin(c, driverName) + defer p.Close() + + s.d.StartWithBusybox(c) + + out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "must be unique") + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test re-create with same driver + out, err = s.d.Cmd("volume", "create", "-d", driverName, "--opt", "foo=bar", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var vs []types.Volume + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Driver, checker.Equals, driverName) + c.Assert(vs[0].Options, checker.NotNil) + c.Assert(vs[0].Options["foo"], checker.Equals, "bar") + c.Assert(vs[0].Driver, checker.Equals, driverName) + + // simulate out of 
band volume deletion on plugin level + delete(p.vols, "test") + + // test create with different driver + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + vs = nil + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Options, checker.HasLen, 0) + c.Assert(vs[0].Driver, checker.Equals, "local") +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) { + s.d.StartWithBusybox(c) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") + + out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) + out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *check.C) { + s.d.StartWithBusybox(c) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test") + + out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", "/bin/sh", "-c", "touch /test && top") + c.Assert(s.ec.mounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("cp", "test:/test", "/tmp/test") + c.Assert(s.ec.mounts, checker.Equals, 2, check.Commentf(out)) + c.Assert(s.ec.unmounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("kill", "test") + c.Assert(s.ec.unmounts, checker.Equals, 2, check.Commentf(out)) +} diff --git a/integration-cli/docker_cli_health_test.go b/integration-cli/docker_cli_health_test.go new file mode 100644 index 0000000..0f78a41 --- /dev/null +++ b/integration-cli/docker_cli_health_test.go @@ -0,0 +1,164 @@ +package main + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/go-check/check" +) + +func waitForHealthStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func getHealth(c *check.C, name string) *types.Health { + out, _ := dockerCmd(c, "inspect", "--format={{json .State.Health}}", name) + var health types.Health + err := json.Unmarshal([]byte(out), &health) + c.Check(err, checker.Equals, nil) + return &health +} + +func (s *DockerSuite) TestHealth(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD cat /status`)) + + // No health status before starting + name := "test_health" + dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") + c.Check(out, checker.Equals, "Created\n") + + // Inspect the options + out, _ = dockerCmd(c, "inspect", + 
"--format=timeout={{.Config.Healthcheck.Timeout}} interval={{.Config.Healthcheck.Interval}} retries={{.Config.Healthcheck.Retries}} test={{.Config.Healthcheck.Test}}", name) + c.Check(out, checker.Equals, "timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + + // Make it fail + dockerCmd(c, "exec", name, "rm", "/status") + waitForHealthStatus(c, name, "healthy", "unhealthy") + + // Inspect the status + out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + c.Check(out, checker.Equals, "unhealthy\n") + + // Make it healthy again + dockerCmd(c, "exec", name, "touch", "/status") + waitForHealthStatus(c, name, "unhealthy", "healthy") + + // Remove container + dockerCmd(c, "rm", "-f", name) + + // Disable the check from the CLI + out, _ = dockerCmd(c, "create", "--name=noh", "--no-healthcheck", imageName) + out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "noh") + c.Check(out, checker.Equals, "[NONE]\n") + dockerCmd(c, "rm", "noh") + + // Disable the check with a new build + buildImageSuccessfully(c, "no_healthcheck", build.WithDockerfile(`FROM testhealth + HEALTHCHECK NONE`)) + + out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") + c.Check(out, checker.Equals, "[NONE]\n") + + // Enable the checks from the CLI + _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", + "--health-interval=1s", + "--health-retries=3", + "--health-cmd=cat /status", + "no_healthcheck") + waitForHealthStatus(c, "fatal_healthcheck", "starting", "healthy") + health := getHealth(c, "fatal_healthcheck") + c.Check(health.Status, checker.Equals, "healthy") + c.Check(health.FailingStreak, checker.Equals, 0) + last := health.Log[len(health.Log)-1] + c.Check(last.ExitCode, checker.Equals, 0) + c.Check(last.Output, checker.Equals, "OK\n") + + // Fail the check + dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status") + waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy") + + failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", "fatal_healthcheck") + fails, err := strconv.Atoi(strings.TrimSpace(failsStr)) + c.Check(err, check.IsNil) + c.Check(fails >= 3, checker.Equals, true) + dockerCmd(c, "rm", "-f", "fatal_healthcheck") + + // Check timeout + // Note: if the interval is too small, it seems that Docker spends all its time running health + // checks and never gets around to killing it. 
+ _, _ = dockerCmd(c, "run", "-d", "--name=test", + "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1s", imageName) + waitForHealthStatus(c, "test", "starting", "unhealthy") + health = getHealth(c, "test") + last = health.Log[len(health.Log)-1] + c.Check(health.Status, checker.Equals, "unhealthy") + c.Check(last.ExitCode, checker.Equals, -1) + c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1s)") + dockerCmd(c, "rm", "-f", "test") + + // Check JSON-format + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD ["cat", "/my status"]`)) + out, _ = dockerCmd(c, "inspect", + "--format={{.Config.Healthcheck.Test}}", imageName) + c.Check(out, checker.Equals, "[CMD cat /my status]\n") + +} + +// Github #33021 +func (s *DockerSuite) TestUnsetEnvVarHealthCheck(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox +HEALTHCHECK --interval=1s --timeout=5s --retries=5 CMD /bin/sh -c "sleep 1" +ENTRYPOINT /bin/sh -c "sleep 600"`)) + + name := "env_test_health" + // No health status before starting + dockerCmd(c, "run", "-d", "--name", name, "-e", "FOO", imageName) + defer func() { + dockerCmd(c, "rm", "-f", name) + dockerCmd(c, "rmi", imageName) + }() + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + +} diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go new file mode 100644 index 0000000..d1dcd8c --- /dev/null +++ b/integration-cli/docker_cli_help_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "runtime" + "strings" + "unicode" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/pkg/homedir" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { + // FIXME(vdemeester) should be a unit test, probably using golden files ? + testRequires(c, DaemonIsLinux) + + // Make sure main help text fits within 80 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Test for HOME set to its default value and set to "/" on linux + // Yes on windows setting up an array and looping (right now) isn't + // necessary because we just have one value, but we'll need the + // array/loop on linux so we might as well set it up so that we can + // test any number of home dirs later on and all we need to do is + // modify the array - the rest of the testing infrastructure should work + homes := []string{homedir.Get()} + + // Non-Windows machines need to test for this special case of $HOME + if runtime.GOOS != "windows" { + homes = append(homes, "/") + } + + homeKey := homedir.Key() + baseEnvs := appendBaseEnv(true) + + // Remove HOME env var from list so we can add a new value later. + for i, env := range baseEnvs { + if strings.HasPrefix(env, homeKey+"=") { + baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) 
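+			// (Mutating baseEnvs while ranging over it is only safe because
+			// we break out right after this first match.)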
+ break + } + } + + for _, home := range homes { + + // Dup baseEnvs and add our new HOME value + newEnvs := make([]string, len(baseEnvs)+1) + copy(newEnvs, baseEnvs) + newEnvs[len(newEnvs)-1] = homeKey + "=" + home + + scanForHome := runtime.GOOS != "windows" && home != "/" + + // Check main help text to make sure its not over 80 chars + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "help"}, + Env: newEnvs, + }) + result.Assert(c, icmd.Success) + lines := strings.Split(result.Combined(), "\n") + for _, line := range lines { + // All lines should not end with a space + c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) + + if scanForHome && strings.Contains(line, `=`+home) { + c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) + } + if runtime.GOOS != "windows" { + i := strings.Index(line, homedir.GetShortcutString()) + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + c.Fatalf("Main help should not have used home shortcut:\n%s", line) + } + } + } + + // Make sure each cmd's help text fits within 90 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Pull the list of commands from the "Commands:" section of docker help + // FIXME(vdemeester) Why re-run help ? + //helpCmd = exec.Command(dockerBinary, "help") + //helpCmd.Env = newEnvs + //out, _, err = runCommandWithOutput(helpCmd) + //c.Assert(err, checker.IsNil, check.Commentf(out)) + i := strings.Index(result.Combined(), "Commands:") + c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", result.Combined())) + + cmds := []string{} + // Grab all chars starting at "Commands:" + helpOut := strings.Split(result.Combined()[i:], "\n") + // Skip first line, it is just "Commands:" + helpOut = helpOut[1:] + + // Create the list of commands we want to test + cmdsToTest := []string{} + for _, cmd := range helpOut { + // Stop on blank line or non-indented line + if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { + break + } + + // Grab just the first word of each line + cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] + cmds = append(cmds, cmd) // Saving count for later + + cmdsToTest = append(cmdsToTest, cmd) + } + + // Add some 'two word' commands - would be nice to automatically + // calculate this list - somehow + cmdsToTest = append(cmdsToTest, "volume create") + cmdsToTest = append(cmdsToTest, "volume inspect") + cmdsToTest = append(cmdsToTest, "volume ls") + cmdsToTest = append(cmdsToTest, "volume rm") + cmdsToTest = append(cmdsToTest, "network connect") + cmdsToTest = append(cmdsToTest, "network create") + cmdsToTest = append(cmdsToTest, "network disconnect") + cmdsToTest = append(cmdsToTest, "network inspect") + cmdsToTest = append(cmdsToTest, "network ls") + cmdsToTest = append(cmdsToTest, "network rm") + + if testEnv.ExperimentalDaemon() { + cmdsToTest = append(cmdsToTest, "checkpoint create") + cmdsToTest = append(cmdsToTest, "checkpoint ls") + cmdsToTest = append(cmdsToTest, "checkpoint rm") + } + + // Divide the list of commands into go routines and run the func testcommand on the commands in parallel + // to save runtime of test + + errChan := make(chan error) + + for index := 0; index < len(cmdsToTest); index++ { + go func(index int) { + errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home) + }(index) + } + + for index := 0; index < len(cmdsToTest); index++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } + } +} + 
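+// The per-command checks above fan out one goroutine per command over an
+// unbuffered channel, so the collector loop must drain exactly
+// len(cmdsToTest) results or a sender would block forever. A minimal sketch
+// of the pattern (hypothetical names, not part of this patch):
+//
+//	errChan := make(chan error)
+//	for _, cmd := range cmds {
+//		go func(cmd string) { errChan <- checkOne(cmd) }(cmd)
+//	}
+//	for range cmds {
+//		if err := <-errChan; err != nil {
+//			c.Fatal(err)
+//		}
+//	}
+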
+func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) { + // Test to make sure the exit code and output (stdout vs stderr) of + // various good and bad cases are what we expect + + // docker : stdout=all, stderr=empty, rc=0 + out := cli.DockerCmd(c).Combined() + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n")) + + // docker help: stdout=all, stderr=empty, rc=0 + out = cli.DockerCmd(c, "help").Combined() + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n")) + + // docker --help: stdout=all, stderr=empty, rc=0 + out = cli.DockerCmd(c, "--help").Combined() + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n")) + + // docker inspect busybox: stdout=all, stderr=empty, rc=0 + // Just making sure stderr is empty on valid cmd + out = cli.DockerCmd(c, "inspect", "busybox").Combined() + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busyBox'\n")) + + // docker rm: stdout=empty, stderr=all, rc!=0 + // testing the min arg error msg + cli.Docker(cli.Args("rm")).Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Out: "", + // Should not contain full help text but should contain info about + // # of args and Usage line + Err: "requires at least 1 argument", + }) + + // docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0 + // testing to make sure no blank line on error + result := cli.Docker(cli.Args("rm", "NoSuchContainer")).Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Out: "", + }) + // Be really picky + c.Assert(len(result.Stderr()), checker.Not(checker.Equals), 0) + c.Assert(result.Stderr(), checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) + + // docker BadCmd: stdout=empty, stderr=all, rc=0 + cli.Docker(cli.Args("BadCmd")).Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Out: "", + Err: "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'\n", + }) +} + +func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error { + + args := strings.Split(cmd+" --help", " ") + + // Check the full usage text + result := icmd.RunCmd(icmd.Cmd{ + Command: append([]string{dockerBinary}, args...), + Env: newEnvs, + }) + err := result.Error + out := result.Stdout() + stderr := result.Stderr() + if len(stderr) != 0 { + return fmt.Errorf("Error on %q help. non-empty stderr:%q\n", cmd, stderr) + } + if strings.HasSuffix(out, "\n\n") { + return fmt.Errorf("Should not have blank line on %q\n", cmd) + } + if !strings.Contains(out, "--help") { + return fmt.Errorf("All commands should mention '--help'. 
+
+	if err != nil {
+		return fmt.Errorf("%s", out)
+	}
+
+	// Check each line of the help output
+	lines := strings.Split(out, "\n")
+	for _, line := range lines {
+		i := strings.Index(line, "~")
+		if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
+			return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line)
+		}
+
+		// Options should NOT end with a period
+		if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") {
+			return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line)
+		}
+
+		// Options should NOT end with a space
+		if strings.HasSuffix(line, " ") {
+			return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line)
+		}
+
+	}
+
+	// For each command make sure we generate an error
+	// if we give a bad arg
+	args = strings.Split(cmd+" --badArg", " ")
+
+	out, _, err = dockerCmdWithError(args...)
+	if err == nil {
+		return fmt.Errorf("%s", out)
+	}
+
+	// Be really picky
+	if strings.HasSuffix(stderr, "\n\n") {
+		return fmt.Errorf("Should not have a blank line at the end of %q stderr\n", cmd)
+	}
+
+	// Now make sure that each command will print a short-usage
+	// (not a full usage - meaning no opts section) if we
+	// are missing a required arg or pass in a bad arg
+
+	// These commands will never print a short-usage so don't test them
+	noShortUsage := map[string]string{
+		"images":        "",
+		"login":         "",
+		"logout":        "",
+		"network":       "",
+		"stats":         "",
+		"volume create": "",
+	}
+
+	if _, ok := noShortUsage[cmd]; !ok {
+		// skipNoArgs are ones that we don't want to try w/o
+		// any args. Either because it'll hang the test or
+		// lead to an incorrect test result (like a false negative).
+		// Whatever the reason, skip trying to run w/o args and
+		// jump to trying with a bogus arg.
+		skipNoArgs := map[string]struct{}{
+			"daemon": {},
+			"events": {},
+			"load":   {},
+		}
+
+		var result *icmd.Result
+		if _, ok := skipNoArgs[cmd]; !ok {
+			result = dockerCmdWithResult(strings.Split(cmd, " ")...)
+		}
+
+		// If it's OK w/o any args then try again with an arg
+		if result == nil || result.ExitCode == 0 {
+			result = dockerCmdWithResult(strings.Split(cmd+" badArg", " ")...)
+		}
+
+		if err := result.Compare(icmd.Expected{
+			Out:      icmd.None,
+			Err:      "\nUsage:",
+			ExitCode: 1,
+		}); err != nil {
+			return err
+		}
+
+		stderr := result.Stderr()
+		// Shouldn't have full usage
+		if strings.Contains(stderr, "--help=false") {
+			return fmt.Errorf("Should not have full usage on %q:%v", result.Cmd.Args, stderr)
+		}
+		if strings.HasSuffix(stderr, "\n\n") {
+			return fmt.Errorf("Should not have a blank line on %q\n%v", result.Cmd.Args, stderr)
+		}
+	}
+
+	return nil
+}
diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go
new file mode 100644
index 0000000..43c4b94
--- /dev/null
+++ b/integration-cli/docker_cli_history_test.go
@@ -0,0 +1,119 @@
+package main
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/go-check/check"
+)
+
+// This is a heisen-test. Because image creation timestamps and the behavior
+// of sort are not predictable, it doesn't always fail.
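+// A fixed A-to-Z ordering of 26 LABEL layers makes any mis-sort visible in the
+// history output below.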
+func (s *DockerSuite) TestBuildHistory(c *check.C) {
+	name := "testbuildhistory"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
+LABEL label.A="A"
+LABEL label.B="B"
+LABEL label.C="C"
+LABEL label.D="D"
+LABEL label.E="E"
+LABEL label.F="F"
+LABEL label.G="G"
+LABEL label.H="H"
+LABEL label.I="I"
+LABEL label.J="J"
+LABEL label.K="K"
+LABEL label.L="L"
+LABEL label.M="M"
+LABEL label.N="N"
+LABEL label.O="O"
+LABEL label.P="P"
+LABEL label.Q="Q"
+LABEL label.R="R"
+LABEL label.S="S"
+LABEL label.T="T"
+LABEL label.U="U"
+LABEL label.V="V"
+LABEL label.W="W"
+LABEL label.X="X"
+LABEL label.Y="Y"
+LABEL label.Z="Z"`))
+
+	out, _ := dockerCmd(c, "history", name)
+	actualValues := strings.Split(out, "\n")[1:27]
+	expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"}
+
+	for i := 0; i < 26; i++ {
+		echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i])
+		actualValue := actualValues[i]
+		c.Assert(actualValue, checker.Contains, echoValue)
+	}
+
+}
+
+func (s *DockerSuite) TestHistoryExistentImage(c *check.C) {
+	dockerCmd(c, "history", "busybox")
+}
+
+func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) {
+	_, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage")
+	c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail."))
+}
+
+func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) {
+	name := "testhistoryimagewithcomment"
+
+	// make an image through docker commit [-m message]
+
+	dockerCmd(c, "run", "--name", name, "busybox", "true")
+	dockerCmd(c, "wait", name)
+
+	comment := "This_is_a_comment"
+	dockerCmd(c, "commit", "-m="+comment, name, name)
+
+	// check that docker history shows the commit message
+
+	out, _ := dockerCmd(c, "history", name)
+	outputTabs := strings.Fields(strings.Split(out, "\n")[1])
+	actualValue := outputTabs[len(outputTabs)-1]
+	c.Assert(actualValue, checker.Contains, comment)
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) {
+	out, _ := dockerCmd(c, "history", "--human=false", "busybox")
+	lines := strings.Split(out, "\n")
+	sizeColumnRegex := regexp.MustCompile("SIZE +")
+	indices := sizeColumnRegex.FindStringIndex(lines[0])
+	startIndex := indices[0]
+	endIndex := indices[1]
+	for i := 1; i < len(lines)-1; i++ {
+		if endIndex > len(lines[i]) {
+			endIndex = len(lines[i])
+		}
+		sizeString := lines[i][startIndex:endIndex]
+
+		_, err := strconv.Atoi(strings.TrimSpace(sizeString))
+		c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an integer", sizeString))
+	}
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) {
+	out, _ := dockerCmd(c, "history", "--human=true", "busybox")
+	lines := strings.Split(out, "\n")
+	sizeColumnRegex := regexp.MustCompile("SIZE +")
+	humanSizeRegexRaw := "\\d+.*B" // Matches human-readable sizes like 10 MB, 3.2 KB, etc.
+	indices := sizeColumnRegex.FindStringIndex(lines[0])
+	startIndex := indices[0]
+	endIndex := indices[1]
+	for i := 1; i < len(lines)-1; i++ {
+		if endIndex > len(lines[i]) {
+			endIndex = len(lines[i])
+		}
+		sizeString := lines[i][startIndex:endIndex]
+		c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString))
+	}
+}
diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go
new file mode 100644
index 0000000..dccbe12
--- /dev/null
+++ b/integration-cli/docker_cli_images_test.go
@@ -0,0 +1,366 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/pkg/stringid"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) {
+	imagesOut, _ := dockerCmd(c, "images")
+	c.Assert(imagesOut, checker.Contains, "busybox")
+}
+
+func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) {
+	name := "imagewithtag"
+	dockerCmd(c, "tag", "busybox", name+":v1")
+	dockerCmd(c, "tag", "busybox", name+":v1v1")
+	dockerCmd(c, "tag", "busybox", name+":v2")
+
+	imagesOut, _ := dockerCmd(c, "images", name+":v1")
+	c.Assert(imagesOut, checker.Contains, name)
+	c.Assert(imagesOut, checker.Contains, "v1")
+	c.Assert(imagesOut, checker.Not(checker.Contains), "v2")
+	c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1")
+
+	imagesOut, _ = dockerCmd(c, "images", name)
+	c.Assert(imagesOut, checker.Contains, name)
+	c.Assert(imagesOut, checker.Contains, "v1")
+	c.Assert(imagesOut, checker.Contains, "v1v1")
+	c.Assert(imagesOut, checker.Contains, "v2")
+}
+
+func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) {
+	imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent")
+	c.Assert(imagesOut, checker.Not(checker.Contains), "busybox")
+}
+
+func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) {
+	buildImageSuccessfully(c, "order:test_a", build.WithDockerfile(`FROM busybox
+	MAINTAINER dockerio1`))
+	id1 := getIDByName(c, "order:test_a")
+	time.Sleep(1 * time.Second)
+	buildImageSuccessfully(c, "order:test_c", build.WithDockerfile(`FROM busybox
+	MAINTAINER dockerio2`))
+	id2 := getIDByName(c, "order:test_c")
+	time.Sleep(1 * time.Second)
+	buildImageSuccessfully(c, "order:test_b", build.WithDockerfile(`FROM busybox
+	MAINTAINER dockerio3`))
+	id3 := getIDByName(c, "order:test_b")
+
+	out, _ := dockerCmd(c, "images", "-q", "--no-trunc")
+	imgs := strings.Split(out, "\n")
+	c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0]))
+	c.Assert(imgs[1], checker.Equals, id2, check.Commentf("Second image must be %s, got %s", id2, imgs[1]))
+	c.Assert(imgs[2], checker.Equals, id1, check.Commentf("Third image must be %s, got %s", id1, imgs[2]))
+}
+
+func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) {
+	out, _, err := dockerCmdWithError("images", "-f", "FOO=123")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "Invalid filter")
+}
+
+func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) {
+	imageName1 := "images_filter_test1"
+	imageName2 := "images_filter_test2"
+	imageName3 := "images_filter_test3"
+	buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox
+	LABEL match me`))
+	image1ID := getIDByName(c, imageName1)
+
+	buildImageSuccessfully(c, imageName2, build.WithDockerfile(`FROM busybox
+	LABEL match="me too"`))
+	image2ID := getIDByName(c, imageName2)
+
+	buildImageSuccessfully(c, imageName3, build.WithDockerfile(`FROM busybox
+	LABEL nomatch me`))
+	image3ID := getIDByName(c, imageName3)
+
+	out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match")
+	out = strings.TrimSpace(out)
+	c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID))
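+	// image2 also carries the "match" key (with a value), so a bare key filter must list it too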
+	c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID))
+	c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID))
+
+	out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too")
+	out = strings.TrimSpace(out)
+	c.Assert(out, check.Equals, image2ID)
+}
+
+// Regression test for #15659
+func (s *DockerSuite) TestCommitWithFilterLabel(c *check.C) {
+	// Create a container
+	dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh")
+	// Commit with labels applied via "-c" changes
+	out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1")
+	imageID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1")
+	out = strings.TrimSpace(out)
+	c.Assert(out, check.Equals, imageID)
+}
+
+func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) {
+	buildImageSuccessfully(c, "image:1", build.WithDockerfile(`FROM `+minimalBaseImage()+`
+LABEL number=1`))
+	imageID1 := getIDByName(c, "image:1")
+	buildImageSuccessfully(c, "image:2", build.WithDockerfile(`FROM `+minimalBaseImage()+`
+LABEL number=2`))
+	imageID2 := getIDByName(c, "image:2")
+	buildImageSuccessfully(c, "image:3", build.WithDockerfile(`FROM `+minimalBaseImage()+`
+LABEL number=3`))
+	imageID3 := getIDByName(c, "image:3")
+
+	expected := []string{imageID3, imageID2}
+
+	out, _ := dockerCmd(c, "images", "-f", "since=image:1", "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	out, _ = dockerCmd(c, "images", "-f", "since="+imageID1, "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	expected = []string{imageID3}
+
+	out, _ = dockerCmd(c, "images", "-f", "since=image:2", "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	out, _ = dockerCmd(c, "images", "-f", "since="+imageID2, "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	expected = []string{imageID2, imageID1}
+
+	out, _ = dockerCmd(c, "images", "-f", "before=image:3", "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	out, _ = dockerCmd(c, "images", "-f", "before="+imageID3, "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	expected = []string{imageID1}
+
+	out, _ = dockerCmd(c, "images", "-f", "before=image:2", "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out))
+
+	out, _ = dockerCmd(c, "images", "-f", "before="+imageID2, "image")
+	c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out))
+}
+
+func assertImageList(out string, expected []string) bool {
+	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+
+	if len(lines)-1 != len(expected) {
+		return false
+	}
+
+	imageIDIndex := strings.Index(lines[0], "IMAGE ID")
+	for i := 0; i < len(expected); i++ {
+		imageID := lines[i+1][imageIDIndex : imageIDIndex+12]
+		found := false
+		for _, e := range expected {
+			if imageID == e[7:19] {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+
+	return true
+}
+
+// FIXME(vdemeester) should be a unit test on `docker image ls`
+func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) {
+	imageName := "images_filter_test"
+	// Build an image that fails to build so that we end up with dangling images
+	buildImage(imageName, build.WithDockerfile(`FROM busybox
+	RUN touch /test/foo
+	RUN touch /test/bar
+	RUN touch /test/baz`)).Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+
+	filters := []string{
+		"dangling=true",
+		"Dangling=true",
+		" dangling=true",
+		"dangling=true ",
+		"dangling = true",
+	}
+
+	imageListings := make([][]string, len(filters))
+	for idx, filter := range filters {
+		out, _ := dockerCmd(c, "images", "-q", "-f", filter)
+		listing := strings.Split(out, "\n")
+		sort.Strings(listing)
+		imageListings[idx] = listing
+	}
+
+	for idx, listing := range imageListings {
+		if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) {
+			for idx, errListing := range imageListings {
+				fmt.Printf("out %d\n", idx)
+				for _, image := range errListing {
+					fmt.Print(image)
+				}
+				fmt.Println()
+			}
+			c.Fatalf("All output must be the same")
+		}
+	}
+}
+
+func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	// create container 1
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+	containerID1 := strings.TrimSpace(out)
+
+	// tag as foobox
+	out, _ = dockerCmd(c, "commit", containerID1, "foobox")
+	imageID := stringid.TruncateID(strings.TrimSpace(out))
+
+	// overwrite the tag, making the previous image dangling
+	dockerCmd(c, "tag", "busybox", "foobox")
+
+	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
+	// Expect one dangling image
+	c.Assert(strings.Count(out, imageID), checker.Equals, 1)
+
+	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false")
+	// dangling=false must not include dangling images
+	c.Assert(out, checker.Not(checker.Contains), imageID)
+
+	out, _ = dockerCmd(c, "images")
+	// plain docker images still includes dangling images
+	c.Assert(out, checker.Contains, imageID)
+
+}
+
+// FIXME(vdemeester) should be a unit test for `docker image ls`
+func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) {
+	out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, "Invalid filter")
+}
+
+func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) {
+	dockerfile := `
+	FROM busybox
+	MAINTAINER docker
+	ENV foo bar`
+	name := "scratch-image"
+	result := buildImage(name, build.WithDockerfile(dockerfile))
+	result.Assert(c, icmd.Success)
+	id := getIDByName(c, name)
+
+	// This parses the output of docker build; the intermediate image ID of
+	// the MAINTAINER step is on line 5 of the output, from column 7 to the end.
+	split := strings.Split(result.Combined(), "\n")
+	intermediate := strings.TrimSpace(split[5][7:])
+
+	out, _ := dockerCmd(c, "images")
+	// images shouldn't show non-heads images
+	c.Assert(out, checker.Not(checker.Contains), intermediate)
+	// images should contain final built images
+	c.Assert(out, checker.Contains, stringid.TruncateID(id))
+}
+
+func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support FROM scratch
+	dockerfile := `
+	FROM scratch
+	MAINTAINER docker`
+
+	name := "scratch-image"
+	buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
+	id := getIDByName(c, name)
+
+	out, _ := dockerCmd(c, "images")
+	// images should contain images built from scratch
+	c.Assert(out, checker.Contains, stringid.TruncateID(id))
+}
+
+// For W2W - equivalent to TestImagesEnsureImagesFromScratchShown, but Windows
+// doesn't support FROM scratch
+func (s *DockerSuite) TestImagesEnsureImagesFromBusyboxShown(c *check.C) {
+	dockerfile := `
+	FROM busybox
+	MAINTAINER docker`
+	name := "busybox-image"
+
+	buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
+	id := getIDByName(c, name)
+
+	out, _ := dockerCmd(c, "images")
+	// images should contain images built from busybox
+	c.Assert(out, checker.Contains, stringid.TruncateID(id))
+}
+
+// #18181
+func (s *DockerSuite) TestImagesFilterNameWithPort(c *check.C) {
+	tag := "a.b.c.d:5000/hello"
+	dockerCmd(c, "tag", "busybox", tag)
+	out, _ := dockerCmd(c, "images", tag)
+	c.Assert(out, checker.Contains, tag)
+
+	out, _ = dockerCmd(c, "images", tag+":latest")
+	c.Assert(out, checker.Contains, tag)
+
+	out, _ = dockerCmd(c, "images", tag+":no-such-tag")
+	c.Assert(out, checker.Not(checker.Contains), tag)
+}
+
+func (s *DockerSuite) TestImagesFormat(c *check.C) {
+	// testRequires(c, DaemonIsLinux)
+	tag := "myimage"
+	dockerCmd(c, "tag", "busybox", tag+":v1")
+	dockerCmd(c, "tag", "busybox", tag+":v2")
+
+	out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag)
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+
+	expected := []string{"myimage", "myimage"}
+	var names []string
+	names = append(names, lines...)
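+	// Both tags point at the same repository, so the name should appear once per tag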
+	c.Assert(names, checker.DeepEquals, expected, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
+}
+
+// ImagesDefaultFormatAndQuiet
+func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	// create container 1
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+	containerID1 := strings.TrimSpace(out)
+
+	// commit it as myimage
+	out, _ = dockerCmd(c, "commit", containerID1, "myimage")
+	imageID := stringid.TruncateID(strings.TrimSpace(out))
+
+	config := `{
+	"imagesFormat": "{{ .ID }} default"
+}`
+	d, err := ioutil.TempDir("", "integration-cli-")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(d)
+
+	err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644)
+	c.Assert(err, checker.IsNil)
+
+	out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage")
+	c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out))
+}
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
new file mode 100644
index 0000000..711f39b
--- /dev/null
+++ b/integration-cli/docker_cli_import_test.go
@@ -0,0 +1,143 @@
+package main
+
+import (
+	"bufio"
+	"compress/gzip"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/pkg/testutil"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestImportDisplay(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _, err := testutil.RunCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "export", cleanedContainerID),
+		exec.Command(dockerBinary, "import", "-"),
+	)
+	c.Assert(err, checker.IsNil)
+
+	c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+
+	image := strings.TrimSpace(out)
+	out, _ = dockerCmd(c, "run", "--rm", image, "true")
+	c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
+}
+
+func (s *DockerSuite) TestImportBadURL(c *check.C) {
+	out, _, err := dockerCmdWithError("import", "http://nourl/bad")
+	c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't"))
+	// Depending on your system you can get either of these errors
+	if !strings.Contains(out, "dial tcp") &&
+		!strings.Contains(out, "ApplyLayer exit status 1 stdout: stderr: archive/tar: invalid tar header") &&
+		!strings.Contains(out, "Error processing tar file") {
+		c.Fatalf("expected an error msg but didn't get one.\nErr: %v\nOut: %v", err, out)
+	}
+}
+
+func (s *DockerSuite) TestImportFile(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+	temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+	defer os.Remove(temporaryFile.Name())
+
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "export", "test-import"},
+		Stdout:  bufio.NewWriter(temporaryFile),
+	}).Assert(c, icmd.Success)
+
+	out, _ := dockerCmd(c, "import", temporaryFile.Name())
+	c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+	image := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "run", "--rm", image, "true")
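+	// A container from the imported image should run silently, proving the rootfs survived the round trip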
+	c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
+}
+
+func (s *DockerSuite) TestImportGzipped(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+	temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+	defer os.Remove(temporaryFile.Name())
+
+	w := gzip.NewWriter(temporaryFile)
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "export", "test-import"},
+		Stdout:  w,
+	}).Assert(c, icmd.Success)
+	c.Assert(w.Close(), checker.IsNil, check.Commentf("failed to close gzip writer"))
+	temporaryFile.Close()
+	out, _ := dockerCmd(c, "import", temporaryFile.Name())
+	c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+	image := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "run", "--rm", image, "true")
+	c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
+}
+
+func (s *DockerSuite) TestImportFileWithMessage(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+	temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+	defer os.Remove(temporaryFile.Name())
+
+	icmd.RunCmd(icmd.Cmd{
+		Command: []string{dockerBinary, "export", "test-import"},
+		Stdout:  bufio.NewWriter(temporaryFile),
+	}).Assert(c, icmd.Success)
+
+	message := "Testing commit message"
+	out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name())
+	c.Assert(out, checker.Count, "\n", 1, check.Commentf("output should contain exactly one '\\n', but didn't"))
+	image := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "history", image)
+	split := strings.Split(out, "\n")
+
+	c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history"))
+	r := regexp.MustCompile("[\\s]{2,}")
+	split = r.Split(split[1], -1)
+
+	c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message"))
+
+	out, _ = dockerCmd(c, "run", "--rm", image, "true")
+	c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing"))
+}
+
+func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) {
+	_, _, err := dockerCmdWithError("import", "example.com/myImage.tar")
+	c.Assert(err, checker.NotNil, check.Commentf("importing a non-existent file must fail"))
+}
+
+func (s *DockerSuite) TestImportWithQuotedChanges(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	cli.DockerCmd(c, "run", "--name", "test-import", "busybox", "true")
+
+	temporaryFile, err := ioutil.TempFile("", "exportImportTest")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file"))
+	defer os.Remove(temporaryFile.Name())
+
+	cli.Docker(cli.Args("export", "test-import"), cli.WithStdout(bufio.NewWriter(temporaryFile))).Assert(c, icmd.Success)
+
+	result := cli.DockerCmd(c, "import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name())
+	image := strings.TrimSpace(result.Stdout())
+
+	result = cli.DockerCmd(c, "run", "--rm", image, "true")
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: icmd.None})
+}
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
new file mode 100644
index 0000000..4ccc9ae
--- /dev/null
+++ b/integration-cli/docker_cli_info_test.go
@@ -0,0 +1,243 @@
+package main
+
+import (
"encoding/json" + "fmt" + "net" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +// ensure docker info succeeds +func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "info") + + // always shown fields + stringsToCheck := []string{ + "ID:", + "Containers:", + " Running:", + " Paused:", + " Stopped:", + "Images:", + "OSType:", + "Architecture:", + "Logging Driver:", + "Operating System:", + "CPUs:", + "Total Memory:", + "Kernel Version:", + "Storage Driver:", + "Volume:", + "Network:", + "Live Restore Enabled:", + } + + if testEnv.DaemonPlatform() == "linux" { + stringsToCheck = append(stringsToCheck, "Init Binary:", "Security Options:", "containerd version:", "runc version:", "init version:") + } + + if DaemonIsLinux() { + stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc") + } + + if testEnv.ExperimentalDaemon() { + stringsToCheck = append(stringsToCheck, "Experimental: true") + } else { + stringsToCheck = append(stringsToCheck, "Experimental: false") + } + + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix)) + } +} + +// TestInfoFormat tests `docker info --format` +func (s *DockerSuite) TestInfoFormat(c *check.C) { + out, status := dockerCmd(c, "info", "--format", "{{json .}}") + c.Assert(status, checker.Equals, 0) + var m map[string]interface{} + err := json.Unmarshal([]byte(out), &m) + c.Assert(err, checker.IsNil) + _, _, err = dockerCmdWithError("info", "--format", "{{.badString}}") + c.Assert(err, checker.NotNil) +} + +// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and +// `--cluster-store` properly show the backend's endpoint in info output. 
+func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) {
+	c.Skip("swarm isn't supported")
+
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+	discoveryBackend := "consul://consuladdr:consulport/some/path"
+	discoveryAdvertise := "1.1.1.1:2375"
+	d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise))
+	defer d.Stop(c)
+
+	out, err := d.Cmd("info")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend))
+	c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s\n", discoveryAdvertise))
+}
+
+// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with
+// an invalid `--cluster-advertise` configuration fails to start
+func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+	discoveryBackend := "consul://consuladdr:consulport/some/path"
+
+	// --cluster-advertise with an invalid string is an error
+	err := d.StartWithError(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid")
+	c.Assert(err, checker.NotNil)
+
+	// --cluster-advertise without --cluster-store is also an error
+	err = d.StartWithError("--cluster-advertise=1.1.1.1:2375")
+	c.Assert(err, checker.NotNil)
+}
+
+// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise`
+// configured with an interface name properly shows the advertised IP address in info output.
+func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) {
+	c.Skip("swarm isn't supported")
+
+	testRequires(c, SameHostDaemon, Network, DaemonIsLinux)
+
+	d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+	discoveryBackend := "consul://consuladdr:consulport/some/path"
+	discoveryAdvertise := "eth0"
+
+	d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise))
+	defer d.Stop(c)
+
+	iface, err := net.InterfaceByName(discoveryAdvertise)
+	c.Assert(err, checker.IsNil)
+	addrs, err := iface.Addrs()
+	c.Assert(err, checker.IsNil)
+	c.Assert(len(addrs), checker.GreaterThan, 0)
+	ip, _, err := net.ParseCIDR(addrs[0].String())
+	c.Assert(err, checker.IsNil)
+
+	out, err := d.Cmd("info")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend))
+	c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s:2375\n", ip.String()))
+}
+
+func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	dockerCmd(c, "run", "-d", "busybox", "top")
+	out, _ := dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+}
+
+func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) {
+	testRequires(c, IsPausable)
+
+	out := runSleepingContainer(c, "-d")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	dockerCmd(c, "pause", cleanedContainerID)
+
+	out, _ = dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+}
+
+func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	dockerCmd(c, "stop", cleanedContainerID)
+
+	out, _ = dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1))
+}
+
+func (s *DockerSuite) TestInfoDebug(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+	d.Start(c, "--debug")
+	defer d.Stop(c)
+
+	out, err := d.Cmd("--debug", "info")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Debug Mode (client): true\n")
+	c.Assert(out, checker.Contains, "Debug Mode (server): true\n")
+	c.Assert(out, checker.Contains, "File Descriptors")
+	c.Assert(out, checker.Contains, "Goroutines")
+	c.Assert(out, checker.Contains, "System Time")
+	c.Assert(out, checker.Contains, "EventsListeners")
+	c.Assert(out, checker.Contains, "Docker Root Dir")
+}
+
+func (s *DockerSuite) TestInsecureRegistries(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	registryCIDR := "192.168.1.0/24"
+	registryHost := "insecurehost.com:5000"
+
+	d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: testEnv.ExperimentalDaemon(),
+	})
+	d.Start(c, "--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost)
+	defer d.Stop(c)
+
+	out, err := d.Cmd("info")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Insecure Registries:\n")
+	c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryHost))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryCIDR))
+}
+
+func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	registryMirror1 := "https://192.168.1.2"
+	registryMirror2 := "http://registry.mirror.com:5000"
+
+	s.d.Start(c, "--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2)
+
+	out, err := s.d.Cmd("info")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Registry Mirrors:\n")
+	c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror2))
+}
+
+// Test case for #24392
+func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) {
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	s.d.Start(c, "--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`)
+
+	out, err := s.d.Cmd("info")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated")
+}
diff --git a/integration-cli/docker_cli_info_unix_test.go b/integration-cli/docker_cli_info_unix_test.go
new file mode 100644
index 0000000..d55c05c
--- /dev/null
+++ b/integration-cli/docker_cli_info_unix_test.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package main
+
+import (
"github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux) + + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, "Security Options:\n apparmor\n seccomp\n Profile: default\n") +} diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 0000000..2b216be --- /dev/null +++ b/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,470 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func checkValidGraphDriver(c *check.C, name string) { + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } +} + +func (s *DockerSuite) TestInspectImage(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + // It is important that this ID remain stable. If a code change causes + // it to be different, this is equivalent to a cache bust when pulling + // a legacy-format manifest. If the check at the end of this function + // fails, fix the difference in the image serialization instead of + // updating this hash. + imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + id := inspectField(c, imageTest, "Id") + + c.Assert(id, checker.Equals, imageTestID) +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") + inspectOut := inspectField(c, "inspectTest", "HostConfig.Memory") + c.Assert(inspectOut, checker.Equals, "314572800") +} + +func (s *DockerSuite) TestInspectDefault(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch the container JSON. + //If the container JSON is not available, it will go for the image JSON. + + out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + containerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, "busybox", "Id") + c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) +} + +func (s *DockerSuite) TestInspectStatus(c *check.C) { + out := runSleepingContainer(c, "-d") + out = strings.TrimSpace(out) + + inspectOut := inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + + // Windows does not support pause/unpause on Windows Server Containers. + // (RS1 does for Hyper-V Containers, but production CI is not setup for that) + if testEnv.DaemonPlatform() != "windows" { + dockerCmd(c, "pause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "paused") + + dockerCmd(c, "unpause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + } + + dockerCmd(c, "stop", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "exited") + +} + +func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch container + //JSON State.Running field. If the field is true, it's a container. 
+ runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.State.Running}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(out, checker.Equals, "true\n") // not a container JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { + //Run this test on an image named busybox. docker inspect will try to fetch container + //JSON. Since there is no container named busybox and --type=container, docker inspect will + //not try to get the image JSON. It will throw an error. + + dockerCmd(c, "run", "-d", "busybox", "true") + + _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox") + // docker inspect should fail, as there is no container named busybox + c.Assert(err, checker.NotNil) +} + +func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch image + //JSON as --type=image. if there is no image with name busybox, docker inspect + //will throw an error. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, _ := dockerCmd(c, "inspect", "--type=image", "busybox") + c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { + //Both the container and image are named busybox. docker inspect will fail + //as --type=foobar is not a valid value for the flag. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err)) + c.Assert(out, checker.Contains, "not a valid value for --type") +} + +func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + out := inspectField(c, imageTest, "Size") + + size, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err)) + + //now see if the size turns out to be the same + formatStr := fmt.Sprintf("--format={{eq .Size %d}}", size) + out, _ = dockerCmd(c, "inspect", formatStr, imageTest) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat"}, + Stdin: strings.NewReader("blahblah"), + }) + result.Assert(c, icmd.Success) + out := result.Stdout() + id := strings.TrimSpace(out) + + out = inspectField(c, id, "State.ExitCode") + + exitCode, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err)) + + //now get the exit code to verify + formatStr := fmt.Sprintf("--format={{eq .State.ExitCode %d}}", exitCode) + out, _ = dockerCmd(c, "inspect", formatStr, id) + inspectResult, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(inspectResult, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + imageTest := "emptyfs" + name := inspectField(c, imageTest, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + deviceID := inspectField(c, imageTest, 
"GraphDriver.Data.DeviceId") + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, imageTest, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + out = strings.TrimSpace(out) + + name := inspectField(c, out, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + imageDeviceID := inspectField(c, "busybox", "GraphDriver.Data.DeviceId") + + deviceID := inspectField(c, out, "GraphDriver.Data.DeviceId") + + c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, out, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { + modifier := ",z" + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if testEnv.DaemonPlatform() == "windows" { + modifier = "" + // Linux creates the host directory if it doesn't exist. Windows does not. + os.Mkdir(`c:\data`, os.ModeDir) + } + + dockerCmd(c, "run", "-d", "--name", "test", "-v", prefix+slash+"data:"+prefix+slash+"data:ro"+modifier, "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, check.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "") + c.Assert(m.Driver, checker.Equals, "") + c.Assert(m.Source, checker.Equals, prefix+slash+"data") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + if testEnv.DaemonPlatform() != "windows" { // Windows does not set mode + c.Assert(m.Mode, checker.Equals, "ro"+modifier) + } + c.Assert(m.RW, checker.Equals, false) +} + +func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:"+prefix+slash+"data", "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, checker.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "data") + c.Assert(m.Driver, checker.Equals, "local") + c.Assert(m.Source, checker.Not(checker.Equals), "") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + c.Assert(m.RW, checker.Equals, true) +} + +// #14947 +func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + id := strings.TrimSpace(out) + startedAt := inspectField(c, id, "State.StartedAt") + finishedAt := inspectField(c, id, "State.FinishedAt") + created := inspectField(c, id, "Created") + + _, err := time.Parse(time.RFC3339Nano, startedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, 
+	c.Assert(err, checker.IsNil)
+	_, err = time.Parse(time.RFC3339Nano, created)
+	c.Assert(err, checker.IsNil)
+
+	created = inspectField(c, "busybox", "Created")
+
+	_, err = time.Parse(time.RFC3339Nano, created)
+	c.Assert(err, checker.IsNil)
+}
+
+// #15633
+func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) {
+	dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox")
+	var logConfig container.LogConfig
+
+	out := inspectFieldJSON(c, "test", "HostConfig.LogConfig")
+
+	err := json.NewDecoder(strings.NewReader(out)).Decode(&logConfig)
+	c.Assert(err, checker.IsNil, check.Commentf("%v", out))
+
+	c.Assert(logConfig.Type, checker.Equals, "json-file")
+	c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig))
+}
+
+func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) {
+
+	//Both the container and image are named busybox. docker inspect will fetch the container
+	//JSON SizeRw and SizeRootFs fields. If there is no flag --size/-s, there are no size fields.
+
+	runSleepingContainer(c, "--name=busybox", "-d")
+
+	formatStr := "--format={{.SizeRw}},{{.SizeRootFs}}"
+	out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox")
+	c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Expected not to display size info: %s", out))
+}
+
+func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) {
+	runSleepingContainer(c, "--name=busybox", "-d")
+
+	formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
+	out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox")
+	sz := strings.Split(out, ",")
+
+	c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "")
+	c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "")
+}
+
+func (s *DockerSuite) TestInspectTemplateError(c *check.C) {
+	// Template parsing error for both the container and image.
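+	// A reference to a nonexistent field must fail loudly rather than render empty output.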
+
+	runSleepingContainer(c, "--name=container1", "-d")
+
+	out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1")
+	c.Assert(err, check.Not(check.IsNil))
+	c.Assert(out, checker.Contains, "Template parsing error")
+
+	out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox")
+	c.Assert(err, check.Not(check.IsNil))
+	c.Assert(out, checker.Contains, "Template parsing error")
+}
+
+func (s *DockerSuite) TestInspectJSONFields(c *check.C) {
+	runSleepingContainer(c, "--name=busybox", "-d")
+	out, _, err := dockerCmdWithError("inspect", "--type=container", "--format={{.HostConfig.Dns}}", "busybox")
+
+	c.Assert(err, check.IsNil)
+	c.Assert(out, checker.Equals, "[]\n")
+}
+
+func (s *DockerSuite) TestInspectByPrefix(c *check.C) {
+	id := inspectField(c, "busybox", "Id")
+	c.Assert(id, checker.HasPrefix, "sha256:")
+
+	id2 := inspectField(c, id[:12], "Id")
+	c.Assert(id, checker.Equals, id2)
+
+	id3 := inspectField(c, strings.TrimPrefix(id, "sha256:")[:12], "Id")
+	c.Assert(id, checker.Equals, id3)
+}
+
+func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) {
+	runSleepingContainer(c, "--name=busybox1", "-d")
+	runSleepingContainer(c, "--name=busybox2", "-d")
+	result := dockerCmdWithResult("inspect", "--type=container", "--format='{{.Name}}'", "busybox1", "busybox2", "missing")
+
+	c.Assert(result.Error, checker.Not(check.IsNil))
+	c.Assert(result.Stdout(), checker.Contains, "busybox1")
+	c.Assert(result.Stdout(), checker.Contains, "busybox2")
+	c.Assert(result.Stderr(), checker.Contains, "Error: No such container: missing")
+
+	// inspect should not fail fast; it should report every container it can
+	result = dockerCmdWithResult("inspect", "--type=container", "--format='{{.Name}}'", "missing", "busybox1", "busybox2")
+
+	c.Assert(result.Error, checker.Not(check.IsNil))
+	c.Assert(result.Stdout(), checker.Contains, "busybox1")
+	c.Assert(result.Stdout(), checker.Contains, "busybox2")
+	c.Assert(result.Stderr(), checker.Contains, "Error: No such container: missing")
+}
+
+func (s *DockerSuite) TestInspectHistory(c *check.C) {
+	dockerCmd(c, "run", "--name=testcont", "busybox", "echo", "hello")
+	dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg")
+	out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg")
+	c.Assert(err, check.IsNil)
+	c.Assert(out, checker.Contains, "test comment")
+}
+
+func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	contName := "test1"
+	dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top")
+	netOut, _ := dockerCmd(c, "network", "inspect", "--format={{.ID}}", "bridge")
+	out := inspectField(c, contName, "NetworkSettings.Networks")
+	c.Assert(out, checker.Contains, "bridge")
+	out = inspectField(c, contName, "NetworkSettings.Networks.bridge.NetworkID")
+	c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut))
+}
+
+func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	netOut, _ := dockerCmd(c, "network", "create", "net1")
+	dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top")
+	out := inspectField(c, "container1", "NetworkSettings.Networks")
+	c.Assert(out, checker.Contains, "net1")
+	out = inspectField(c, "container1", "NetworkSettings.Networks.net1.NetworkID")
+	c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut))
+}
+
+func (s *DockerSuite) TestInspectRootFS(c *check.C) {
+	out, _, err := dockerCmdWithError("inspect", "busybox")
+	c.Assert(err, check.IsNil)
+
+	var imageJSON []types.ImageInspect
+	err = json.Unmarshal([]byte(out), &imageJSON)
+	c.Assert(err, checker.IsNil)
+
+	c.Assert(len(imageJSON[0].RootFS.Layers), checker.GreaterOrEqualThan, 1)
+}
+
+func (s *DockerSuite) TestInspectAmpersand(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	name := "test"
+	out, _ := dockerCmd(c, "run", "--name", name, "--env", `TEST_ENV="soanni&rtr"`, "busybox", "env")
+	c.Assert(out, checker.Contains, `soanni&rtr`)
+	out, _ = dockerCmd(c, "inspect", name)
+	c.Assert(out, checker.Contains, `soanni&rtr`)
+}
+
+func (s *DockerSuite) TestInspectPlugin(c *check.C) {
+	c.Skip("Plugin isn't supported")
+
+	testRequires(c, DaemonIsLinux, IsAmd64, Network)
+	_, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+
+	out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+	out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+	// Even without the tag, inspect should still work
+	out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+	out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
+
+	_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+
+	out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, pNameWithTag)
+}
+
+// Test case for #29185
+func (s *DockerSuite) TestInspectUnknownObject(c *check.C) {
+	// This test should work on both Windows and Linux
+	out, _, err := dockerCmdWithError("inspect", "foobar")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "Error: No such object: foobar")
+	c.Assert(err.Error(), checker.Contains, "Error: No such object: foobar")
+}
+
+func (s *DockerSuite) TestInspectInvalidReference(c *check.C) {
+	// This test should work on both Windows and Linux
+	out, _, err := dockerCmdWithError("inspect", "FooBar")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "Error: No such object: FooBar")
+	c.Assert(err.Error(), checker.Contains, "Error: No such object: FooBar")
+}
diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go
new file mode 100644
index 0000000..3273ecf
--- /dev/null
+++ b/integration-cli/docker_cli_kill_test.go
@@ -0,0 +1,138 @@
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/request"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestKillContainer(c *check.C) {
+	out := runSleepingContainer(c, "-d")
+	cleanedContainerID := strings.TrimSpace(out)
+	cli.WaitRun(c, cleanedContainerID)
+
+	cli.DockerCmd(c, "kill", cleanedContainerID)
+	cli.WaitExited(c, cleanedContainerID, 10*time.Second)
+
+	out = cli.DockerCmd(c, "ps", "-q").Combined()
+	c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running"))
+
+}
+
+func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) {
+	out := runSleepingContainer(c, "-d")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	cli.DockerCmd(c, "stop", cleanedContainerID)
+	cli.WaitExited(c, cleanedContainerID, 10*time.Second)
+
+	cli.Docker(cli.Args("kill", "-s", "30", cleanedContainerID)).Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+}
+
+func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) {
+	// TODO Windows: Windows does not yet support -u (Feb 2016).
+	testRequires(c, DaemonIsLinux)
+	out := cli.DockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top").Combined()
+	cleanedContainerID := strings.TrimSpace(out)
+	cli.WaitRun(c, cleanedContainerID)
+
+	cli.DockerCmd(c, "kill", cleanedContainerID)
+	cli.WaitExited(c, cleanedContainerID, 10*time.Second)
+
+	out = cli.DockerCmd(c, "ps", "-q").Combined()
+	c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running"))
+
+}
+
+// Regression test for correct signal parsing; see #13665
+func (s *DockerSuite) TestKillWithSignal(c *check.C) {
+	// Cannot port to Windows - does not support signals in the same way Linux does
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	cid := strings.TrimSpace(out)
+	c.Assert(waitRun(cid), check.IsNil)
+
+	dockerCmd(c, "kill", "-s", "SIGWINCH", cid)
+	time.Sleep(250 * time.Millisecond)
+
+	running := inspectField(c, cid, "State.Running")
+
+	c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH"))
+}
+
+func (s *DockerSuite) TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) {
+	// Cannot port to Windows - does not support signals in the same way as Linux does
+	testRequires(c, DaemonIsLinux)
+	out := cli.DockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top").Combined()
+	cid := strings.TrimSpace(out)
+	cli.WaitRun(c, cid)
+
+	// Let docker send a TERM signal to the container
+	// It will kill the process and disable the restart policy
+	cli.DockerCmd(c, "kill", "-s", "TERM", cid)
+	cli.WaitExited(c, cid, 10*time.Second)
+
+	out = cli.DockerCmd(c, "ps", "-q").Combined()
+	c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running"))
+}
+
+func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) {
+	// Cannot port to Windows - does not support signals in the same way as Linux does
+	testRequires(c, DaemonIsLinux)
+	out := cli.DockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top").Combined()
+	cid := strings.TrimSpace(out)
+	cli.WaitRun(c, cid)
+
+	// Let docker send a TERM signal to the container
+	// It will kill the process, but not disable the restart policy
+	cli.DockerCmd(c, "kill", "-s", "TERM", cid)
+	cli.WaitRestart(c, cid, 10*time.Second)
+
+	// Restart policy should still be in place, so it should be still running
+	cli.WaitRun(c, cid)
+}
+
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) {
+	out := runSleepingContainer(c, "-d")
+	cid := strings.TrimSpace(out)
+	c.Assert(waitRun(cid), check.IsNil)
+
+	out, _, err := dockerCmdWithError("kill", "-s", "0", cid)
+	c.Assert(err, check.NotNil)
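+	// 0 is not a valid signal, so the kill must fail without affecting the container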
+ c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly"))
+
+ running := inspectField(c, cid, "State.Running")
+ c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal"))
+
+ out = runSleepingContainer(c, "-d")
+ cid = strings.TrimSpace(out)
+ c.Assert(waitRun(cid), check.IsNil)
+
+ out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid)
+ c.Assert(err, check.NotNil)
+ c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal didn't error out correctly"))
+
+ running = inspectField(c, cid, "State.Running")
+ c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal"))
+
+}
+
+func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) {
+ testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
+ runSleepingContainer(c, "--name", "docker-kill-test-api", "-d")
+ dockerCmd(c, "stop", "docker-kill-test-api")
+
+ status, _, err := request.SockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil, daemonHost())
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+}
diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go
new file mode 100644
index 0000000..b43c6d1
--- /dev/null
+++ b/integration-cli/docker_cli_links_test.go
@@ -0,0 +1,236 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/pkg/testutil"
+ "github.com/docker/docker/runconfig"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
+
+ // the ping is expected to fail with a non-zero exit code
+ c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err))
+}
+
+// Test for appropriate error when calling --link with an invalid target container
+func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ out, _, err := dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true")
+
+ // an invalid container target should produce an error
+ c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+ // and the error output should mention the missing container
+ c.Assert(out, checker.Contains, "Could not get container")
+}
+
+func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ // Test with the three different ways of specifying the default network on Linux
+ testLinkPingOnNetwork(c, "")
+ testLinkPingOnNetwork(c, "default")
+ testLinkPingOnNetwork(c, "bridge")
+}
+
+func testLinkPingOnNetwork(c *check.C, network string) {
+ var postArgs []string
+ if network != "" {
+ postArgs = append(postArgs, []string{"--net", network}...)
+ }
+ postArgs = append(postArgs, []string{"busybox", "top"}...)
+ runArgs1 := append([]string{"run", "-d", "--name", "container1", "--hostname", "fred"}, postArgs...)
+ runArgs2 := append([]string{"run", "-d", "--name", "container2", "--hostname", "wilma"}, postArgs...)
+
+ // Run the two named containers
+ dockerCmd(c, runArgs1...)
+ dockerCmd(c, runArgs2...)
+
+ postArgs = []string{}
+ if network != "" {
+ postArgs = append(postArgs, []string{"--net", network}...)
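+ // As an illustration (assuming network == "bridge"), the linking run
+ // assembled below expands to roughly:
+ //   docker run --rm --link container1:alias1 --link container2:alias2 \
+ //     --net bridge busybox sh -c "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1"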
+ } + postArgs = append(postArgs, []string{"busybox", "sh", "-c"}...) + + // Format a run for a container which links to the other two + runArgs := append([]string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2"}, postArgs...) + pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" + + // test ping by alias, ping by name, and ping by hostname + // 1. Ping by alias + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + // 2. Ping by container name + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + // 3. Ping by hostname + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) + + // Clean for next round + dockerCmd(c, "rm", "-f", "container1") + dockerCmd(c, "rm", "-f", "container2") +} + +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) + +} + +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := testutil.ConvertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := testutil.ConvertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name=first", "busybox", "top") + dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") + dockerCmd(c, "start", "first") + +} + +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + + out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") + idOne := strings.TrimSpace(out) + + out, _ = 
dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") + idTwo := strings.TrimSpace(out) + + c.Assert(waitRun(idTwo), checker.IsNil) + + readContainerFileWithExec(c, idOne, "/etc/hosts") + contentTwo := readContainerFileWithExec(c, idTwo, "/etc/hosts") + // Host is not present in updated hosts file + c.Assert(string(contentTwo), checker.Contains, "onetwo") +} + +func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") + out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") + id := strings.TrimSpace(string(out)) + + realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + content := readContainerFileWithExec(c, id, "/etc/hosts") + + getIP := func(hosts []byte, hostname string) string { + re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) + matches := re.FindSubmatch(hosts) + c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s have no matches in hosts", hostname)) + return string(matches[1]) + } + ip := getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) + + dockerCmd(c, "restart", "one") + realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + + content = readContainerFileWithExec(c, id, "/etc/hosts") + ip = getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) +} + +func (s *DockerSuite) TestLinksEnvs(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") + out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") + c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") + c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") + c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") +} + +func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") + + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") + + cid2 := strings.TrimSpace(out) + c.Assert(waitRun(cid2), checker.IsNil) + + links := inspectFieldJSON(c, cid2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") +} + +func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") + + // Running container linking to a container with --net host should have failed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // Running container linking to a container with --net host should have failed + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + // /etc/hosts 
+ c.Assert(out, checker.Matches, "^-.+\n")
+}
+
+func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top")
+ dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top")
+ dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream")
+}
diff --git a/integration-cli/docker_cli_links_unix_test.go b/integration-cli/docker_cli_links_unix_test.go
new file mode 100644
index 0000000..dbff291
--- /dev/null
+++ b/integration-cli/docker_cli_links_unix_test.go
@@ -0,0 +1,26 @@
+// +build !windows
+
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) {
+ // This lives in a _unix file because it relies on Unix-specific files and
+ // must run on the same host as the daemon.
+ testRequires(c, SameHostDaemon, NotUserNamespace)
+
+ out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts")
+ hosts, err := ioutil.ReadFile("/etc/hosts")
+ if os.IsNotExist(err) {
+ c.Skip("/etc/hosts does not exist, skip this test")
+ }
+
+ c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts))
+
+}
diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go
new file mode 100644
index 0000000..94dc037
--- /dev/null
+++ b/integration-cli/docker_cli_login_test.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "bytes"
+ "os/exec"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) {
+ cmd := exec.Command(dockerBinary, "login")
+
+ // Send to stdin so the process does not get the TTY
+ cmd.Stdin = bytes.NewBufferString("buffer test string \n")
+
+ // run the command and block until it's done
+ err := cmd.Run()
+ c.Assert(err, checker.NotNil) // expected a non-nil err when logging in without a TTY available
+}
+
+func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) {
+ // wrong credentials
+ out, _, err := dockerCmdWithError("login", "-u", s.reg.Username(), "-p", "WRONGPASSWORD", privateRegistryURL)
+ c.Assert(err, checker.NotNil, check.Commentf(out))
+ c.Assert(out, checker.Contains, "401 Unauthorized")
+
+ // now it's fine
+ dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
+}
diff --git a/integration-cli/docker_cli_logout_test.go b/integration-cli/docker_cli_logout_test.go
new file mode 100644
index 0000000..5076ceb
--- /dev/null
+++ b/integration-cli/docker_cli_logout_test.go
@@ -0,0 +1,108 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/go-check/check"
+)
+
+func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) {
+
+ // @TODO TestLogoutWithExternalAuth expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
+ s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
+
+ osPath := os.Getenv("PATH")
+ defer os.Setenv("PATH", osPath)
+
+ workingDir, err := os.Getwd()
+ c.Assert(err, checker.IsNil)
+ absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
+ c.Assert(err, checker.IsNil)
+ testPath := fmt.Sprintf("%s%c%s",
osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmp) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + c.Assert(err, checker.IsNil) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + c.Assert(string(b), checker.Contains, privateRegistryURL) + + _, err = s.d.Cmd("--config", tmp, "tag", "busybox", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "push", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "logout", privateRegistryURL) + c.Assert(err, checker.IsNil) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) + + // check I cannot pull anymore + out, err := s.d.Cmd("--config", tmp, "pull", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + cmd := exec.Command("docker-credential-shell-test", "store") + stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.Username(), s.reg.Password()))) + cmd.Stdin = stdin + c.Assert(cmd.Run(), checker.IsNil) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := fmt.Sprintf(`{ "auths": {"https://%s": {}}, "credsStore": "shell-test" }`, privateRegistryURL) + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"%s\": {}", privateRegistryURL)) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"%s\": {}", privateRegistryURL)) +} diff --git a/integration-cli/docker_cli_logs_bench_test.go b/integration-cli/docker_cli_logs_bench_test.go new file mode 100644 index 0000000..eeb008d --- /dev/null +++ b/integration-cli/docker_cli_logs_bench_test.go @@ -0,0 +1,32 @@ +package main + 
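+// BenchmarkLogsCLIRotateFollow (below) exercises `docker logs -f` against a
+// container whose json-file log rotates constantly (max-size=1b, max-file=10,
+// one "hello" written roughly every 50ms). The follower must keep streaming
+// across rotations for 30 seconds; if it returns at all, the benchmark fails.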
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done")
+ id := strings.TrimSpace(out)
+ ch := make(chan error, 1)
+ go func() {
+ ch <- nil
+ out, _, _ := dockerCmdWithError("logs", "-f", id)
+ // if this returns at all, it's an error
+ ch <- fmt.Errorf(out)
+ }()
+
+ <-ch
+ select {
+ case <-time.After(30 * time.Second):
+ // ran for 30 seconds with no problem
+ return
+ case err := <-ch:
+ if err != nil {
+ c.Fatal(err)
+ }
+ }
+}
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
new file mode 100644
index 0000000..a8b8f90
--- /dev/null
+++ b/integration-cli/docker_cli_logs_test.go
@@ -0,0 +1,307 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "os/exec"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/integration-cli/cli"
+ "github.com/docker/docker/pkg/jsonlog"
+ "github.com/docker/docker/pkg/testutil"
+ icmd "github.com/docker/docker/pkg/testutil/cmd"
+ "github.com/go-check/check"
+)
+
+// This used to work; it tests a log of PageSize-1 (gh#4851)
+func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) {
+ testLogsContainerPagination(c, 32767)
+}
+
+// Regression test: When going over the PageSize, it used to panic (gh#4851)
+func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) {
+ testLogsContainerPagination(c, 32768)
+}
+
+// Regression test: When going much over the PageSize, it used to block (gh#4851)
+func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) {
+ testLogsContainerPagination(c, 33000)
+}
+
+func testLogsContainerPagination(c *check.C, testLen int) {
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen))
+ id := strings.TrimSpace(out)
+ dockerCmd(c, "wait", id)
+ out, _ = dockerCmd(c, "logs", id)
+ c.Assert(out, checker.HasLen, testLen+1)
+}
+
+func (s *DockerSuite) TestLogsTimestamps(c *check.C) {
+ testLen := 100
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen))
+
+ id := strings.TrimSpace(out)
+ dockerCmd(c, "wait", id)
+
+ out, _ = dockerCmd(c, "logs", "-t", id)
+
+ lines := strings.Split(out, "\n")
+
+ c.Assert(lines, checker.HasLen, testLen+1)
+
+ ts := regexp.MustCompile(`^.* `)
+
+ for _, l := range lines {
+ if l != "" {
+ _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l))
+ c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l))
+ // ensure we have padded 0's
+ c.Assert(l[29], checker.Equals, uint8('Z'))
+ }
+ }
+}
+
+func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) {
+ msg := "stderr_log"
+ out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined()
+ id := strings.TrimSpace(out)
+ cli.DockerCmd(c, "wait", id)
+ cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{
+ Out: "",
+ Err: msg,
+ })
+}
+
+func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) {
+ // TODO Windows: Needs investigation why this fails. Obtained string includes
+ // a bunch of ANSI escape sequences before the "stderr_log" message.
+ testRequires(c, DaemonIsLinux)
+ msg := "stderr_log"
+ out := cli.DockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined()
+ id := strings.TrimSpace(out)
+ cli.DockerCmd(c, "wait", id)
+
+ cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{
+ Out: msg,
+ Err: "",
+ })
+}
+
+func (s *DockerSuite) TestLogsTail(c *check.C) {
+ testLen := 100
+ out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)).Combined()
+
+ id := strings.TrimSpace(out)
+ cli.DockerCmd(c, "wait", id)
+
+ out = cli.DockerCmd(c, "logs", "--tail", "0", id).Combined()
+ lines := strings.Split(out, "\n")
+ c.Assert(lines, checker.HasLen, 1)
+
+ out = cli.DockerCmd(c, "logs", "--tail", "5", id).Combined()
+ lines = strings.Split(out, "\n")
+ c.Assert(lines, checker.HasLen, 6)
+
+ out = cli.DockerCmd(c, "logs", "--tail", "99", id).Combined()
+ lines = strings.Split(out, "\n")
+ c.Assert(lines, checker.HasLen, 100)
+
+ out = cli.DockerCmd(c, "logs", "--tail", "all", id).Combined()
+ lines = strings.Split(out, "\n")
+ c.Assert(lines, checker.HasLen, testLen+1)
+
+ out = cli.DockerCmd(c, "logs", "--tail", "-1", id).Combined()
+ lines = strings.Split(out, "\n")
+ c.Assert(lines, checker.HasLen, testLen+1)
+
+ out = cli.DockerCmd(c, "logs", "--tail", "random", id).Combined()
+ lines = strings.Split(out, "\n")
+ c.Assert(lines, checker.HasLen, testLen+1)
+}
+
+func (s *DockerSuite) TestLogsFollowStopped(c *check.C) {
+ dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello")
+ id := getIDByName(c, "test")
+
+ logsCmd := exec.Command(dockerBinary, "logs", "-f", id)
+ c.Assert(logsCmd.Start(), checker.IsNil)
+
+ errChan := make(chan error)
+ go func() {
+ errChan <- logsCmd.Wait()
+ close(errChan)
+ }()
+
+ select {
+ case err := <-errChan:
+ c.Assert(err, checker.IsNil)
+ case <-time.After(30 * time.Second):
+ c.Fatal("Following the logs of a stopped container hung")
+ }
+}
+
+func (s *DockerSuite) TestLogsSince(c *check.C) {
+ name := "testlogssince"
+ dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done")
+ out, _ := dockerCmd(c, "logs", "-t", name)
+
+ log2Line := strings.Split(strings.Split(out, "\n")[1], " ")
+ t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp at which log2 was written
+ c.Assert(err, checker.IsNil)
+ since := t.Unix() + 1 // add 1s so log1 & log2 don't show up
+ out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name)
+
+ // Skip 2 seconds
+ unexpected := []string{"log1", "log2"}
+ for _, v := range unexpected {
+ c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since))
+ }
+
+ // Test to make sure a bad since format is caught by the client
+ out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name)
+ c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server"))
+
+ // Test with default value specified and parameter omitted
+ expected := []string{"log1", "log2", "log3"}
+ for _, cmd := range [][]string{
+ {"logs", "-t", name},
+ {"logs", "-t", "--since=0", name},
+ } {
+ result := icmd.RunCommand(dockerBinary, cmd...)
+ result.Assert(c, icmd.Success)
+ for _, v := range expected {
+ c.Assert(result.Combined(), checker.Contains, v)
+ }
+ }
+}
+
+func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) {
+ // TODO Windows TP5 - Figure out why this test is so flaky. Disabled for now.
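+ // The test below prints log1..log5 one second apart, polls `docker logs -t`
+ // until a first timestamp appears, then follows with --since set two
+ // seconds past that timestamp and asserts that every returned line is
+ // stamped at or after the cut-off.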
+ testRequires(c, DaemonIsLinux) + name := "testlogssincefuturefollow" + out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`) + + // Extract one timestamp from the log file to give us a starting point for + // our `--since` argument. Because the log producer runs in the background, + // we need to check repeatedly for some output to be produced. + var timestamp string + for i := 0; i != 100 && timestamp == ""; i++ { + if out, _ = dockerCmd(c, "logs", "-t", name); out == "" { + time.Sleep(time.Millisecond * 100) // Retry + } else { + timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0] + } + } + + c.Assert(timestamp, checker.Not(checker.Equals), "") + t, err := time.Parse(time.RFC3339Nano, timestamp) + c.Assert(err, check.IsNil) + + since := t.Unix() + 2 + out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name) + c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log")) + lines := strings.Split(strings.TrimSpace(out), "\n") + for _, v := range lines { + ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0]) + c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v)) + c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. since=%v logdate=%v", since, ts)) + } +} + +// Regression test for #8832 +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { + // TODO Windows: Fix this test for TP5. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + + id := strings.TrimSpace(out) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", id).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", id) + stdout, err := logCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + c.Assert(logCmd.Start(), checker.IsNil) + + // First read slowly + bytes1, err := testutil.ConsumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + c.Assert(err, checker.IsNil) + + // After the container has finished we can continue reading fast + bytes2, err := testutil.ConsumeWithSpeed(stdout, 32*1024, 0, nil) + c.Assert(err, checker.IsNil) + + actual := bytes1 + bytes2 + expected := 200000 + c.Assert(actual, checker.Equals, expected) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + r, w := io.Pipe() + cmd.Stdout = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + chErr := make(chan error) + go func() { + b := make([]byte, 1) + _, err := r.Read(b) + chErr <- err + }() + c.Assert(<-chErr, checker.IsNil) + c.Assert(cmd.Process.Kill(), checker.IsNil) + r.Close() + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + 
c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(cmd.Start(), checker.IsNil) + time.Sleep(200 * time.Millisecond) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { + name := "testlogsnocontainer" + out, _, _ := dockerCmdWithError("logs", name) + message := fmt.Sprintf("No such container: %s\n", name) + c.Assert(out, checker.Contains, message) +} + +func (s *DockerSuite) TestLogsWithDetails(c *check.C) { + dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test") + + logFields := strings.Fields(strings.TrimSpace(out)) + c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out)) + + details := strings.Split(logFields[1], ",") + c.Assert(details, checker.HasLen, 2) + c.Assert(details[0], checker.Equals, "baz=qux") + c.Assert(details[1], checker.Equals, "foo=bar") +} diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go new file mode 100644 index 0000000..bb6eca1 --- /dev/null +++ b/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,88 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +func startServerContainer(c *check.C, msg string, port int) string { + name := "server" + cmd := []string{ + "run", + "--name", + name, + "-d", + "-p", fmt.Sprintf("%d:%d", port, port), + "busybox", + "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), + } + cli.DockerCmd(c, cmd...) + cli.WaitRun(c, name) + return name +} + +func getExternalAddress(c *check.C) net.IP { + iface, err := net.InterfaceByName("eth0") + if err != nil { + c.Skip(fmt.Sprintf("Test not running with `make test`. 
Interface eth0 not found: %v", err))
+ }
+
+ ifaceAddrs, err := iface.Addrs()
+ c.Assert(err, check.IsNil)
+ c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0)
+
+ ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String())
+ c.Assert(err, check.IsNil)
+
+ return ifaceIP
+}
+
+func (s *DockerSuite) TestNetworkNat(c *check.C) {
+ testRequires(c, DaemonIsLinux, SameHostDaemon)
+ msg := "it works"
+ startServerContainer(c, msg, 8080)
+ endpoint := getExternalAddress(c)
+ conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080))
+ c.Assert(err, check.IsNil)
+
+ data, err := ioutil.ReadAll(conn)
+ conn.Close()
+ c.Assert(err, check.IsNil)
+
+ final := strings.TrimRight(string(data), "\n")
+ c.Assert(final, checker.Equals, msg)
+}
+
+func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) {
+ testRequires(c, DaemonIsLinux, SameHostDaemon)
+ var (
+ msg = "hi yall"
+ )
+ startServerContainer(c, msg, 8081)
+ conn, err := net.Dial("tcp", "localhost:8081")
+ c.Assert(err, check.IsNil)
+
+ data, err := ioutil.ReadAll(conn)
+ conn.Close()
+ c.Assert(err, check.IsNil)
+
+ final := strings.TrimRight(string(data), "\n")
+ c.Assert(final, checker.Equals, msg)
+}
+
+func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) {
+ testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
+ msg := "it works"
+ startServerContainer(c, msg, 8080)
+ endpoint := getExternalAddress(c)
+ out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox",
+ "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String()))
+ final := strings.TrimRight(string(out), "\n")
+ c.Assert(final, checker.Equals, msg)
+}
diff --git a/integration-cli/docker_cli_netmode_test.go b/integration-cli/docker_cli_netmode_test.go
new file mode 100644
index 0000000..deb8f69
--- /dev/null
+++ b/integration-cli/docker_cli_netmode_test.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "github.com/docker/docker/integration-cli/checker"
+ "github.com/docker/docker/runconfig"
+ "github.com/go-check/check"
+)
+
+// GH14530. Validates combinations of --net= with other options
+
+// stringCheckPS is how the output of PS starts in order to validate that
+// the command executed in a container did really run PS correctly.
+const stringCheckPS = "PID USER"
+
+// dockerCmdWithFail executes a docker command that is supposed to fail and
+// returns the output and the exit code. If the command returns a nil error,
+// it will fail and stop the tests.
+func dockerCmdWithFail(c *check.C, args ...string) (string, int) {
+ out, status, err := dockerCmdWithError(args...)
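+ // A typical call site (taken from the tests below) looks like:
+ //   out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "busybox", "ps")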
+ c.Assert(err, check.NotNil, check.Commentf("%v", out)) + return out, status +} + +func (s *DockerSuite) TestNetHostnameWithNetHost(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) +} + +func (s *DockerSuite) TestNetHostname(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") + c.Assert(out, checker.Contains, "Invalid network mode: invalid container format container:") + + out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") + c.Assert(out, checker.Contains, "network weird not found") +} + +func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictContainerNetworkHostAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeNetHostAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) +} diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go new file mode 100644 index 
0000000..2851da1 --- /dev/null +++ b/integration-cli/docker_cli_network_unix_test.go @@ -0,0 +1,1845 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/driverapi" + remoteapi "github.com/docker/libnetwork/drivers/remote/api" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/libnetwork/netlabel" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +const dummyNetworkDriver = "dummy-network-driver" +const dummyIPAMDriver = "dummy-ipam-driver" + +var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest + +func init() { + check.Suite(&DockerNetworkSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerNetworkSuite struct { + server *httptest.Server + ds *DockerSuite + d *daemon.Daemon +} + +func (s *DockerNetworkSuite) SetUpTest(c *check.C) { + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) +} + +func (s *DockerNetworkSuite) TearDownTest(c *check.C) { + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } +} + +func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIPAMDriver) +} + +func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"local"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + 
}) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + 
}
+ w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+ // make sure libnetwork is now asking to release the expected poolid
+ if addressRequest.PoolID != poolID {
+ fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
+ } else {
+ fmt.Fprintf(w, "null")
+ }
+ })
+
+ err := os.MkdirAll("/etc/docker/plugins", 0755)
+ c.Assert(err, checker.IsNil)
+
+ fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv)
+ err = ioutil.WriteFile(fileName, []byte(url), 0644)
+ c.Assert(err, checker.IsNil)
+
+ ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv)
+ err = ioutil.WriteFile(ipamFileName, []byte(url), 0644)
+ c.Assert(err, checker.IsNil)
+}
+
+func (s *DockerNetworkSuite) TearDownSuite(c *check.C) {
+ if s.server == nil {
+ return
+ }
+
+ s.server.Close()
+
+ err := os.RemoveAll("/etc/docker/plugins")
+ c.Assert(err, checker.IsNil)
+}
+
+func assertNwIsAvailable(c *check.C, name string) {
+ if !isNwPresent(c, name) {
+ c.Fatalf("Network %s not found in network ls output", name)
+ }
+}
+
+func assertNwNotAvailable(c *check.C, name string) {
+ if isNwPresent(c, name) {
+ c.Fatalf("Found network %s in network ls output", name)
+ }
+}
+
+func isNwPresent(c *check.C, name string) bool {
+ out, _ := dockerCmd(c, "network", "ls")
+ lines := strings.Split(out, "\n")
+ for i := 1; i < len(lines)-1; i++ {
+ netFields := strings.Fields(lines[i])
+ if netFields[1] == name {
+ return true
+ }
+ }
+ return false
+}
+
+// assertNwList checks that the network list retrieved with the ls command
+// equals the expected network list.
+// Note: out should be the result of `network ls [option]`.
+func assertNwList(c *check.C, out string, expectNws []string) {
+ lines := strings.Split(out, "\n")
+ var nwList []string
+ for _, line := range lines[1 : len(lines)-1] {
+ netFields := strings.Fields(line)
+ // collect all network names in nwList
+ nwList = append(nwList, netFields[1])
+ }
+
+ // network ls should contain all expected networks
+ c.Assert(nwList, checker.DeepEquals, expectNws)
+}
+
+func getNwResource(c *check.C, name string) *types.NetworkResource {
+ out, _ := dockerCmd(c, "network", "inspect", name)
+ nr := []types.NetworkResource{}
+ err := json.Unmarshal([]byte(out), &nr)
+ c.Assert(err, check.IsNil)
+ return &nr[0]
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) {
+ defaults := []string{"bridge", "host", "none"}
+ for _, nn := range defaults {
+ assertNwIsAvailable(c, nn)
+ }
+}
+
+func (s *DockerSuite) TestNetworkLsFormat(c *check.C) {
+ testRequires(c, DaemonIsLinux)
+ out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}")
+ lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+
+ expected := []string{"bridge", "host", "none"}
+ var names []string
+ names = append(names, lines...)
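+ // `network ls --format "{{.Name}}"` prints one name per line, so on a
+ // fresh daemon the trimmed output is expected to be exactly
+ // "bridge\nhost\nnone", matching the three built-in networks.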
+ c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + config := `{ + "networksFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "network", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge default", "host default", "none default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be created again + out, _, err := dockerCmdWithError("network", "create", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { + dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + out := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, "192.168.10.1:5000->5000/tcp") +} + +func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be removed + out, _, err := dockerCmdWithError("network", "rm", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testNet := "testnet1" + testLabel := "foo" + testValue := "bar" + out, _ := dockerCmd(c, "network", "create", "dev") + defer func() { + dockerCmd(c, "network", "rm", "dev") + dockerCmd(c, "network", "rm", testNet) + }() + networkID := strings.TrimSpace(out) + + // filter with partial ID + // only show 'dev' network + out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5]) + assertNwList(c, out, []string{"dev"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "name=dge") + assertNwList(c, out, []string{"bridge"}) + + // only show built-in network (bridge, none, host) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "host", "none"}) + + // only show custom networks (dev) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") + assertNwList(c, out, []string{"dev"}) + + // show all networks with filter + // it should be equivalent of ls without option + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "dev", "host", "none"}) + + out, _ = dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, 
"network", "ls", "-f", "label="+testLabel+"="+testValue) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label=nonexistent") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=null") + assertNwList(c, out, []string{"none"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=host") + assertNwList(c, out, []string{"host"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=bridge") + assertNwList(c, out, []string{"bridge", "dev", testNet}) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateLabel(c *check.C) { + testNet := "testnetcreatelabel" + testLabel := "foo" + testValue := "bar" + + dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _, err := dockerCmdWithError("network", "inspect", "--format={{ .Labels."+testLabel+" }}", testNet) + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) + + dockerCmd(c, "network", "rm", testNet) + assertNwNotAvailable(c, testNet) +} + +func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { + out, _, err := dockerCmdWithError("network", "rm", "test") + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) +} + +func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { + dockerCmd(c, "network", "create", "testDelMulti0") + assertNwIsAvailable(c, "testDelMulti0") + dockerCmd(c, "network", "create", "testDelMulti1") + assertNwIsAvailable(c, "testDelMulti1") + dockerCmd(c, "network", "create", "testDelMulti2") + assertNwIsAvailable(c, "testDelMulti2") + out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + // delete three networks at the same time, since testDelMulti2 + // contains active container, its deletion should fail. + out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") + // err should not be nil due to deleting testDelMulti2 failed. 
+ c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+ // testDelMulti2 should fail because the network has active endpoints
+ c.Assert(out, checker.Contains, "has active endpoints")
+ assertNwNotAvailable(c, "testDelMulti0")
+ assertNwNotAvailable(c, "testDelMulti1")
+ // testDelMulti2 can't be deleted, so it should still exist
+ assertNwIsAvailable(c, "testDelMulti2")
+}
+
+func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) {
+ out, _ := dockerCmd(c, "network", "inspect", "host")
+ networkResources := []types.NetworkResource{}
+ err := json.Unmarshal([]byte(out), &networkResources)
+ c.Assert(err, check.IsNil)
+ c.Assert(networkResources, checker.HasLen, 1)
+
+ out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Name }}", "host")
+ c.Assert(strings.TrimSpace(out), check.Equals, "host")
+}
+
+func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) {
+ out, _ := dockerCmd(c, "network", "create", "test2")
+ networkID := strings.TrimSpace(out)
+ assertNwIsAvailable(c, "test2")
+ out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Id }}", "test2")
+ c.Assert(strings.TrimSpace(out), check.Equals, networkID)
+
+ out, _ = dockerCmd(c, "network", "inspect", "--format={{ .ID }}", "test2")
+ c.Assert(strings.TrimSpace(out), check.Equals, networkID)
+}
+
+func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) {
+ result := dockerCmdWithResult("network", "inspect", "host", "none")
+ c.Assert(result, icmd.Matches, icmd.Success)
+
+ networkResources := []types.NetworkResource{}
+ err := json.Unmarshal([]byte(result.Stdout()), &networkResources)
+ c.Assert(err, check.IsNil)
+ c.Assert(networkResources, checker.HasLen, 2)
+}
+
+func (s *DockerSuite) TestDockerInspectMultipleNetworksIncludingNonexistent(c *check.C) {
+ // the non-existent network is not at the beginning of the inspect list;
+ // this should print an error, exit with code 1, and still print the host network
+ result := dockerCmdWithResult("network", "inspect", "host", "nonexistent")
+ c.Assert(result, icmd.Matches, icmd.Expected{
+ ExitCode: 1,
+ Err: "Error: No such network: nonexistent",
+ Out: "host",
+ })
+
+ networkResources := []types.NetworkResource{}
+ err := json.Unmarshal([]byte(result.Stdout()), &networkResources)
+ c.Assert(err, check.IsNil)
+ c.Assert(networkResources, checker.HasLen, 1)
+
+ // Only one non-existent network to inspect
+ // Should print an error and exit with a non-zero code, nothing else
+ result = dockerCmdWithResult("network", "inspect", "nonexistent")
+ c.Assert(result, icmd.Matches, icmd.Expected{
+ ExitCode: 1,
+ Err: "Error: No such network: nonexistent",
+ Out: "[]",
+ })
+
+ // the non-existent network is at the beginning of the inspect list;
+ // inspect should not fail fast, and should still print the host network alongside the error
+ result = dockerCmdWithResult("network", "inspect", "nonexistent", "host")
+ c.Assert(result, icmd.Matches, icmd.Expected{
+ ExitCode: 1,
+ Err: "Error: No such network: nonexistent",
+ Out: "host",
+ })
+
+ networkResources = []types.NetworkResource{}
+ err = json.Unmarshal([]byte(result.Stdout()), &networkResources)
+ c.Assert(err, check.IsNil)
+ c.Assert(networkResources, checker.HasLen, 1)
+}
+
+func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) {
+ dockerCmd(c, "network", "create", "brNetForInspect")
+ assertNwIsAvailable(c, "brNetForInspect")
+ defer func() {
+ dockerCmd(c, "network", "rm", "brNetForInspect")
+ assertNwNotAvailable(c, "brNetForInspect")
+ }()
+
+ out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net", "brNetForInspect", "busybox", "top")
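+ // The Containers map in `network inspect` output is keyed by the full
+ // container ID, which is why the ID (rather than the name) is captured here.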
"brNetForInspect", "busybox", "top") + c.Assert(waitRun("testNetInspect1"), check.IsNil) + containerID := strings.TrimSpace(out) + defer func() { + // we don't stop container by name, because we'll rename it later + dockerCmd(c, "stop", containerID) + }() + + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + container, ok := networkResources[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container.Name, checker.Equals, "testNetInspect1") + + // rename container and check docker inspect output update + newName := "HappyNewName" + dockerCmd(c, "rename", "testNetInspect1", newName) + + // check whether network inspect works properly + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + newNetRes := []types.NetworkResource{} + err = json.Unmarshal([]byte(out), &newNetRes) + c.Assert(err, check.IsNil) + c.Assert(newNetRes, checker.HasLen, 1) + container1, ok := newNetRes[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container1.Name, checker.Equals, newName) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + nr := getNwResource(c, "test") + + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + containerID := strings.TrimSpace(out) + + // connect the container to the test network + dockerCmd(c, "network", "connect", "test", containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], check.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, check.IsNil) + containerIP := findContainerIP(c, "test", "test") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + dockerCmd(c, "network", "disconnect", "test", containerID) + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run another container + out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") + c.Assert(waitRun("test2"), check.IsNil) + containerID = strings.TrimSpace(out) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 1) + + // force disconnect the container to the test network + dockerCmd(c, "network", "disconnect", "-f", "test", containerID) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + // test0 bridge network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") + assertNwIsAvailable(c, "test1") + + // test2 bridge network does not overlap + dockerCmd(c, "network", "create", "--subnet=192.169.0.0/16", "test2") + assertNwIsAvailable(c, "test2") + + // for networks w/o ipam specified, 
+	dockerCmd(c, "network", "create", "test3")
+	assertNwIsAvailable(c, "test3")
+	dockerCmd(c, "network", "create", "test4")
+	assertNwIsAvailable(c, "test4")
+	dockerCmd(c, "network", "create", "test5")
+	assertNwIsAvailable(c, "test5")
+
+	// test network with multiple subnets
+	// the bridge driver doesn't support multiple subnets, so use a dummy driver that supports them
+	dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6")
+	assertNwIsAvailable(c, "test6")
+
+	// test network with multiple subnets with valid ipam combinations
+	// also check same subnet across networks when the driver supports it.
+	dockerCmd(c, "network", "create", "-d", dummyNetworkDriver,
+		"--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16",
+		"--gateway=192.168.0.100", "--gateway=192.170.0.100",
+		"--ip-range=192.168.1.0/24",
+		"--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6",
+		"--aux-address", "c=192.170.1.5", "--aux-address", "d=192.170.1.6",
+		"test7")
+	assertNwIsAvailable(c, "test7")
+
+	// cleanup
+	for i := 1; i < 8; i++ {
+		dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i))
+	}
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) {
+	// Create a bridge network using a custom ipam driver
+	dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0")
+	assertNwIsAvailable(c, "br0")
+
+	// Verify expected network ipam fields are there
+	nr := getNetworkResource(c, "br0")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.IPAM.Driver, checker.Equals, dummyIPAMDriver)
+
+	// remove the network and exercise the remote ipam driver
+	dockerCmd(c, "network", "rm", "br0")
+	assertNwNotAvailable(c, "br0")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) {
+	// Create a bridge network using a custom ipam driver and options
+	dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0")
+	assertNwIsAvailable(c, "br0")
+
+	// Verify expected network ipam options
+	nr := getNetworkResource(c, "br0")
+	opts := nr.IPAM.Options
+	c.Assert(opts["opt1"], checker.Equals, "drv1")
+	c.Assert(opts["opt2"], checker.Equals, "drv2")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) {
+	nr := getNetworkResource(c, "none")
+	c.Assert(nr.Driver, checker.Equals, "null")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.Internal, checker.Equals, false)
+	c.Assert(nr.EnableIPv6, checker.Equals, false)
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 0)
+
+	nr = getNetworkResource(c, "host")
+	c.Assert(nr.Driver, checker.Equals, "host")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.Internal, checker.Equals, false)
+	c.Assert(nr.EnableIPv6, checker.Equals, false)
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 0)
+
+	nr = getNetworkResource(c, "bridge")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.Internal, checker.Equals, false)
+	c.Assert(nr.EnableIPv6, checker.Equals, false)
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil)
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil)
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) {
+	// if unspecified, the network subnet will be selected from inside the preferred pool
+	dockerCmd(c, "network", "create", "test01")
+	assertNwIsAvailable(c, "test01")
+
+	nr := getNetworkResource(c, "test01")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.Internal, checker.Equals, false)
+	c.Assert(nr.EnableIPv6, checker.Equals, false)
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil)
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil)
+
+	dockerCmd(c, "network", "rm", "test01")
+	assertNwNotAvailable(c, "test01")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) {
+	dockerCmd(c, "network", "create", "--driver=bridge", "--ipv6", "--subnet=fd80:24e2:f998:72d6::/64", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0")
+	assertNwIsAvailable(c, "br0")
+
+	nr := getNetworkResource(c, "br0")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.Internal, checker.Equals, false)
+	c.Assert(nr.EnableIPv6, checker.Equals, true)
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 2)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16")
+	c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24")
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254")
+	dockerCmd(c, "network", "rm", "br0")
+	assertNwNotAvailable(c, "br0")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C) {
+	// network with ip-range out of subnet range
+	_, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test")
+	c.Assert(err, check.NotNil)
+
+	// network with multiple gateways for a single subnet
+	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test")
+	c.Assert(err, check.NotNil)
+
+	// Multiple overlapping subnets in the same network must fail
+	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test")
+	c.Assert(err, check.NotNil)
+
+	// overlapping subnets across networks must fail
+	// create a valid test0 network
+	dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0")
+	assertNwIsAvailable(c, "test0")
+	// create an overlapping test1 network
+	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1")
+	c.Assert(err, check.NotNil)
+	dockerCmd(c, "network", "rm", "test0")
+	assertNwNotAvailable(c, "test0")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) {
+	dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt")
+	assertNwIsAvailable(c, "testopt")
+	gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData]
+	c.Assert(gopts, checker.NotNil)
+	opts, ok := gopts.(map[string]interface{})
+	c.Assert(ok, checker.Equals, true)
+	c.Assert(opts["opt1"], checker.Equals, "drv1")
+	c.Assert(opts["opt2"], checker.Equals, "drv2")
+	dockerCmd(c, "network", "rm", "testopt")
+	assertNwNotAvailable(c, "testopt")
+}
+
+func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) {
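+	// Note: this build ships without plugin support (see the c.Skip calls
+	// throughout), so the test is skipped before its requirements are checked.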
c.Skip("Plugins aren't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + var ( + npName = "tiborvass/test-docker-netplugin" + npTag = "latest" + npNameWithTag = npName + ":" + npTag + ) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npName) + c.Assert(out, checker.Contains, npTag) + c.Assert(out, checker.Contains, "true") + + dockerCmd(c, "network", "create", "-d", npNameWithTag, "v2net") + assertNwIsAvailable(c, "v2net") + dockerCmd(c, "network", "rm", "v2net") + assertNwNotAvailable(c, "v2net") + +} + +func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { + testRequires(c, ExecSupport) + // On default bridge network built-in service discovery should not happen + hostsFile := "/etc/hosts" + bridgeName := "external-bridge" + bridgeIP := "192.169.255.254/24" + createInterface(c, "bridge", bridgeName, bridgeIP) + defer deleteInterface(c, bridgeName) + + s.d.StartWithBusybox(c, "--bridge", bridgeName) + defer s.d.Restart(c) + + // run two containers and store first container's etc/hosts content + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + cid1 := strings.TrimSpace(out) + defer s.d.Cmd("stop", cid1) + + hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") + c.Assert(err, check.IsNil) + cid2 := strings.TrimSpace(out) + + // verify first container's etc/hosts file has not changed after spawning the second named container + hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // stop container 2 and verify first container's etc/hosts has not changed + _, err = s.d.Cmd("stop", cid2) + c.Assert(err, check.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // but discovery is on when connecting to non default bridge network + network := "anotherbridge" + out, err = s.d.Cmd("network", "create", network) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer s.d.Cmd("network", "rm", network) + + out, err = s.d.Cmd("network", "connect", network, cid1) + c.Assert(err, check.IsNil, check.Commentf(out)) + + hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second network connection", hostsFile)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { + testRequires(c, ExecSupport, NotArm) + hostsFile := "/etc/hosts" + cstmBridgeNw := "custom-bridge-nw" + cstmBridgeNw1 := "custom-bridge-nw1" + + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) + assertNwIsAvailable(c, cstmBridgeNw) + + // run two anonymous containers and store their etc/hosts content + out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid1 := strings.TrimSpace(out) + + 
+	hosts1 := readContainerFileWithExec(c, cid1, hostsFile)
+
+	out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top")
+	cid2 := strings.TrimSpace(out)
+
+	hosts2 := readContainerFileWithExec(c, cid2, hostsFile)
+
+	// verify the first container's /etc/hosts file has not changed
+	hosts1post := readContainerFileWithExec(c, cid1, hostsFile)
+	c.Assert(string(hosts1), checker.Equals, string(hosts1post),
+		check.Commentf("Unexpected %s change on anonymous container creation", hostsFile))
+
+	// Connect the 2nd container to a new network and verify the
+	// first container's /etc/hosts file still hasn't changed.
+	dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1)
+	assertNwIsAvailable(c, cstmBridgeNw1)
+
+	dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2)
+
+	hosts2 = readContainerFileWithExec(c, cid2, hostsFile)
+	hosts1post = readContainerFileWithExec(c, cid1, hostsFile)
+	c.Assert(string(hosts1), checker.Equals, string(hosts1post),
+		check.Commentf("Unexpected %s change on container connect", hostsFile))
+
+	// start a named container
+	cName := "AnyName"
+	out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top")
+	cid3 := strings.TrimSpace(out)
+
+	// verify that containers 1 and 2 can ping the named container
+	dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName)
+	dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName)
+
+	// Stop the named container and verify the first two containers' /etc/hosts files haven't changed
+	dockerCmd(c, "stop", cid3)
+	hosts1post = readContainerFileWithExec(c, cid1, hostsFile)
+	c.Assert(string(hosts1), checker.Equals, string(hosts1post),
+		check.Commentf("Unexpected %s change on named container stop", hostsFile))
+
+	hosts2post := readContainerFileWithExec(c, cid2, hostsFile)
+	c.Assert(string(hosts2), checker.Equals, string(hosts2post),
+		check.Commentf("Unexpected %s change on named container stop", hostsFile))
+
+	// verify that containers 1 and 2 can't ping the named container now
+	_, _, err := dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName)
+	c.Assert(err, check.NotNil)
+	_, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName)
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check.C) {
+	// The legacy link feature must work only on the default network, and not across networks
+	cnt1 := "container1"
+	cnt2 := "container2"
+	network := "anotherbridge"
+
+	// Run the first container on the default network
+	dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top")
+
+	// Create another network and run the second container on it
+	dockerCmd(c, "network", "create", network)
+	assertNwIsAvailable(c, network)
+	dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top")
+
+	// Try launching a container on the default network, linking to the first container. Must succeed
+	dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top")
+
+	// Try launching a container on the default network, linking to the second container. Must fail
+	_, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top")
+	c.Assert(err, checker.NotNil)
+
+	// Connect second container to default network. 
Now a container on default network can link to it + dockerCmd(c, "network", "connect", "bridge", cnt2) + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") +} + +func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + // Verify exposed ports are present in ps output when running a container on + // a network managed by a driver which does not provide the default gateway + // for the container + nwn := "ov" + ctn := "bb" + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dnd := "dnd" + did := "did" + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + s.d.StartWithBusybox(c) + _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) + + // Kill daemon and restart + c.Assert(s.d.Kill(), checker.IsNil) + + server.Close() + + startTime := time.Now().Unix() + s.d.Restart(c) + lapse := time.Now().Unix() - startTime + if lapse > 60 { + // In normal scenarios, daemon restart takes ~1 second. + // Plugin retry mechanism can delay the daemon start. systemd may not like it. 
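+		// The exact delay is environment-dependent, so we only log a warning
+		// here rather than failing the test.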
+ // Avoid accessing plugins during daemon bootup + c.Logf("daemon restart took too long : %d seconds", lapse) + } + + // Restart the custom dummy plugin + mux = http.NewServeMux() + server = httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + // trying to reuse the same ip must succeed + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + // Verify endpoint MAC address is correctly populated in container's network settings + nwn := "ov" + ctn := "bb" + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") + + mac := inspectField(c, ctn, "NetworkSettings.Networks."+nwn+".MacAddress") + c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") +} + +func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "mybridge1") + dockerCmd(c, "network", "create", "mybridge2") + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "network", "connect", "mybridge1", id) + dockerCmd(c, "network", "connect", "mybridge2", id) + + body := getInspectBody(c, "v1.20", id) + var inspect120 v1p20.ContainerJSON + err := json.Unmarshal(body, &inspect120) + c.Assert(err, checker.IsNil) + + versionedIP := inspect120.NetworkSettings.IPAddress + + body = getInspectBody(c, "v1.21", id) + var inspect121 types.ContainerJSON + err = json.Unmarshal(body, &inspect121) + c.Assert(err, checker.IsNil) + c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) + + bridge := inspect121.NetworkSettings.Networks["bridge"] + c.Assert(bridge.IPAddress, checker.Equals, versionedIP) + c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) +} + +func connectContainerToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { + // Run a container on the default network + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach the container to other networks + for _, nw := range nws { + out, err = d.Cmd("network", "create", nw) + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("network", "connect", nw, cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } +} + +func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { + // Verify container is connected to all the networks + for _, nw := range nws { + out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Equals), "\n") + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + cName := "bb" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox(c) + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Reload daemon + s.d.Restart(c) + + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + cName := "cc" + nwList := []string{"nw1", 
"nw2", "nw3"} + + s.d.StartWithBusybox(c) + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Kill daemon and restart + c.Assert(s.d.Kill(), checker.IsNil) + s.d.Restart(c) + + // Restart container + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "one") + containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(containerOut)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + s.d.StartWithBusybox(c) + + // Run a few containers on host network + for i := 0; i < 10; i++ { + cName := fmt.Sprintf("hostc-%d", i) + out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // verfiy container has finished starting before killing daemon + err = s.d.WaitRun(cName) + c.Assert(err, checker.IsNil) + } + + // Kill daemon ungracefully and restart + c.Assert(s.d.Kill(), checker.IsNil) + s.d.Restart(c) + + // make sure all the containers are up and running + for i := 0; i < 10; i++ { + err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i)) + c.Assert(err, checker.IsNil) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + dockerCmd(c, "network", "disconnect", "bridge", "container1") + out, _, err := dockerCmdWithError("network", "connect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "--net=host", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf("Should err out disconnect from host")) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { + testRequires(c, NotArm) + dockerCmd(c, "network", "create", "test1") + dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + dockerCmd(c, "network", "connect", "test1", "c1") +} + +func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) { + chk := checker.Equals + if !mustBeEqual { + chk = checker.Not(checker.Equals) + } + currentMapping, _ := dockerCmd(c, "port", container, port) + c.Assert(currentMapping, chk, originalMapping) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) { + // Connect and disconnect a container with explicit and non-explicit + // host port mapping to/from networks which do cause and do not cause + // the container default gateway to change, and verify docker port cmd + // returns congruent information + testRequires(c, NotArm) + cnt := "c1" + dockerCmd(c, "network", "create", "aaa") + 
dockerCmd(c, "network", "create", "ccc") + + dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top") + c.Assert(waitRun(cnt), check.IsNil) + curPortMap, _ := dockerCmd(c, "port", cnt, "70") + curExplPortMap, _ := dockerCmd(c, "port", cnt, "90") + + // Connect to a network which causes the container's default gw switch + dockerCmd(c, "network", "connect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Disconnect from a network which causes the container's default gw switch + dockerCmd(c, "network", "disconnect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Connect to a network which does not cause the container's default gw switch + dockerCmd(c, "network", "connect", "ccc", cnt) + verifyPortMap(c, cnt, "70", curPortMap, true) + verifyPortMap(c, cnt, "90", curExplPortMap, true) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { + macAddress := "02:42:ac:11:00:02" + dockerCmd(c, "network", "create", "mynetwork") + dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + mac1 := inspectField(c, "test", "NetworkSettings.Networks.bridge.MacAddress") + c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) + dockerCmd(c, "network", "connect", "mynetwork", "test") + mac2 := inspectField(c, "test", "NetworkSettings.Networks.mynetwork.MacAddress") + c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { + dockerCmd(c, "create", "--name", "test", "busybox") + networks := inspectField(c, "test", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + dockerCmd(c, "network", "connect", "test", "foo") + dockerCmd(c, "restart", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "create", "--name=foo", "busybox", "top") + dockerCmd(c, "network", "connect", "test", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart(c) + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // start the container and test if we can ping it from another container in the same network + dockerCmd(c, "start", "foo") + c.Assert(waitRun("foo"), checker.IsNil) + ip := inspectField(c, "foo", 
"NetworkSettings.Networks.test.IPAddress") + ip = strings.TrimSpace(ip) + dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) + + dockerCmd(c, "stop", "foo") + + // Test disconnect + dockerCmd(c, "network", "disconnect", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart(c) + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectContainerNonexistingNetwork(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--net=test", "-d", "--name=foo", "busybox", "top") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Stop container and remove network + dockerCmd(c, "stop", "foo") + dockerCmd(c, "network", "rm", "test") + + // Test disconnecting stopped container from nonexisting network + dockerCmd(c, "network", "disconnect", "-f", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { + // create two networks + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0") + assertNwIsAvailable(c, "n0") + + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1") + assertNwIsAvailable(c, "n1") + + // run a container on first network specifying the ip addresses + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + + // connect the container to the second network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Stop and restart the container + dockerCmd(c, "stop", "c0") + dockerCmd(c, "start", "c0") + + // verify requested addresses are applied and configs are still there + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Still it should fail to connect to the default network with a specified IP (whatever ip) + out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) + +} + +func (s *DockerNetworkSuite) 
+	// create a container
+	dockerCmd(c, "create", "--name", "c0", "busybox", "top")
+
+	// create a network
+	dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0")
+	assertNwIsAvailable(c, "n0")
+
+	// connect the container to the network, specifying the IP addresses
+	dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0")
+	verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")
+
+	// start the container, verify the config has not changed and the ip addresses are assigned
+	dockerCmd(c, "start", "c0")
+	c.Assert(waitRun("c0"), check.IsNil)
+	verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")
+	verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")
+
+	// stop the container and check that the ip config has not changed
+	dockerCmd(c, "stop", "c0")
+	verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedRequiredIP(c *check.C) {
+	// a requested IP is not supported on predefined networks
+	for _, mode := range []string{"none", "host", "bridge", "default"} {
+		checkUnsupportedNetworkAndIP(c, mode)
+	}
+
+	// a requested IP is not supported on networks with no user-defined subnets
+	dockerCmd(c, "network", "create", "n0")
+	assertNwIsAvailable(c, "n0")
+
+	out, _, err := dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top")
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+	c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error())
+
+	out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top")
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+	c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error())
+
+	dockerCmd(c, "network", "rm", "n0")
+	assertNwNotAvailable(c, "n0")
+}
+
+func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) {
+	out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top")
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+	c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error())
+}
+
+func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) {
+	if ipv4 != "" {
+		out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname))
+		c.Assert(strings.TrimSpace(out), check.Equals, ipv4)
+	}
+
+	if ipv6 != "" {
+		out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname))
+		c.Assert(strings.TrimSpace(out), check.Equals, ipv6)
+	}
+}
+
+func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) {
+	out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAddress", nwname))
+	c.Assert(strings.TrimSpace(out), check.Equals, ipv4)
+
+	out = inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.GlobalIPv6Address", nwname))
+	c.Assert(strings.TrimSpace(out), check.Equals, ipv6)
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) {
+	// create one test network
+	dockerCmd(c, "network", "create", "--ipv6", "--subnet=2001:db8:1234::/64", "n0")
+	assertNwIsAvailable(c, "n0")
+
+	// run a container with an incorrect link-local address
+	_, _, err := 
dockerCmdWithError("run", "--link-local-ip", "169.253.5.5", "busybox", "top") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("run", "--link-local-ip", "2001:db8::89", "busybox", "top") + c.Assert(err, check.NotNil) + + // run two containers with link-local ip on the test network + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--link-local-ip", "169.254.7.7", "--link-local-ip", "fe80::254:77", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + dockerCmd(c, "run", "-d", "--name", "c1", "--net=n0", "--link-local-ip", "169.254.8.8", "--link-local-ip", "fe80::254:88", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + + // run a container on the default network and connect it to the test network specifying a link-local address + dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + c.Assert(waitRun("c2"), check.IsNil) + dockerCmd(c, "network", "connect", "--link-local-ip", "169.254.9.9", "n0", "c2") + + // verify the three containers can ping each other via the link-local addresses + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) + + // Stop and restart the three containers + dockerCmd(c, "stop", "c0") + dockerCmd(c, "stop", "c1") + dockerCmd(c, "stop", "c2") + dockerCmd(c, "start", "c0") + dockerCmd(c, "start", "c1") + dockerCmd(c, "start", "c2") + + // verify the ping again + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "foo1") + dockerCmd(c, "network", "create", "-d", "bridge", "foo2") + + dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in a user-defined network with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias FirstInFoo1 must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.IsNil) + + // connect first container to foo2 network + dockerCmd(c, "network", "connect", "foo2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) + + // disconnect first container from foo1 network + dockerCmd(c, "network", "disconnect", "foo1", "first") + + // link in foo1 network must fail + _, _, err = dockerCmdWithError("exec", "second", 
"ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.NotNil) + + // link in foo2 network must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) { + netWorkName1 := "test1" + netWorkName2 := "test2" + containerName := "foo" + + dockerCmd(c, "network", "create", netWorkName1) + dockerCmd(c, "network", "create", netWorkName2) + dockerCmd(c, "create", "--name", containerName, "busybox", "top") + dockerCmd(c, "network", "connect", netWorkName1, containerName) + dockerCmd(c, "network", "connect", netWorkName2, containerName) + dockerCmd(c, "network", "disconnect", "bridge", containerName) + + dockerCmd(c, "start", containerName) + c.Assert(waitRun(containerName), checker.IsNil) + networks := inspectField(c, containerName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) + c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + containerID := strings.TrimSpace(out) + for _, net := range defaults { + res, _, err := dockerCmdWithError("network", "connect", "--alias", "alias"+net, net, containerID) + c.Assert(err, checker.NotNil) + c.Assert(res, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + dockerCmd(c, "network", "create", "-d", "bridge", "net2") + + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping first container and its alias + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // connect first container to net2 network + dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "net2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + + // disconnect first container from net1 network + dockerCmd(c, "network", "disconnect", "net1", "first") + + // ping to net1 scoped alias "foo" must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.NotNil) + + // ping to net2 scoped alias "bar" must still succeed + _, 
_, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + // ping to net2 scoped alias short-id must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // verify the alias option is rejected when running on predefined network + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + + // verify the alias option is rejected when connecting to predefined network + out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + c.Assert(waitRun("c1.net1"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + c.Assert(waitRun("c2.net1"), check.IsNil) + + // ping first container by its unqualified name + _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") + c.Assert(err, check.IsNil) + + // ping first container by its qualified name + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") + c.Assert(err, check.IsNil) + + // ping with first qualified name masked by an additional domain. 
// should fail
+	_, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "nw1")
+
+	// Sending garbage to the embedded DNS shouldn't crash the daemon
+	dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53")
+}
+
+func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) {
+	dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top")
+	c.Assert(waitRun("bb"), check.IsNil)
+
+	ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge")
+
+	// A failing redundant network connect should not alter the container's current endpoint settings
+	_, _, err := dockerCmdWithError("network", "connect", "bridge", "bb")
+	c.Assert(err, check.NotNil)
+
+	ns1 := inspectField(c, "bb", "NetworkSettings.Networks.bridge")
+	c.Assert(ns1, check.Equals, ns0)
+}
+
+func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) {
+	dockerCmd(c, "network", "create", "--driver=bridge", "--internal", "internal")
+	assertNwIsAvailable(c, "internal")
+	nr := getNetworkResource(c, "internal")
+	c.Assert(nr.Internal, checker.True)
+
+	dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, "ping: bad address")
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+}
+
+// Test for #21401
+func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) {
+	dockerCmd(c, "network", "create", "test@#$")
+	assertNwIsAvailable(c, "test@#$")
+	dockerCmd(c, "network", "rm", "test@#$")
+	assertNwNotAvailable(c, "test@#$")
+
+	dockerCmd(c, "network", "create", "kiwl$%^")
+	assertNwIsAvailable(c, "kiwl$%^")
+	dockerCmd(c, "network", "rm", "kiwl$%^")
+	assertNwNotAvailable(c, "kiwl$%^")
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) {
+	testRequires(t, DaemonIsLinux)
+	s.d.StartWithBusybox(t, "--live-restore")
+	defer s.d.Stop(t)
+	oldCon := "old"
+
+	_, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top")
+	if err != nil {
+		t.Fatal(err)
+	}
+	oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Kill the daemon
+	if err := s.d.Kill(); err != nil {
+		t.Fatal(err)
+	}
+
+	// restart the daemon
+	s.d.Start(t, "--live-restore")
+
+	// start a new container; its IP should not be the same as the old running
+	// container's.
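+	// With --live-restore the old container survives the daemon restart and
+	// keeps its address, so the allocation below is expected to pick a
+	// different IP.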
+	newCon := "new"
+	_, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top")
+	if err != nil {
+		t.Fatal(err)
+	}
+	newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if strings.TrimSpace(oldContainerIP) == strings.TrimSpace(newContainerIP) {
+		t.Fatalf("new container ip should not equal the old running container's ip")
+	}
+
+	// start a new container; it should be able to ping the old running container
+	_, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// start a new container; trying to publish port 80:80 should fail
+	out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
+	if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") {
+		t.Fatalf("port 80 is still allocated to the old running container, so allocating it to a new container should fail")
+	}
+
+	// kill the old running container and try to allocate again
+	_, err = s.d.Cmd("kill", oldCon)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Cleanup, because these containers will not be shut down by the daemon
+	out, err = s.d.Cmd("stop", newCon)
+	if err != nil {
+		t.Fatalf("err: %v %v", err, string(out))
+	}
+	_, err = s.d.Cmd("stop", strings.TrimSpace(id))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkFlagAlias(c *check.C) {
+	dockerCmd(c, "network", "create", "user")
+	output, status := dockerCmd(c, "run", "--rm", "--network=user", "--network-alias=foo", "busybox", "true")
+	c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output))
+
+	output, status, _ = dockerCmdWithError("run", "--rm", "--net=user", "--network=user", "busybox", "true")
+	c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output))
+
+	output, status, _ = dockerCmdWithError("run", "--rm", "--network=user", "--net-alias=foo", "--network-alias=bar", "busybox", "true")
+	c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output))
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkValidateIP(c *check.C) {
+	_, _, err := dockerCmdWithError("network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "mynet")
+	c.Assert(err, check.IsNil)
+	assertNwIsAvailable(c, "mynet")
+
+	_, _, err = dockerCmdWithError("run", "-d", "--name", "mynet0", "--net=mynet", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top")
+	c.Assert(err, check.IsNil)
+	c.Assert(waitRun("mynet0"), check.IsNil)
+	verifyIPAddressConfig(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988")
+	verifyIPAddresses(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988")
+
+	_, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "mynet_ip", "--ip6", "2001:db8:1234::9999", "busybox", "top")
+	c.Assert(err.Error(), checker.Contains, "invalid IPv4 address")
+	_, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "172.28.99.99", "--ip6", "mynet_ip6", "busybox", "top")
+	c.Assert(err.Error(), checker.Contains, "invalid IPv6 address")
+	// This is the case of passing an IPv4 address to `--ip6`
+	_, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "172.28.99.99", "busybox", "top")
+	c.Assert(err.Error(), checker.Contains, "invalid IPv6 address")
+	// This is the special case of an IPv4-mapped IPv6 address
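+	// (::ffff:a.b.c.d embeds an IPv4 address and is expected to be rejected
+	// just like a plain IPv4 address)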
+	_, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "::ffff:172.28.99.99", "busybox", "top")
+	c.Assert(err.Error(), checker.Contains, "invalid IPv6 address")
+}
+
+// Test case for #26220
+func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) {
+	out, _ := dockerCmd(c, "network", "inspect", "--format", "{{.Id}}", "bridge")
+
+	network := strings.TrimSpace(out)
+
+	name := "test"
+	dockerCmd(c, "create", "--name", name, "busybox", "top")
+
+	_, _, err := dockerCmdWithError("network", "disconnect", network, name)
+	c.Assert(err, check.IsNil)
+}
+
+// TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795
+// Validates that conntrack entries are correctly cleaned up once a container is destroyed
+func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) {
+	testRequires(c, IsAmd64, DaemonIsLinux, Network)
+
+	// Create a new network
+	cli.DockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind")
+	assertNwIsAvailable(c, "testbind")
+
+	// Launch the server; it will keep listening on an exposed port and reply to any request in a ping/pong fashion
+	cmd := "while true; do echo hello | nc -w 1 -lu 8080; done"
+	cli.DockerCmd(c, "run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd)
+
+	// Launch a client container; the objective is to create a flow that is NATed in order to expose the bug
+	cmd = "echo world | nc -q 1 -u 192.168.10.1 8080"
+	cli.DockerCmd(c, "run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd)
+
+	// Get all the flows using netlink
+	flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
+	c.Assert(err, check.IsNil)
+	var flowMatch int
+	for _, flow := range flows {
+		// count only the flows that we are interested in, skipping others that may be lying around the host
+		if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
+			flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) &&
+			flow.Forward.DstPort == 8080 {
+			flowMatch++
+		}
+	}
+	// The client should have created only 1 flow
+	c.Assert(flowMatch, checker.Equals, 1)
+
+	// Now delete the server; this will trigger the conntrack cleanup
+	cli.DockerCmd(c, "rm", "-fv", "server")
+
+	// Fetch all the flows again and validate that no server flow is left lying around in conntrack
+	flows, err = netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
+	c.Assert(err, check.IsNil)
+	flowMatch = 0
+	for _, flow := range flows {
+		if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
+			flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) &&
+			flow.Forward.DstPort == 8080 {
+			flowMatch++
+		}
+	}
+	// All the flows have to be gone
+	c.Assert(flowMatch, checker.Equals, 0)
+}
diff --git a/integration-cli/docker_cli_oom_killed_test.go b/integration-cli/docker_cli_oom_killed_test.go
new file mode 100644
index 0000000..54c34d2
--- /dev/null
+++ b/integration-cli/docker_cli_oom_killed_test.go
@@ -0,0 +1,30 @@
+// +build !windows
+
+package main
+
+import (
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) {
+	testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport)
+
+	name := "testoomkilled"
+	_, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory",
"32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + + c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "true") +} + +func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "false") +} diff --git a/integration-cli/docker_cli_pause_test.go b/integration-cli/docker_cli_pause_test.go new file mode 100644 index 0000000..5822329 --- /dev/null +++ b/integration-cli/docker_cli_pause_test.go @@ -0,0 +1,67 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPause(c *check.C) { + testRequires(c, IsPausable) + + name := "testeventpause" + runSleepingContainer(c, "-d", "--name", name) + + cli.DockerCmd(c, "pause", name) + pausedContainers := strings.Fields( + cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined(), + ) + c.Assert(len(pausedContainers), checker.Equals, 1) + + cli.DockerCmd(c, "unpause", name) + + out := cli.DockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)).Combined() + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") +} + +func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { + testRequires(c, IsPausable) + + containers := []string{ + "testpausewithmorecontainers1", + "testpausewithmorecontainers2", + } + for _, name := range containers { + runSleepingContainer(c, "-d", "--name", name) + } + cli.DockerCmd(c, append([]string{"pause"}, containers...)...) + pausedContainers := strings.Fields( + cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined(), + ) + c.Assert(len(pausedContainers), checker.Equals, len(containers)) + + cli.DockerCmd(c, append([]string{"unpause"}, containers...)...) 
+ + out := cli.DockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)).Combined() + events := strings.Split(strings.TrimSpace(out), "\n") + + for _, name := range containers { + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") + } +} + +func (s *DockerSuite) TestPauseFailsOnWindowsServerContainers(c *check.C) { + testRequires(c, DaemonIsWindows, NotPausable) + runSleepingContainer(c, "-d", "--name=test") + out, _, _ := dockerCmdWithError("pause", "test") + c.Assert(out, checker.Contains, "cannot pause Windows Server Containers") +} diff --git a/integration-cli/docker_cli_plugins_logdriver_test.go b/integration-cli/docker_cli_plugins_logdriver_test.go new file mode 100644 index 0000000..e308699 --- /dev/null +++ b/integration-cli/docker_cli_plugins_logdriver_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPluginLogDriver(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, IsAmd64, DaemonIsLinux) + + pluginName := "cpuguy83/docker-logdriver-test:latest" + + dockerCmd(c, "plugin", "install", pluginName) + dockerCmd(c, "run", "--log-driver", pluginName, "--name=test", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "test") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + dockerCmd(c, "start", "-a", "test") + out, _ = dockerCmd(c, "logs", "test") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nhello") + + dockerCmd(c, "rm", "test") + dockerCmd(c, "plugin", "disable", pluginName) + dockerCmd(c, "plugin", "rm", pluginName) +} + +// Make sure log drivers are listed in info, and v2 plugins are not. 
+func (s *DockerSuite) TestPluginLogDriverInfoList(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, IsAmd64, DaemonIsLinux) + pluginName := "cpuguy83/docker-logdriver-test" + + dockerCmd(c, "plugin", "install", pluginName) + status, body, err := request.SockRequest("GET", "/info", nil, daemonHost()) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var info types.Info + err = json.Unmarshal(body, &info) + c.Assert(err, checker.IsNil) + drivers := strings.Join(info.Plugins.Log, " ") + c.Assert(drivers, checker.Contains, "json-file") + c.Assert(drivers, checker.Not(checker.Contains), pluginName) +} diff --git a/integration-cli/docker_cli_plugins_test.go b/integration-cli/docker_cli_plugins_test.go new file mode 100644 index 0000000..ab4570d --- /dev/null +++ b/integration-cli/docker_cli_plugins_test.go @@ -0,0 +1,518 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +var ( + pluginProcessName = "sample-volume-plugin" + pName = "tiborvass/sample-volume-plugin" + npName = "tiborvass/test-docker-netplugin" + pTag = "latest" + pNameWithTag = pName + ":" + pTag + npNameWithTag = npName + ":" + pTag +) + +func (s *DockerSuite) TestPluginBasicOps(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id = strings.TrimSpace(id) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id)) + if !os.IsNotExist(err) { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestPluginForceRemove(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + out, _, err = dockerCmdWithError("plugin", "remove", "--force", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActive(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("volume", "create", "-d", pNameWithTag, "--name", "testvol1") + 
c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(out, checker.Contains, "in use") + + _, _, err = dockerCmdWithError("volume", "rm", "testvol1") + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("network", "create", "-d", npNameWithTag, "test") + c.Assert(err, checker.IsNil) + + nID := strings.TrimSpace(out) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is in use") + + _, _, err = dockerCmdWithError("network", "rm", nID) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npNameWithTag) +} + +func (s *DockerSuite) TestPluginInstallDisable(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "false") + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) +} + +func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + dockerCmd(c, "volume", "ls") +} + +func (s *DockerSuite) TestPluginSet(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + + dockerCmd(c, "plugin", "set", pName, "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { + 
c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) + + out, _, err := dockerCmdWithError("plugin", "install", repoName) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, `Encountered remote "application/vnd.docker.container.image.v1+json"(image) when fetching`) +} + +func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already disabled") + + _, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSuite) TestPluginCreate(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + name := "foo/bar-driver" + temp, err := ioutil.TempDir("", "foo") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(temp) + + data := `{"description": "foo plugin"}` + err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644) + c.Assert(err, checker.IsNil) + + err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "already exist") + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + // The output will consists of one HEADER line and one line of foo/bar-driver + c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) +} + +func (s *DockerSuite) TestPluginInspect(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + 
c.Assert(out, checker.Contains, "true") + + // Find the ID first + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + c.Assert(id, checker.Not(checker.Equals), "") + + // Long form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Short form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name with tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name without tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + // After remove nothing should be found + _, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.NotNil) +} + +// Test case for https://github.com/docker/docker/pull/29186#discussion_r91277345 +func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { + c.Skip("Plugin isn't supported") + + // This test should work on Windows only + testRequires(c, DaemonIsWindows) + + out, _, err := dockerCmdWithError("plugin", "inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "plugins are not supported on this platform") + c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform") +} + +func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") + + cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, icmd.Expected{ + Out: trustedName, + }) + + out := cli.DockerCmd(c, "plugin", "ls").Combined() + c.Assert(out, checker.Contains, "true") + + out = cli.DockerCmd(c, "plugin", "disable", trustedName).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out = cli.DockerCmd(c, "plugin", "enable", trustedName).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out = cli.DockerCmd(c, "plugin", "rm", "-f", trustedName).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + // Try untrusted pull to ensure we pushed the tag to the registry + cli.Docker(cli.Args("plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, SuccessDownloaded) + + out = cli.DockerCmd(c, "plugin", "ls").Combined() + c.Assert(out, checker.Contains, "true") + +} + +func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) + // install locally and push to private registry + cli.DockerCmd(c, "plugin", "install", 
"--grant-all-permissions", "--alias", pluginName, pNameWithTag) + cli.DockerCmd(c, "plugin", "push", pluginName) + cli.DockerCmd(c, "plugin", "rm", "-f", pluginName) + + // Try trusted install on untrusted plugin + cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", pluginName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) +} + +func (s *DockerSuite) TestPluginIDPrefix(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--disable", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + // Find ID first + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id = strings.TrimSpace(id) + c.Assert(err, checker.IsNil) + + // List current state + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "false") + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + + dockerCmd(c, "plugin", "set", id[:5], "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") + + // Enable + _, _, err = dockerCmdWithError("plugin", "enable", id[:5]) + c.Assert(err, checker.IsNil) + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + // Disable + _, _, err = dockerCmdWithError("plugin", "disable", id[:5]) + c.Assert(err, checker.IsNil) + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "false") + + // Remove + out, _, err = dockerCmdWithError("plugin", "remove", id[:5]) + c.Assert(err, checker.IsNil) + // List returns none + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + c.Assert(out, checker.Not(checker.Contains), pTag) +} + +func (s *DockerSuite) TestPluginListDefaultFormat(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, Network, IsAmd64) + + config, err := ioutil.TempDir("", "config-file-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(config) + + err = ioutil.WriteFile(filepath.Join(config, "config.json"), []byte(`{"pluginsFormat": "raw"}`), 0644) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", pName) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _ = dockerCmd(c, "plugin", "inspect", "--format", "{{.ID}}", pNameWithTag) + id := strings.TrimSpace(out) + + // We expect the format to be in `raw + --no-trunc` + expectedOutput := fmt.Sprintf(`plugin_id: %s +name: %s +description: A sample volume plugin for Docker +enabled: true`, id, pNameWithTag) + + out, _ = dockerCmd(c, "--config", config, "plugin", "ls", "--no-trunc") + c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput) +} + +func (s *DockerSuite) TestPluginUpgrade(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + 
plugin := "cpuguy83/docker-volume-driver-plugin-local:latest" + pluginV2 := "cpuguy83/docker-volume-driver-plugin-local:v2" + + dockerCmd(c, "plugin", "install", "--grant-all-permissions", plugin) + dockerCmd(c, "volume", "create", "--driver", plugin, "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "touch /apple/core") + + out, _, err := dockerCmdWithError("plugin", "upgrade", "--grant-all-permissions", plugin, pluginV2) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "disabled before upgrading") + + out, _ = dockerCmd(c, "plugin", "inspect", "--format={{.ID}}", plugin) + id := strings.TrimSpace(out) + + // make sure "v2" does not exists + _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id, "rootfs", "v2")) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf(out)) + + dockerCmd(c, "plugin", "disable", "-f", plugin) + dockerCmd(c, "plugin", "upgrade", "--grant-all-permissions", "--skip-remote-check", plugin, pluginV2) + + // make sure "v2" file exists + _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id, "rootfs", "v2")) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "plugin", "enable", plugin) + dockerCmd(c, "volume", "inspect", "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") +} + +func (s *DockerSuite) TestPluginMetricsCollector(c *check.C) { + c.Skip("Plugin isn't supported") + + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{}) + d.Start(c) + defer d.Stop(c) + + name := "cpuguy83/docker-metrics-plugin-test:latest" + r := cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", name), cli.Daemon(d)) + c.Assert(r.Error, checker.IsNil, check.Commentf(r.Combined())) + + // plugin lisens on localhost:19393 and proxies the metrics + resp, err := http.Get("http://localhost:19393/metrics") + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + c.Assert(err, checker.IsNil) + // check that a known metric is there... don't epect this metric to change over time.. 
probably safe + c.Assert(string(b), checker.Contains, "container_actions") +} diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go new file mode 100644 index 0000000..bcb87f5 --- /dev/null +++ b/integration-cli/docker_cli_port_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "net" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPortList(c *check.C) { + testRequires(c, DaemonIsLinux) + // one port + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", firstID) + + err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", firstID) + + // three port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", ID) + + // more and one port mapped to the same container port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9999:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "80/tcp -> 0.0.0.0:9999", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + testRange := func() { + // host port ranges used + IDs := make([]string, 3) + for i := 0; i < 3; i++ { + out, _ = dockerCmd(c, "run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + IDs[i] = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", IDs[i]) + + err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) + // Port list is not correct + c.Assert(err, checker.IsNil) + } + + // test port range exhaustion + out, _, err = dockerCmdWithError("run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + // Exhausted port range did not return an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + for i := 0; i < 3; i++ { + dockerCmd(c, "rm", "-f", IDs[i]) + } + } + testRange() + // Verify we ran re-use port ranges after they are no longer in use. + testRange() + + // test invalid port ranges + for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { + out, _, err = dockerCmdWithError("run", "-d", + "-p", invalidRange, + "busybox", "top") + // Port range should have returned an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + } + + // test host range:container range spec. 
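+	// e.g. -p 9800-9803:80-83 publishes host ports 9800..9803 onto container ports 80..83 one-to-one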
+ out, _ = dockerCmd(c, "run", "-d", + "-p", "9800-9803:80-83", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9800", + "81/tcp -> 0.0.0.0:9801", + "82/tcp -> 0.0.0.0:9802", + "83/tcp -> 0.0.0.0:9803"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + // test mixing protocols in same port range + out, _ = dockerCmd(c, "run", "-d", + "-p", "8000-8080:80", + "-p", "8000-8080:80/udp", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:8000", + "80/udp -> 0.0.0.0:8000"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) +} + +func assertPortList(c *check.C, out string, expected []string) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines) != len(expected) { + return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) + } + sort.Strings(lines) + sort.Strings(expected) + + for i := 0; i < len(expected); i++ { + if lines[i] != expected[i] { + return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") + } + } + + return nil +} + +func stopRemoveContainer(id string, c *check.C) { + dockerCmd(c, "rm", "-f", id) +} + +func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { + testRequires(c, DaemonIsLinux) + // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) + + // Run the container forcing to publish the exposed ports + dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the exposed ports in the port bindings + expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) + expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output + c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output + c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) + + // Run the container specifying explicit port bindings for the exposed ports + offset := 10000 + pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) + pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") + id := strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) + expBnd2 := 
fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with explicit port bindings and no exposed ports + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") + id = strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with one unpublished exposed port and one explicit port binding + dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the specified unpublished port and port mapping + out, _ = dockerCmd(c, "ps", "-n=1") + // Missing unpublished exposed ports (unpPort1) in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) +} + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + _, exposedPort, err := net.SplitHostPort(out) + c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") + nr := getNetworkResource(c, "internal-net") + c.Assert(nr.Internal, checker.Equals, true) + + dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", + "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") + 
c.Assert(waitRun("c1"), check.IsNil) + + _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.NotNil, + check.Commentf("Port mapping on internal network is expected to fail")) + + // Connect container to another normal bridge network + dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") + dockerCmd(c, "network", "connect", "foo-net", "c1") + + _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.IsNil, + check.Commentf("Port mapping on the new network is expected to succeed")) + +} diff --git a/integration-cli/docker_cli_proxy_test.go b/integration-cli/docker_cli_proxy_test.go new file mode 100644 index 0000000..3344985 --- /dev/null +++ b/integration-cli/docker_cli_proxy_test.go @@ -0,0 +1,51 @@ +package main + +import ( + "net" + "strings" + + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCLIProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999"), + }).Assert(c, icmd.Success) +} + +// Can't use localhost here since go has a special case to not use proxy if connecting to localhost +// See https://golang.org/pkg/net/http/#ProxyFromEnvironment +func (s *DockerDaemonSuite) TestCLIProxyProxyTCPSock(c *check.C) { + testRequires(c, SameHostDaemon) + // get the IP to use to connect since we can't use localhost + addrs, err := net.InterfaceAddrs() + c.Assert(err, checker.IsNil) + var ip string + for _, addr := range addrs { + sAddr := addr.String() + if !strings.Contains(sAddr, "127.0.0.1") { + addrArr := strings.Split(sAddr, "/") + ip = addrArr[0] + break + } + } + + c.Assert(ip, checker.Not(checker.Equals), "") + + s.d.Start(c, "-H", "tcp://"+ip+":2375") + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"}, + }).Assert(c, icmd.Expected{Error: "exit status 1", ExitCode: 1}) + // Test with no_proxy + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999", "NO_PROXY=" + ip}, + }).Assert(c, icmd.Success) +} diff --git a/integration-cli/docker_cli_prune_unix_test.go b/integration-cli/docker_cli_prune_unix_test.go new file mode 100644 index 0000000..e4e22eb --- /dev/null +++ b/integration-cli/docker_cli_prune_unix_test.go @@ -0,0 +1,294 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func pruneNetworkAndVerify(c *check.C, d *daemon.Swarm, kept, pruned []string) { + _, err := d.Cmd("network", "prune", "--force") + c.Assert(err, checker.IsNil) + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + for _, s := range kept { + c.Assert(out, checker.Contains, s) + } + for _, s := range pruned { + c.Assert(out, checker.Not(checker.Contains), s) + } +} + +func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + _, err := d.Cmd("network", "create", "n1") // used by container 
(testprune) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n2") + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n3", "--driver", "overlay") // used by service (testprunesvc) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n4", "--driver", "overlay") + c.Assert(err, checker.IsNil) + + cName := "testprune" + _, err = d.Cmd("run", "-d", "--name", cName, "--net", "n1", "busybox", "top") + c.Assert(err, checker.IsNil) + + serviceName := "testprunesvc" + replicas := 1 + out, err := d.Cmd("service", "create", "--no-resolve-image", + "--name", serviceName, + "--replicas", strconv.Itoa(replicas), + "--network", "n3", + "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas+1) + + // prune and verify + pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) + + // remove containers, then prune and verify again + _, err = d.Cmd("rm", "-f", cName) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) +} + +func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { + s.d.StartWithBusybox(c) + + out, _, err := s.d.BuildImageWithOut("test", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force", "--all") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) +} + +func (s *DockerSuite) TestPruneContainerUntil(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id1 := strings.TrimSpace(out) + cli.WaitExited(c, id1, 5*time.Second) + + until := daemonUnixTime(c) + + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id2 := strings.TrimSpace(out) + cli.WaitExited(c, id2, 5*time.Second) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "until="+until).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) +} + +func (s *DockerSuite) TestPruneContainerLabel(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "--label", "foo", "busybox").Combined() + id1 := strings.TrimSpace(out) + cli.WaitExited(c, id1, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "--label", "bar", "busybox").Combined() + id2 := strings.TrimSpace(out) + cli.WaitExited(c, id2, 5*time.Second) + + out = cli.DockerCmd(c, "run", 
"-d", "busybox").Combined() + id3 := strings.TrimSpace(out) + cli.WaitExited(c, id3, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "--label", "foobar", "busybox").Combined() + id4 := strings.TrimSpace(out) + cli.WaitExited(c, id4, 5*time.Second) + + // Add a config file of label=foobar, that will have no impact if cli is label!=foobar + config := `{"pruneFilters": ["label=foobar"]}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + // With config.json only, prune based on label=foobar + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + c.Assert(strings.TrimSpace(out), checker.Contains, id4) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label=foo").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label!=bar").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + // With config.json label=foobar and CLI label!=foobar, CLI label!=foobar supersede + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force", "--filter", "label!=foobar").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) +} + +func (s *DockerSuite) TestPruneVolumeLabel(c *check.C) { + out, _ := dockerCmd(c, "volume", "create", "--label", "foo") + id1 := strings.TrimSpace(out) + c.Assert(id1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create", "--label", "bar") + id2 := strings.TrimSpace(out) + c.Assert(id2, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create") + id3 := strings.TrimSpace(out) + c.Assert(id3, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create", "--label", "foobar") + id4 := strings.TrimSpace(out) + c.Assert(id4, checker.Not(checker.Equals), "") + + // Add a config file of label=foobar, that will have no impact if cli is label!=foobar + config := `{"pruneFilters": ["label=foobar"]}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + // With config.json only, prune based on label=foobar + out, _ = dockerCmd(c, "--config", d, "volume", "prune", "--force") + c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + c.Assert(strings.TrimSpace(out), checker.Contains, id4) + + out, _ = dockerCmd(c, "volume", "prune", "--force", "--filter", "label=foo") + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out, _ = dockerCmd(c, "volume", "prune", "--force", "--filter", "label!=bar") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + // With config.json label=foobar and CLI label!=foobar, CLI label!=foobar supersede + out, _ = dockerCmd(c, "--config", d, "volume", "prune", "--force", "--filter", "label!=foobar") + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) +} + +func (s *DockerSuite) TestPruneNetworkLabel(c *check.C) { + c.Skip("swarm isn't supported") + + dockerCmd(c, "network", "create", "--label", "foo", "n1") + dockerCmd(c, "network", "create", "--label", "bar", "n2") + dockerCmd(c, "network", "create", "n3") + + out, _ := dockerCmd(c, "network", "prune", "--force", "--filter", "label=foo") + c.Assert(strings.TrimSpace(out), checker.Contains, "n1") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n2") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n3") + + out, _ = dockerCmd(c, "network", "prune", "--force", "--filter", "label!=bar") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n1") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n2") + c.Assert(strings.TrimSpace(out), checker.Contains, "n3") + + out, _ = dockerCmd(c, "network", "prune", "--force") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n1") + c.Assert(strings.TrimSpace(out), checker.Contains, "n2") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n3") +} + +func (s *DockerDaemonSuite) TestPruneImageLabel(c *check.C) { + s.d.StartWithBusybox(c) + + out, _, err := s.d.BuildImageWithOut("test1", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id1 := strings.TrimSpace(out) + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + + out, _, err = s.d.BuildImageWithOut("test2", + `FROM busybox + LABEL bar=foo`, true, "-q") + c.Assert(err, checker.IsNil) + id2 := strings.TrimSpace(out) + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label=foo=bar") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + 
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2)
+
+	out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label!=bar=foo")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2)
+
+	out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label=bar=foo")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1)
+	c.Assert(strings.TrimSpace(out), checker.Contains, id2)
+}
diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go
new file mode 100644
index 0000000..b6c9350
--- /dev/null
+++ b/integration-cli/docker_cli_ps_test.go
@@ -0,0 +1,967 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/pkg/stringid"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestPsListContainersBase(c *check.C) {
+	out := runSleepingContainer(c, "-d")
+	firstID := strings.TrimSpace(out)
+
+	out = runSleepingContainer(c, "-d")
+	secondID := strings.TrimSpace(out)
+
+	// not long running
+	out, _ = dockerCmd(c, "run", "-d", "busybox", "true")
+	thirdID := strings.TrimSpace(out)
+
+	out = runSleepingContainer(c, "-d")
+	fourthID := strings.TrimSpace(out)
+
+	// make sure the second is running
+	c.Assert(waitRun(secondID), checker.IsNil)
+
+	// make sure third one is not running
+	dockerCmd(c, "wait", thirdID)
+
+	// make sure the fourth is running
+	c.Assert(waitRun(fourthID), checker.IsNil)
+
+	// all
+	out, _ = dockerCmd(c, "ps", "-a")
+	c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out))
+
+	// running
+	out, _ = dockerCmd(c, "ps")
+	c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out))
+
+	// limit
+	out, _ = dockerCmd(c, "ps", "-n=2", "-a")
+	expected := []string{fourthID, thirdID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-n=2")
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out))
+
+	// filter since
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a")
+	expected = []string{fourthID, thirdID, secondID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID)
+	expected = []string{fourthID, secondID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID)
+	expected = []string{fourthID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
+
+	// filter before
+	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a")
+	expected = []string{thirdID, secondID, firstID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID)
+	expected = []string{secondID, firstID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID)
+	expected = []string{secondID, firstID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out))
+
+	// filter since & before
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a")
+	expected = []string{thirdID, secondID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID)
+	expected = []string{secondID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out))
+
+	// filter since & limit
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a")
+	expected = []string{fourthID, thirdID}
+
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2")
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out))
+
+	// filter before & limit
+	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a")
+	expected = []string{thirdID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1")
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
+
+	// filter since & filter before & limit
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a")
+	expected = []string{thirdID}
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+
+	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1")
+	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
+
+}
+
+func assertContainerList(out string, expected []string) bool {
+	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+
+	if len(lines)-1 != len(expected) {
+		return false
+	}
+
+	containerIDIndex := strings.Index(lines[0], "CONTAINER ID")
+	for i := 0; i < len(expected); i++ {
+		foundID := lines[i+1][containerIDIndex : containerIDIndex+12]
+		if foundID != expected[i][:12] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// FIXME(vdemeester) Move
this into a unit test in daemon package +func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "busybox") + + baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") + baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") + baseSizeIndex := strings.Index(baseLines[0], "SIZE") + baseFoundsize := baseLines[1][baseSizeIndex:] + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, "B")[0]) + c.Assert(err, checker.IsNil) + + name := "test_size" + dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + id := getIDByName(c, name) + + var result *icmd.Result + + wait := make(chan struct{}) + go func() { + result = icmd.RunCommand(dockerBinary, "ps", "-s", "-n=1") + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * time.Second): + c.Fatalf("Calling \"docker ps -s\" timed out!") + } + result.Assert(c, icmd.Success) + lines := strings.Split(strings.Trim(result.Combined(), "\n "), "\n") + c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) + sizeIndex := strings.Index(lines[0], "SIZE") + idIndex := strings.Index(lines[0], "CONTAINER ID") + foundID := lines[1][idIndex : idIndex+12] + c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) + expectedSize := fmt.Sprintf("%dB", (2 + baseBytes)) + foundSize := lines[1][sizeIndex:] + c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) +} + +func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + // start exited container + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() + firstID := strings.TrimSpace(out) + + // make sure the exited container is not running + cli.DockerCmd(c, "wait", firstID) + + // start running container + out = cli.DockerCmd(c, "run", "-itd", "busybox").Combined() + secondID := strings.TrimSpace(out) + + // filter containers by exited + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited").Combined() + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID) + + out = cli.DockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, secondID) + + result := cli.Docker(cli.Args("ps", "-a", "-q", "--filter=status=rubbish"), cli.WithTimeout(time.Second*60)) + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Unrecognised filter value for status", + }) + + // Windows doesn't support pausing of containers + if testEnv.DaemonPlatform() != "windows" { + // pause running container + out = cli.DockerCmd(c, "run", "-itd", "busybox").Combined() + pausedID := strings.TrimSpace(out) + cli.DockerCmd(c, "pause", pausedID) + // make sure the container is unpaused to let the daemon stop it properly + defer func() { cli.DockerCmd(c, "unpause", pausedID) }() + + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused").Combined() + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, pausedID) + } +} + +func (s *DockerSuite) 
TestPsListContainersFilterHealth(c *check.C) {
+	// Test legacy no health check
+	out := runSleepingContainer(c, "--name=none_legacy")
+	containerID := strings.TrimSpace(out)
+
+	cli.WaitRun(c, containerID)
+
+	out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined()
+	containerOut := strings.TrimSpace(out)
+	c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out))
+
+	// Test no health check specified explicitly
+	out = runSleepingContainer(c, "--name=none", "--no-healthcheck")
+	containerID = strings.TrimSpace(out)
+
+	cli.WaitRun(c, containerID)
+
+	out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined()
+	containerOut = strings.TrimSpace(out)
+	c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out))
+
+	// Test failing health check
+	out = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s")
+	containerID = strings.TrimSpace(out)
+
+	waitForHealthStatus(c, "failing_container", "starting", "unhealthy")
+
+	out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy").Combined()
+	containerOut = strings.TrimSpace(out)
+	c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out))
+
+	// Check passing healthcheck
+	out = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s")
+	containerID = strings.TrimSpace(out)
+
+	waitForHealthStatus(c, "passing_container", "starting", "healthy")
+
+	out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy").Combined()
+	containerOut = strings.TrimSpace(out)
+	c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out))
+}
+
+func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) {
+	// start container
+	out, _ := dockerCmd(c, "run", "-d", "busybox")
+	firstID := strings.TrimSpace(out)
+
+	// start another container
+	runSleepingContainer(c)
+
+	// filter containers by id
+	out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID)
+	containerOut := strings.TrimSpace(out)
+	c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for id filter, output: %q", firstID[:12], containerOut, out))
+}
+
+func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) {
+	// start container
+	dockerCmd(c, "run", "--name=a_name_to_match", "busybox")
+	id := getIDByName(c, "a_name_to_match")
+
+	// start another container
+	runSleepingContainer(c, "--name=b_name_to_match")
+
+	// filter containers by name
+	out, _ := dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match")
+	containerOut := strings.TrimSpace(out)
+	c.Assert(containerOut, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s for name filter, output: %q", id[:12], containerOut, out))
+}
+
+// Test for the ancestor filter for ps.
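+// The ancestor filter matches containers created from the given image or from any image that builds on it.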
+// There is also the same test but with image:tag@digest in docker_cli_by_digest_test.go +// +// What the test sets up: +// - Create 2 images based on busybox using the same repository but different tags +// - Create an image based on the previous image (images_ps_filter_test2) +// - Run containers for each of those images (busybox, images_ps_filter_test1, images_ps_filter_test2) +// - Filter the containers by ancestor +func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + // Build images + imageName1 := "images_ps_filter_test1" + buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox + LABEL match me 1`)) + imageID1 := getIDByName(c, imageName1) + + imageName1Tagged := "images_ps_filter_test1:tag" + buildImageSuccessfully(c, imageName1Tagged, build.WithDockerfile(`FROM busybox + LABEL match me 1 tagged`)) + imageID1Tagged := getIDByName(c, imageName1Tagged) + + imageName2 := "images_ps_filter_test2" + buildImageSuccessfully(c, imageName2, build.WithDockerfile(fmt.Sprintf(`FROM %s + LABEL match me 2`, imageName1))) + imageID2 := getIDByName(c, imageName2) + + // start containers + dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") + firstID := getIDByName(c, "first") + + // start another container + dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") + secondID := getIDByName(c, "second") + + // start third container + dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") + thirdID := getIDByName(c, "third") + + // start fourth container + dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") + fourthID := getIDByName(c, "fourth") + + // start fifth container + dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") + fifthID := getIDByName(c, "fifth") + + var filterTestSuite = []struct { + filterName string + expectedIDs []string + }{ + // non-existent images + {"nonexistent", []string{}}, + {"nonexistent:tag", []string{}}, + // image + {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, + {imageName1, []string{thirdID, fifthID}}, + {imageName2, []string{fifthID}}, + // image:tag + {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, + {imageName1Tagged, []string{fourthID}}, + // short-id + {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, + {stringid.TruncateID(imageID2), []string{fifthID}}, + // full-id + {imageID1, []string{thirdID, fifthID}}, + {imageID1Tagged, []string{fourthID}}, + {imageID2, []string{fifthID}}, + } + + var out string + for _, filter := range filterTestSuite { + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) + checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) + } + + // Multiple ancestor filter + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) + checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) +} + +func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { + actualIDs := []string{} + if out != "" { + actualIDs = strings.Split(out[:len(out)-1], "\n") + } + sort.Strings(actualIDs) + sort.Strings(expectedIDs) + + c.Assert(actualIDs, checker.HasLen, len(expectedIDs), check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), actualIDs)) + if len(expectedIDs) > 0 { + same := true + for i := range expectedIDs { + if actualIDs[i] != expectedIDs[i] { + c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) + same = false + break + } + } + c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) + } +} + +func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { + // start container + dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") + firstID := getIDByName(c, "first") + + // start another container + dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") + secondID := getIDByName(c, "second") + + // start third container + dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") + thirdID := getIDByName(c, "third") + + // filter containers by exact match + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels, but expect not found because of AND behavior + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, "", check.Commentf("Expected nothing, got %s for label filter, output: %q", containerOut, out)) + + // filter containers by exact key + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Contains, firstID) + c.Assert(containerOut, checker.Contains, secondID) + c.Assert(containerOut, checker.Not(checker.Contains), thirdID) +} + +func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { + runSleepingContainer(c, "--name=sleep") + + dockerCmd(c, "run", "--name", "zero1", "busybox", "true") + firstZero := getIDByName(c, "zero1") + + dockerCmd(c, "run", "--name", "zero2", "busybox", "true") + secondZero := getIDByName(c, "zero2") + + out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail. out: %s, err: %v", out, err)) + + firstNonZero := getIDByName(c, "nonzero1") + + out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail. out: %s, err: %v", out, err)) + secondNonZero := getIDByName(c, "nonzero2") + + // filter containers by exited=0 + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") + ids := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers, got %d: %s", len(ids), out)) + c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) + + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + ids = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 non-zero exited containers, got %d", len(ids))) + c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) + +} + +func (s *DockerSuite) TestPsRightTagName(c *check.C) { + // TODO Investigate further why this fails on Windows to Windows CI + testRequires(c, DaemonIsLinux) + tag := "asybox:shmatest" + dockerCmd(c, "tag", "busybox", tag) + + var id1 string + out := runSleepingContainer(c) + id1 = strings.TrimSpace(string(out)) + + var id2 string + out = runSleepingContainerInImage(c, tag) + id2 = strings.TrimSpace(string(out)) + + var imageID string + out = inspectField(c, "busybox", "Id") + imageID = strings.TrimSpace(string(out)) + + var id3 string + out = runSleepingContainerInImage(c, imageID) + id3 = strings.TrimSpace(string(out)) + + out, _ = dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // skip header + lines = lines[1:] + c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running containers, got %d", len(lines))) + for _, line := range lines { + f := strings.Fields(line) + switch f[0] { + case id1: + c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) + case id2: + c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) + case id3: + c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", imageID, id3, f[1])) + default: + c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) + } + } +} + +func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { + // Problematic on Windows as it doesn't support links as of Jan 2016 + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "--name=first") + runSleepingContainer(c, "--name=second", "--link=first:first") + + out, _ := dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // strip header + lines = lines[1:] + expected := []string{"second", "first,second/first"} + var names []string + for _, l := range lines { + fields := strings.Fields(l) + names = append(names, fields[len(fields)-1]) + } + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { + // Problematic on Windows as it doesn't support port ranges as of Jan 2016 + testRequires(c, DaemonIsLinux) + portRange := "3850-3900" + dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") + + out, _ := dockerCmd(c, "ps") + + c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) + +} + +func (s *DockerSuite) TestPsWithSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") + + out, _ := dockerCmd(c, "ps", "--size") + c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) +} + +func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + shortCID := cID[:12] + + // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) + + // Make sure it DOES show up as 'Created' for 'ps -a' + out, _ = dockerCmd(c, "ps", "-a") + + hits := 0 + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, shortCID) { + continue + } + hits++ + c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) + } + + c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) + + // filter containers by 'created' status - note, no -a needed + out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") + containerOut := strings.TrimSpace(out) + c.Assert(cID, checker.HasPrefix, containerOut) +} + +func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { + // Problematic on Windows as it doesn't support links as of Jan 2016 + testRequires(c, DaemonIsLinux) + // create 2 containers and link them + dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") + dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") + + // use the new format capabilities to only list the names and --no-trunc to get all names + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"parent", "child,parent/linkedone"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) + + // now list without turning off truncation and make sure we only get the non-link names + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + expected = []string{"parent", "child"} + var truncNames []string + truncNames = append(truncNames, lines...) + c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) +} + +// Test for GitHub issue #21772 +func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { + runSleepingContainer(c, "--name=test1") + runSleepingContainer(c, "--name=test2") + + // use the new format capabilities to list the names twice + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"test2 test2", "test1 test1"} + var names []string + names = append(names, lines...)
+ c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { + // make sure no-container "docker ps" still prints the header row + out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") + c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) + + // verify that "docker ps" with a container still prints the header row also + runSleepingContainer(c, "--name=test") + out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") + c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) +} + +func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + config := `{ + "psFormat": "default {{ .ID }}" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out := runSleepingContainer(c, "--name=test") + id := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "--config", d, "ps", "-q") + c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) +} + +// Test for GitHub issue #12595 +func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { + // TODO: Investigate why this fails on Windows to Windows CI further. + testRequires(c, DaemonIsLinux) + originalImageName := "busybox:TestPsImageIDAfterUpdate-original" + updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" + + icmd.RunCommand(dockerBinary, "tag", "busybox:latest", originalImageName).Assert(c, icmd.Success) + + originalImageID := getIDByName(c, originalImageName) + + result := icmd.RunCommand(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) 
+ result.Assert(c, icmd.Success) + containerID := strings.TrimSpace(result.Combined()) + + result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc") + result.Assert(c, icmd.Success) + + lines := strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageName) + } + + icmd.RunCommand(dockerBinary, "commit", containerID, updatedImageName).Assert(c, icmd.Success) + icmd.RunCommand(dockerBinary, "tag", updatedImageName, originalImageName).Assert(c, icmd.Success) + + result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc") + result.Assert(c, icmd.Success) + + lines = strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageID) + } + +} + +func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + out, _ := dockerCmd(c, "ps") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := "0.0.0.0:5000->5000/tcp" + fields := strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) + + dockerCmd(c, "kill", "foo") + dockerCmd(c, "wait", "foo") + out, _ = dockerCmd(c, "ps", "-l") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + fields = strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not get %v", expected)) +} + +func (s *DockerSuite) TestPsShowMounts(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + mp := prefix + slash + "test" + + dockerCmd(c, "volume", "create", "ps-volume-test") + // volume mount containers + runSleepingContainer(c, "--name=volume-test-1", "--volume", "ps-volume-test:"+mp) + c.Assert(waitRun("volume-test-1"), checker.IsNil) + runSleepingContainer(c, "--name=volume-test-2", "--volume", mp) + c.Assert(waitRun("volume-test-2"), checker.IsNil) + // bind mount container + var bindMountSource string + var bindMountDestination string + if DaemonIsWindows() { + bindMountSource = "c:\\" + bindMountDestination = "c:\\t" + } else { + bindMountSource = "/tmp" + bindMountDestination = "/t" + } + runSleepingContainer(c, "--name=bind-mount-test", "-v", bindMountSource+":"+bindMountDestination) + c.Assert(waitRun("bind-mount-test"), checker.IsNil) + + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 3) + + fields := strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + fields = strings.Fields(lines[1]) + c.Assert(fields, checker.HasLen, 2) + + anonymousVolumeID := fields[1] + + fields = strings.Fields(lines[2]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by volume name + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // empty results filtering by unknown volume + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) + + // filter by mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 2) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, anonymousVolumeID) + fields = strings.Fields(lines[1]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by bind mount source + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // filter by bind mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // empty results filtering by unknown mount point + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) +} + +func (s *DockerSuite) TestPsFormatSize(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c) + + out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") + lines := strings.Split(out, "\n") + c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") + lines = strings.Split(out, "\n") + c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") + lines = strings.Split(out, "\n") + c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) +} + +func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + // TODO default network on Windows is not called "bridge", and creating a + // custom network fails on Windows with "Error response from daemon: plugin not found" + testRequires(c, DaemonIsLinux) + + // create some containers + runSleepingContainer(c, "--net=bridge", "--name=onbridgenetwork") + runSleepingContainer(c, "--net=none", "--name=onnonenetwork") + + // Filter docker ps on a non-existent network + out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist") + containerOut := strings.TrimSpace(string(out)) + lines := strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have no containers + c.Assert(lines, checker.HasLen, 0) + + // Filter docker ps on network bridge + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Making sure onbridgenetwork is in the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + + // Filter docker ps on networks bridge and none + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have both containers + c.Assert(lines, checker.HasLen, 2) + + // Making sure onbridgenetwork and onnonenetwork are in the output + c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on bridge network\n")) + + nwID, _ := dockerCmd(c, "network", "inspect", "--format", "{{.ID}}", "bridge") + + // Filter by network ID + out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID) + containerOut = strings.TrimSpace(string(out)) + + c.Assert(containerOut, checker.Contains, "onbridgenetwork") + + // Filter by partial network ID + partialnwID := string(nwID[0:4]) + + out, _ = dockerCmd(c, "ps", "--filter", "network="+partialnwID) + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Making sure onbridgenetwork is in the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + +} + +func (s *DockerSuite) TestPsByOrder(c *check.C) { + name1 := "xyz-abc" + out := runSleepingContainer(c, "--name", name1) + container1 := strings.TrimSpace(out) + + name2 := "xyz-123" + out = runSleepingContainer(c, "--name", name2) + container2 := strings.TrimSpace(out) + + name3 := "789-abc" + out = runSleepingContainer(c, "--name", name3) + + name4 := "789-123" + out = runSleepingContainer(c, "--name", name4) + + // Running the same query multiple times should give the same result + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) + + // Running the same query multiple times should give the same result + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) +} + +func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { + _, errCode, _ := dockerCmdWithError("ps", "--filter") + c.Assert(errCode, checker.Equals, 125) +} + +// Test case for GitHub issue #30291 +func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") + out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) + c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") +} + +func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "--publish=80", "busybox", "top") + id1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "--expose=8080", "busybox", "top") + id2 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "ps",
"--no-trunc", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "publish=80-8080/udp") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8081") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "publish=80-81") + c.Assert(strings.TrimSpace(out), checker.Equals, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=80/tcp") + c.Assert(strings.TrimSpace(out), checker.Equals, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8080/tcp") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Equals, id2) +} diff --git a/integration-cli/docker_cli_pull_local_test.go b/integration-cli/docker_cli_pull_local_test.go new file mode 100644 index 0000000..a45e313 --- /dev/null +++ b/integration-cli/docker_cli_pull_local_test.go @@ -0,0 +1,470 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other +// tags for the same image) are not also pulled down. +// +// Ref: docker/docker#8141 +func testPullImageWithAliases(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh"} { + repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) + } + + // Tag and push the same image multiple times. + for _, repo := range repos { + dockerCmd(c, "tag", "busybox", repo) + dockerCmd(c, "push", repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Pull a single tag and verify it doesn't bring down all aliases. + dockerCmd(c, "pull", repos[0]) + dockerCmd(c, "inspect", repos[0]) + for _, repo := range repos[1:] { + _, _, err := dockerCmdWithError("inspect", repo) + c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) + } +} + +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +// testConcurrentPullWholeRepo pulls the same repo concurrently. 
+func testConcurrentPullWholeRepo(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo))) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Run multiple re-pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + result := icmd.RunCommand(dockerBinary, "pull", "-a", repoName) + results <- result.Error + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +// testConcurrentFailingPull tries a concurrent pull that doesn't succeed. +func testConcurrentFailingPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + // Run multiple pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + result := icmd.RunCommand(dockerBinary, "pull", repoName+":asdfasdf") + results <- result.Error + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) + } +} + +func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +// testConcurrentPullMultipleTags pulls multiple tags from the same repo +// concurrently. +func testConcurrentPullMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo))) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull individual tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + result := icmd.RunCommand(dockerBinary, "pull", repo) + results <- result.Error + }(repo) + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. 
+ for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +// testPullIDStability verifies that pushing an image and pulling it back +// preserves the image ID. +func testPullIDStability(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/id-stability" + baseImage := "busybox" + + buildImageSuccessfully(c, derivedImage, build.WithDockerfile(fmt.Sprintf(` + FROM %s + ENV derived true + ENV asdf true + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage))) + + originalID := getIDByName(c, derivedImage) + dockerCmd(c, "push", derivedImage) + + // Pull + out, _ := dockerCmd(c, "pull", derivedImage) + if strings.Contains(out, "Pull complete") { + c.Fatalf("repull redownloaded a layer: %s", out) + } + + derivedIDAfterPull := getIDByName(c, derivedImage) + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image runs correctly + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull = getIDByName(c, derivedImage) + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} + +func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +// #21213 +func testPullNoLayers(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) + + buildImageSuccessfully(c, repoName, build.WithDockerfile(` + FROM scratch + ENV foo bar`)) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "pull", repoName) +} + +func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { + testRequires(c, NotArm) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Inject a manifest list into the registry + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: 
schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "bogus_arch", + OS: "bogus_os", + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: pushDigest, + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + }, + }, + }, + } + + manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) + + manifestListDigest := digest.FromBytes(manifestListJSON) + hexDigest := manifestListDigest.Hex() + + registryV2Path := s.reg.Path() + + // Write manifest list to blob store + blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) + err = os.MkdirAll(blobDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir")) + blobPath := filepath.Join(blobDir, "data") + err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) + + // Add to revision store + revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) + err = os.Mkdir(revisionDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) + revisionPath := filepath.Join(revisionDir, "link") + err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) + + // Update tag + tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") + err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) + + // Verify that the image can be pulled through the manifest list. + out, _ := dockerCmd(c, "pull", repoName) + + // The pull output includes "Digest: <digest>", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // Make sure the pushed and pulled digests match + c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) + + // Was the image actually created?
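+ // dockerCmd fails the test on a non-zero exit code, and "docker inspect" exits + // non-zero for an unknown reference, so this call doubles as the existence check.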
+ dockerCmd(c, "inspect", repoName) + + dockerCmd(c, "rmi", repoName) +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "https://"+privateRegistryURL) + dockerCmd(c, "--config", tmp, "pull", repoName) + + // likewise push should work + repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL) + dockerCmd(c, "tag", repoName, repoName2) + dockerCmd(c, "--config", tmp, "push", repoName2) + + // logout should work w scheme also because it will be stripped + dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "pull", repoName) +} + +// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) +func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v:latest", repo) + repoTag2 := fmt.Sprintf("%v:t1", repo) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + dockerCmd(c, 
"tag", "busybox", repoTag2) + dockerCmd(c, "push", repo) + dockerCmd(c, "rmi", repoTag1) + dockerCmd(c, "rmi", repoTag2) + + out, _ := dockerCmd(c, "run", repo) + c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) + + // There should be only one line for repo, the one with repo:latest + outImageCmd, _ := dockerCmd(c, "images", repo) + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) +} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go new file mode 100644 index 0000000..fd91edb --- /dev/null +++ b/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,284 @@ +package main + +import ( + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +// TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client +// prints all expected output. +func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + out := s.Cmd(c, "pull", "hello-world") + defer deleteImages("hello-world") + + c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) + c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) + c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") + + matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) + c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) + c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) + _, err := digest.Parse(matches[0][1]) + c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullNonExistingImage pulls non-existing images from the central registry, with different +// combinations of implicit tag and library prefix. +func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + type entry struct { + repo string + alias string + tag string + } + + entries := []entry{ + {"asdfasdf", "asdfasdf", "foobar"}, + {"asdfasdf", "library/asdfasdf", "foobar"}, + {"asdfasdf", "asdfasdf", ""}, + {"asdfasdf", "asdfasdf", "latest"}, + {"asdfasdf", "library/asdfasdf", ""}, + {"asdfasdf", "library/asdfasdf", "latest"}, + } + + // The option field indicates "-a" or not. + type record struct { + e entry + option string + out string + err error + } + + // Execute 'docker pull' in parallel, pass results (out, err) and + // necessary information ("-a" or not, and the image name) to channel. 
+ var group sync.WaitGroup + recordChan := make(chan record, len(entries)*2) + for _, e := range entries { + group.Add(1) + go func(e entry) { + defer group.Done() + repoName := e.alias + if e.tag != "" { + repoName += ":" + e.tag + } + out, err := s.CmdWithError("pull", repoName) + recordChan <- record{e, "", out, err} + }(e) + if e.tag == "" { + // pull -a on a nonexistent registry should fall back as well + group.Add(1) + go func(e entry) { + defer group.Done() + out, err := s.CmdWithError("pull", "-a", e.alias) + recordChan <- record{e, "-a", out, err} + }(e) + } + } + + // Wait for completion + group.Wait() + close(recordChan) + + // Process the results (out, err). + for record := range recordChan { + if len(record.option) == 0 { + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) + } else { + // pull -a on a nonexistent registry should fall back as well + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) + } + } + +} + +// TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies +// that pulling the same image with different combinations of implicit elements of the image +// reference (tag, repository, central registry url, ...) doesn't trigger a new pull nor leads to +// multiple images. +func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Pull hello-world from v2 + pullFromV2 := func(ref string) (int, string) { + out := s.Cmd(c, "pull", "hello-world") + v1Retries := 0 + for strings.Contains(out, "this image was pulled from a legacy registry") { + // Some network errors may cause fallbacks to the v1 + // protocol, which would violate the test's assumption + // that it will get the same images. To make the test + // more robust against these network glitches, allow a + // few retries if we end up with a v1 pull. + + if v1Retries > 2 { + c.Fatalf("too many v1 fallback incidents when pulling %s", ref) + } + + s.Cmd(c, "rmi", ref) + out = s.Cmd(c, "pull", ref) + + v1Retries++ + } + + return v1Retries, out + } + + pullFromV2("hello-world") + defer deleteImages("hello-world") + + s.Cmd(c, "tag", "hello-world", "hello-world-backup") + + for _, ref := range []string{ + "hello-world", + "hello-world:latest", + "library/hello-world", + "library/hello-world:latest", + "docker.io/library/hello-world", + "index.docker.io/library/hello-world", + } { + var out string + for { + var v1Retries int + v1Retries, out = pullFromV2(ref) + + // Keep repeating the test case until we don't hit a v1 + // fallback case. We won't get the right "Image is up + // to date" message if the local image was replaced + // with one pulled from v1. 
+ if v1Retries == 0 { + break + } + s.Cmd(c, "rmi", ref) + s.Cmd(c, "tag", "hello-world-backup", "hello-world") + } + c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") + } + + s.Cmd(c, "rmi", "hello-world-backup") + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected. +func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + out, err := s.CmdWithError("pull", "scratch") + c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) + c.Assert(out, checker.Contains, "'scratch' is a reserved name") + c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") +} + +// TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it +// results in more images than a naked pull. +func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + s.Cmd(c, "pull", "busybox") + outImageCmd := s.Cmd(c, "images", "busybox") + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) + + s.Cmd(c, "pull", "--all-tags=true", "busybox") + outImageAllTagCmd := s.Cmd(c, "images", "busybox") + linesCount := strings.Count(outImageAllTagCmd, "\n") + c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) + + // Verify that the line for 'busybox:latest' is left unchanged. + var latestLine string + for _, line := range strings.Split(outImageAllTagCmd, "\n") { + if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + latestLine = line + break + } + } + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) + splitCurrent := strings.Fields(splitOutImageCmd[1]) + + // Clear relative creation times, since these can easily change between + // two invocations of "docker images". Without this, the test can fail + // like this: + // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} + // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} + splitLatest[3] = "" + splitLatest[4] = "" + splitLatest[5] = "" + splitCurrent[3] = "" + splitCurrent[4] = "" + splitCurrent[5] = "" + + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) +} + +// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation +// gets cancelled. +// +// Ref: docker/docker#15589 +func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "hello-world:latest" + + pullCmd := s.MakeCmd("pull", repoName) + stdout, err := pullCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + err = pullCmd.Start() + c.Assert(err, checker.IsNil) + + // Cancel as soon as we get some output. 
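+ // Reading a few bytes of pull output guarantees the transfer is underway before the + // client process is killed; the inspect that follows must fail, showing the daemon + // cancelled the pull rather than completing it in the background.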
+ buf := make([]byte, 10) + _, err = stdout.Read(buf) + c.Assert(err, checker.IsNil) + + err = pullCmd.Process.Kill() + c.Assert(err, checker.IsNil) + + time.Sleep(2 * time.Second) + _, err = s.CmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { + // @TODO TestPullNoCredentialsNotFound expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported + s.d.StartWithBusybox(c, "--disable-legacy-registry=false") + + // we don't care about the actual image, we just want to see image not found + // because that means v2 call returned 401 and we fell back to v1 which usually + // gives a 404 (in this case the test registry doesn't handle v1 at all) + out, err := s.d.Cmd("pull", privateRegistryURL+"/busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image busybox:latest not found") +} + +// Regression test for https://github.com/docker/docker/issues/26429 +func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { + testRequires(c, DaemonIsWindows, Network) + _, _, err := dockerCmdWithError("pull", "ubuntu") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} + +// Regression test for https://github.com/docker/docker/issues/28892 +func (s *DockerSuite) TestPullWindowsImageFailsOnLinux(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + _, _, err := dockerCmdWithError("pull", "microsoft/nanoserver") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} diff --git a/integration-cli/docker_cli_pull_trusted_test.go b/integration-cli/docker_cli_pull_trusted_test.go new file mode 100644 index 0000000..d9628d9 --- /dev/null +++ b/integration-cli/docker_cli_pull_trusted_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "io/ioutil" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-pull") + + // Try pull + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + + cli.DockerCmd(c, "rmi", repoName) + // Try untrusted pull to ensure we pushed the tag to the registry + cli.Docker(cli.Args("pull", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloaded) +} + +func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-pull") + + // Try pull (run from isolated directory without trust information) + cli.Docker(cli.Args("--config", "/tmp/docker-isolated", "pull", repoName), trustedCmd).Assert(c, SuccessTagging) + + cli.DockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + // Try trusted pull on untrusted tag + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) +} + 
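+// TestTrustedPullFromBadTrustServer pushes and pulls a signed image, then replaces the +// notary server with a fresh ("evil") one serving different trust data for the same +// repository, and verifies that the original client refuses to rotate to the new +// trusted root.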
+func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try pull + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + + c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "could not rotate trust to a new trusted root", + }) +} + +func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-offline-pull") + + cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error contacting notary server", + }) + // Do valid trusted pull to warm cache + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Try pull again with invalid notary server, should use cache + cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, SuccessTagging) +} + +func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") + // tag the image and upload it to the private registry + cli.BuildCmd(c, repoName, build.WithDockerfile(` + FROM busybox + CMD echo trustedpulldelete + `)) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + cli.DockerCmd(c, "rmi", repoName) + + // Try pull + result := cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Success) + + matches := digestRegex.FindStringSubmatch(result.Combined()) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", result.Combined())) + pullDigest := matches[1] + + imageID := inspectField(c, repoName, "Id") + + imageByDigest := repoName + "@" + pullDigest + byDigestID := inspectField(c, imageByDigest, "Id") + + c.Assert(byDigestID, checker.Equals, imageID) + + // rmi of tag should also remove the digest reference + cli.DockerCmd(c, "rmi", repoName) + + _, err := inspectFieldWithError(imageByDigest, "Id") + c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s 
*DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // Push with targets first, initializing the repo + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets") + + // Try pull, check we retrieve from targets role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets role", + }) + + // Now we'll create the releases role, and try pushing and pulling + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // try a pull, check that we can still pull because we can still read the + // old tag in the targets role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets role", + }) + + // try a pull -a, check that it succeeds because we can still pull from the + // targets role + cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Success) + + // Push, should sign with targets/releases + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") + + // Try pull, check we retrieve from targets/releases role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets/releases role", + }) + + // Create another delegation that we'll sign with + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) + s.notaryPublish(c, repoName) + + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") + + // Try pull, check we retrieve from targets/releases role + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Err: "retrieving target for targets/releases role", + }) +} + +func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // We'll create a repo first with a non-release delegation role, so that when we + // push we'll sign it into the delegation role + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // Push should write to the delegation role, not targets + cli.DockerCmd(c, "tag", "busybox", targetName) + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) + s.assertTargetInRoles(c, repoName, "latest", "targets/other") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull - we should fail, since pull will only pull from the targets/releases + // role or the targets role + cli.DockerCmd(c, 
"tag", "busybox", targetName) + cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No trust data for", + }) + + // try a pull -a: we should fail since pull will only pull from the targets/releases + // role or the targets role + cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No trusted tags for", + }) +} diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go new file mode 100644 index 0000000..2ae206d --- /dev/null +++ b/integration-cli/docker_cli_push_test.go @@ -0,0 +1,604 @@ +package main + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// Pushing an image to a private registry. +func testPushBusyboxImage(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) +} + +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +// pushing an image without a prefix should throw an error +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { + out, _, err := dockerCmdWithError("push", "busybox") + c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) +} + +func testPushUntagged(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + expected := "An image does not exist locally with the tag" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func testPushBadTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) + expected := "does not exist" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func testPushMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) + repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + + dockerCmd(c, "tag", 
"busybox", repoTag2) + + dockerCmd(c, "push", repoName) + + // Ensure layer list is equivalent for repoTag1 and repoTag2 + out1, _ := dockerCmd(c, "pull", repoTag1) + + imageAlreadyExists := ": Image already exists" + var out1Lines []string + for _, outputLine := range strings.Split(out1, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + + out2, _ := dockerCmd(c, "pull", repoTag2) + + var out2Lines []string + for _, outputLine := range strings.Split(out2, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + c.Assert(out2Lines, checker.HasLen, len(out1Lines)) + + for i := range out1Lines { + c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) + } +} + +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func testPushEmptyLayer(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) + + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) + + freader, err := os.Open(emptyTarball.Name()) + c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) + defer freader.Close() + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "import", "-", repoName}, + Stdin: freader, + }).Assert(c, icmd.Success) + + // Now verify we can push it + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) +} + +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +// testConcurrentPush pushes multiple tags to the same repo +// concurrently. +func testConcurrentPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"push1", "push2", "push3"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s +`, repo))) + repos = append(repos, repo) + } + + // Push tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + result := icmd.RunCommand(dockerBinary, "push", repo) + results <- result.Error + }(repo) + } + + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) 
+ + // Re-pull and run individual tags, to make sure pushes succeeded + for _, repo := range repos { + dockerCmd(c, "pull", repo) + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // ensure that layers were mounted from the first repo during push + c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Equals, digest2) + + // ensure that pushing again produces the same digest + out3, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + + digest3 := reference.DigestRegexp.FindString(out3) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest3, check.Equals, digest2) + + // ensure that we can pull and run the cross-repo-pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out4, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out4, check.Equals, "hello world") +} + +func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + 
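+	// Background, for this test and the v2 variant above: a v2 registry lets
+	// a client mount a blob that already exists in another repository it has
+	// access to, instead of re-uploading it, via a request of the form
+	//
+	//	POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<source repository>
+	//
+	// A successful mount is what the CLI reports as "Mounted from <repo>".
+	// The legacy schema1 push path does not use blob mounting, hence the
+	// negative assertion below.
+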
destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen + c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Not(check.Equals), digest2) + + // ensure that we can pull and run the second pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out3, check.Equals, "hello world") +} + +func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Try pull after push + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) + + // Assert that we rotated the snapshot key to the server by checking our local keystore + contents, err := ioutil.ReadDir(filepath.Join(config.Dir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) + c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) + // Check that we only have 1 key (targets key) + c.Assert(contents, checker.HasLen, 1) +} + +func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "12345678")).Assert(c, SuccessSigningAndPushing) + + // Try pull after push + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + // Using a name that doesn't resolve to an address makes this test faster + cli.Docker(cli.Args("push", repoName), trustedCmdWithServer("https://server.invalid:81/")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error contacting notary server", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + result := cli.Docker(cli.Args("push", "--disable-content-trust", repoName), trustedCmdWithServer("https://server.invalid:81/")) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), check.Not(checker.Contains), "Error 
establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Try pull after push + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + // Do a trusted push + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Do another trusted push + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try pull to ensure the double push did not break our ability to pull + cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessDownloaded) +} + +func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Push with wrong passphrases + cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "87654321")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "could not find necessary signing keys", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + // check to make sure that the target has been added to targets/releases and not targets + s.assertTargetInRoles(c, repoName, "latest", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + Out: "Status: Image is up to date", + }) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryCreateDelegation(c, repoName, 
"targets/role2", s.not.keys[1].Public) + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + + s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) + s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // check to make sure that the target has been added to targets/role1 and targets/role2, and + // not targets (because there are delegations) or targets/role3 (due to missing key) or + // targets/role1/subrole (due to it being a second level delegation) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + + // pull should fail because none of these are the releases role + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") + s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", targetName) + + cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // check to make sure that the target has been added to targets/role1 and targets/role4, and + // not targets (because there are delegations) or targets/role2 (due to path restrictions) or + // targets/role3 (due to missing key) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(config.Dir(), "trust")) + + // pull should fail because none of these are the releases role + cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 1, + }) +} + +func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + // do not import 
any delegations key
+
+	// tag the image and upload it to the private registry
+	cli.DockerCmd(c, "tag", "busybox", targetName)
+
+	cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "no valid signing keys",
+	})
+	s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1")
+}
+
+func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) {
+	repoName := fmt.Sprintf("%s/busybox", privateRegistryURL)
+	dockerCmd(c, "tag", "busybox", repoName)
+	out, _, err := dockerCmdWithError("push", repoName)
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, check.Not(checker.Contains), "Retrying")
+	c.Assert(out, checker.Contains, "no basic auth credentials")
+}
+
+// This test may be flaky, but it is needed to make sure we do not regress on
+// unauthorized pushes; see #21054.
+func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) {
+	testRequires(c, Network)
+	repoName := "test/busybox"
+	dockerCmd(c, "tag", "busybox", repoName)
+	out, _, err := dockerCmdWithError("push", repoName)
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, check.Not(checker.Contains), "Retrying")
+}
+
+func getTestTokenService(status int, body string, retries int) *httptest.Server {
+	var mu sync.Mutex
+	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		mu.Lock()
+		// The Content-Type header must be set before WriteHeader: anything
+		// added to the header map after the status line has been written is
+		// silently dropped by net/http.
+		w.Header().Set("Content-Type", "application/json")
+		if retries > 0 {
+			w.WriteHeader(http.StatusServiceUnavailable)
+			w.Write([]byte(`{"errors":[{"code":"UNAVAILABLE","message":"cannot create token at this time"}]}`))
+			retries--
+		} else {
+			w.WriteHeader(status)
+			w.Write([]byte(body))
+		}
+		mu.Unlock()
+	}))
+}
+
+func (s *DockerRegistryAuthTokenSuite) TestPushTokenServiceUnauthResponse(c *check.C) {
+	ts := getTestTokenService(http.StatusUnauthorized, `{"errors": [{"Code":"UNAUTHORIZED", "message": "a message", "detail": null}]}`, 0)
+	defer ts.Close()
+	s.setupRegistryWithTokenService(c, ts.URL)
+	repoName := fmt.Sprintf("%s/busybox", privateRegistryURL)
+	dockerCmd(c, "tag", "busybox", repoName)
+	out, _, err := dockerCmdWithError("push", repoName)
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Not(checker.Contains), "Retrying")
+	c.Assert(out, checker.Contains, "unauthorized: a message")
+}
+
+func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnauthorized(c *check.C) {
+	ts := getTestTokenService(http.StatusUnauthorized, `{"error": "unauthorized"}`, 0)
+	defer ts.Close()
+	s.setupRegistryWithTokenService(c, ts.URL)
+	repoName := fmt.Sprintf("%s/busybox", privateRegistryURL)
+	dockerCmd(c, "tag", "busybox", repoName)
+	out, _, err := dockerCmdWithError("push", repoName)
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Not(checker.Contains), "Retrying")
+	split := strings.Split(out, "\n")
+	c.Assert(split[len(split)-2], check.Equals, "unauthorized: authentication required")
+}
+
+func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) {
+	ts := getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 3)
+	defer ts.Close()
+	s.setupRegistryWithTokenService(c, ts.URL)
+	repoName := fmt.Sprintf("%s/busybox", privateRegistryURL)
+	dockerCmd(c, "tag", "busybox", repoName)
+	out, _, err := dockerCmdWithError("push", repoName)
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	//
TODO: isolate test so that it can be guaranteed that the 503 will trigger xfer retries + //c.Assert(out, checker.Contains, "Retrying") + //c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "toomanyrequests: out of tokens") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnparsable(c *check.C) { + ts := getTestTokenService(http.StatusForbidden, `no way`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], checker.Contains, "error parsing HTTP 403 response body: ") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseNoToken(c *check.C) { + ts := getTestTokenService(http.StatusOK, `{"something": "wrong"}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "authorization server did not include a token in the response") +} diff --git a/integration-cli/docker_cli_registry_user_agent_test.go b/integration-cli/docker_cli_registry_user_agent_test.go new file mode 100644 index 0000000..6cbe6e7 --- /dev/null +++ b/integration-cli/docker_cli_registry_user_agent_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "regexp" + + "github.com/docker/docker/integration-cli/registry" + "github.com/go-check/check" +) + +// unescapeBackslashSemicolonParens unescapes \;() +func unescapeBackslashSemicolonParens(s string) string { + re := regexp.MustCompile(`\\;`) + ret := re.ReplaceAll([]byte(s), []byte(";")) + + re = regexp.MustCompile(`\\\(`) + ret = re.ReplaceAll([]byte(ret), []byte("(")) + + re = regexp.MustCompile(`\\\)`) + ret = re.ReplaceAll([]byte(ret), []byte(")")) + + re = regexp.MustCompile(`\\\\`) + ret = re.ReplaceAll([]byte(ret), []byte(`\`)) + + return string(ret) +} + +func regexpCheckUA(c *check.C, ua string) { + re := regexp.MustCompile("(?P.+) UpstreamClient(?P.+)") + substrArr := re.FindStringSubmatch(ua) + + c.Assert(substrArr, check.HasLen, 3, check.Commentf("Expected 'UpstreamClient()' with upstream client UA")) + dockerUA := substrArr[1] + upstreamUAEscaped := substrArr[2] + + // check dockerUA looks correct + reDockerUA := regexp.MustCompile("^docker/[0-9A-Za-z+]") + bMatchDockerUA := reDockerUA.MatchString(dockerUA) + c.Assert(bMatchDockerUA, check.Equals, true, check.Commentf("Docker Engine User-Agent malformed")) + + // check upstreamUA looks correct + // Expecting something like: Docker-Client/1.11.0-dev (linux) + upstreamUA := unescapeBackslashSemicolonParens(upstreamUAEscaped) + reUpstreamUA := regexp.MustCompile("^\\(Docker-Client/[0-9A-Za-z+]") + bMatchUpstreamUA := reUpstreamUA.MatchString(upstreamUA) + c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) +} + +// registerUserAgentHandler registers a handler for 
the `/v2/*` endpoint. +// Note that a 404 is returned to prevent the client to proceed. +// We are only checking if the client sent a valid User Agent string along +// with the request. +func registerUserAgentHandler(reg *registry.Mock, result *string) { + reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + w.Write([]byte(`{"errors":[{"code": "UNSUPPORTED","message": "this is a mock registry"}]}`)) + var ua string + for k, v := range r.Header { + if k == "User-Agent" { + ua = v[0] + } + } + *result = ua + }) +} + +// TestUserAgentPassThrough verifies that when an image is pulled from +// a registry, the registry should see a User-Agent string of the form +// [docker engine UA] UpstreamClientSTREAM-CLIENT([client UA]) +func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { + var ua string + + reg, err := registry.NewMock(c) + defer reg.Close() + c.Assert(err, check.IsNil) + registerUserAgentHandler(reg, &ua) + repoName := fmt.Sprintf("%s/busybox", reg.URL()) + + s.d.StartWithBusybox(c, "--insecure-registry", reg.URL()) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + dockerfile, err := makefile(tmp, fmt.Sprintf("FROM %s", repoName)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + + s.d.Cmd("build", "--file", dockerfile, tmp) + regexpCheckUA(c, ua) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + regexpCheckUA(c, ua) + + s.d.Cmd("pull", repoName) + regexpCheckUA(c, ua) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + regexpCheckUA(c, ua) +} diff --git a/integration-cli/docker_cli_rename_test.go b/integration-cli/docker_cli_rename_test.go new file mode 100644 index 0000000..ea43022 --- /dev/null +++ b/integration-cli/docker_cli_rename_test.go @@ -0,0 +1,138 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + name := inspectField(c, cleanedContainerID, "Name") + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name = inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + +} + +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) +} + +func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { + out := runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + + newName := "new_name" + ContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, ContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename 
container")) + + out = runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + newContainerID := strings.TrimSpace(out) + name = inspectField(c, newContainerID, "Name") + c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) +} + +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { + dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, newName, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + + result := dockerCmdWithResult("inspect", "-f={{.Name}}", "--type=container", "first_name") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such container: first_name", + }) +} + +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { + runSleepingContainer(c, "--name", "myname") + + out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname", "") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "", "newname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) +} + +func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "network", "create", "network1") + out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") + + anonymousContainerID := strings.TrimSpace(out) + + dockerCmd(c, "rename", anonymousContainerID, "container1") + dockerCmd(c, "start", "container1") + + count := "-c" + if testEnv.DaemonPlatform() == "windows" { + count = "-n" + } + + _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") + c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails after renaming anonymous container: %v", err)) +} + +func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { + out := runSleepingContainer(c, "--name", "old") + ContainerID := strings.TrimSpace(out) + + out, _, err := dockerCmdWithError("rename", "old", "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", ContainerID, "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, 
checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) +} + +// Test case for #23973 +func (s *DockerSuite) TestRenameContainerWithLinkedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + db1, _ := dockerCmd(c, "run", "--name", "db1", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "app1", "-d", "--link", "db1:/mysql", "busybox", "top") + dockerCmd(c, "rename", "app1", "app2") + out, _, err := dockerCmdWithError("inspect", "--format={{ .Id }}", "app2/mysql") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(db1)) +} diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go new file mode 100644 index 0000000..cf6b135 --- /dev/null +++ b/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,309 @@ +package main + +import ( + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") + cleanedContainerID := getIDByName(c, "test") + + out, _ := dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", cleanedContainerID) + + // Wait until the container has stopped + err := waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + + cleanedContainerID := strings.TrimSpace(out) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + getLogs := func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := dockerCmd(c, "logs", cleanedContainerID) + return out, nil + } + + // Wait 10 seconds for the 'echo' to appear in the logs + waitAndAssert(c, 10*time.Second, getLogs, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + // Wait 10 seconds for first 'echo' appear (again) in the logs + waitAndAssert(c, 10*time.Second, getLogs, checker.Equals, "foobar\nfoobar\n") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
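+// The check works by comparing the volume's mount source path, as reported by
+// inspect, before and after the restart: if the daemon wrongly provisioned a
+// fresh volume on restart, the reported source directory would change.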
+func (s *DockerSuite) TestRestartWithVolumes(c *check.C) {
+	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+	out := runSleepingContainer(c, "-d", "-v", prefix+slash+"test")
+
+	cleanedContainerID := strings.TrimSpace(out)
+	out, err := inspectFilter(cleanedContainerID, "len .Mounts")
+	c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out))
+	out = strings.Trim(out, " \n\r")
+	c.Assert(out, checker.Equals, "1")
+
+	source, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test")
+	c.Assert(err, checker.IsNil)
+
+	dockerCmd(c, "restart", cleanedContainerID)
+
+	out, err = inspectFilter(cleanedContainerID, "len .Mounts")
+	c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out))
+	out = strings.Trim(out, " \n\r")
+	c.Assert(out, checker.Equals, "1")
+
+	sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test")
+	c.Assert(err, checker.IsNil)
+	c.Assert(source, checker.Equals, sourceAfterRestart)
+}
+
+func (s *DockerSuite) TestRestartDisconnectedContainer(c *check.C) {
+	testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm)
+
+	// Run a container on the default bridge network
+	out, _ := dockerCmd(c, "run", "-d", "--name", "c0", "busybox", "top")
+	cleanedContainerID := strings.TrimSpace(out)
+	c.Assert(waitRun(cleanedContainerID), checker.IsNil)
+
+	// Disconnect the container from the network. dockerCmd returns the
+	// command's output and exit code (it fails the test itself on error),
+	// so assert on the exit code rather than treating it as an error value.
+	out, exitCode := dockerCmd(c, "network", "disconnect", "bridge", "c0")
+	c.Assert(exitCode, checker.Equals, 0, check.Commentf(out))
+
+	// Restart the container
+	out, exitCode = dockerCmd(c, "restart", "c0")
+	c.Assert(exitCode, checker.Equals, 0, check.Commentf(out))
+}
+
+func (s *DockerSuite) TestRestartPolicyNO(c *check.C) {
+	out, _ := dockerCmd(c, "create", "--restart=no", "busybox")
+
+	id := strings.TrimSpace(string(out))
+	name := inspectField(c, id, "HostConfig.RestartPolicy.Name")
+	c.Assert(name, checker.Equals, "no")
+}
+
+func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) {
+	out, _ := dockerCmd(c, "create", "--restart=always", "busybox")
+
+	id := strings.TrimSpace(string(out))
+	name := inspectField(c, id, "HostConfig.RestartPolicy.Name")
+	c.Assert(name, checker.Equals, "always")
+
+	MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
+
+	// MaximumRetryCount=0 if the restart policy is always
+	c.Assert(MaximumRetryCount, checker.Equals, "0")
+}
+
+func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) {
+	out, _, err := dockerCmdWithError("create", "--restart=on-failure:-1", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "maximum retry count cannot be negative")
+
+	out, _ = dockerCmd(c, "create", "--restart=on-failure:1", "busybox")
+
+	id := strings.TrimSpace(string(out))
+	name := inspectField(c, id, "HostConfig.RestartPolicy.Name")
+	maxRetry := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
+
+	c.Assert(name, checker.Equals, "on-failure")
+	c.Assert(maxRetry, checker.Equals, "1")
+
+	out, _ = dockerCmd(c, "create", "--restart=on-failure:0", "busybox")
+
+	id = strings.TrimSpace(string(out))
+	name = inspectField(c, id, "HostConfig.RestartPolicy.Name")
+	maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
+
+	c.Assert(name, checker.Equals, "on-failure")
+	c.Assert(maxRetry, checker.Equals, "0")
+
+	out, _ = dockerCmd(c, "create", "--restart=on-failure", "busybox")
+
+	id = strings.TrimSpace(string(out))
+	name = inspectField(c, id, "HostConfig.RestartPolicy.Name")
+	maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
+
+	c.Assert(name, checker.Equals, "on-failure")
+	c.Assert(maxRetry, checker.Equals, "0")
+}
+
+// a good container with --restart=on-failure:3
+// MaximumRetryCount!=0; RestartCount=0
+func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true")
+
+	id := strings.TrimSpace(string(out))
+	err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second)
+	c.Assert(err, checker.IsNil)
+
+	count := inspectField(c, id, "RestartCount")
+	c.Assert(count, checker.Equals, "0")
+
+	MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
+	c.Assert(MaximumRetryCount, checker.Equals, "3")
+}
+
+func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	out := runSleepingContainer(c, "-d", "--restart=always")
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	pidStr := inspectField(c, id, "State.Pid")
+
+	pid, err := strconv.Atoi(pidStr)
+	c.Assert(err, check.IsNil)
+
+	p, err := os.FindProcess(pid)
+	c.Assert(err, check.IsNil)
+	c.Assert(p, check.NotNil)
+
+	err = p.Kill()
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second)
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) {
+	// TODO Windows. This may be portable following HNS integration post TP5.
+	testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm)
+	dockerCmd(c, "network", "create", "-d", "bridge", "udNet")
+
+	dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second",
+		"--link=first:foo", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias foo must succeed
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// Now kill the second container and let the restart policy kick in
+	pidStr := inspectField(c, "second", "State.Pid")
+
+	pid, err := strconv.Atoi(pidStr)
+	c.Assert(err, check.IsNil)
+
+	p, err := os.FindProcess(pid)
+	c.Assert(err, check.IsNil)
+	c.Assert(p, check.NotNil)
+
+	err = p.Kill()
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second)
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second)
+	c.Assert(err, check.IsNil)
+
+	// ping to first and its alias foo must still succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	out := runSleepingContainer(c, "-d", "--restart=always")
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	dockerCmd(c, "restart", id)
+
+	c.Assert(waitRun(id), check.IsNil)
+
+	pidStr := inspectField(c, id, "State.Pid")
+
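+	// Below, the container's init process is killed directly rather than via
+	// the CLI: an unexpected exit is what triggers the daemon-side
+	// --restart=always policy, after which RestartCount should reach 1 and
+	// the container should come back to the "running" state.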
pid, err := strconv.Atoi(pidStr)
+	c.Assert(err, check.IsNil)
+
+	p, err := os.FindProcess(pid)
+	c.Assert(err, check.IsNil)
+	c.Assert(p, check.NotNil)
+
+	err = p.Kill()
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second)
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) {
+	out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false")
+	out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false")
+
+	id1 := strings.TrimSpace(string(out1))
+	id2 := strings.TrimSpace(string(out2))
+	waitTimeout := 15 * time.Second
+	if testEnv.DaemonPlatform() == "windows" {
+		waitTimeout = 150 * time.Second
+	}
+	err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout)
+	c.Assert(err, checker.IsNil)
+
+	dockerCmd(c, "restart", id1)
+	dockerCmd(c, "restart", id2)
+
+	// Make sure we can stop/start (regression test from a705e166cf3bcca62543150c2b3f9bfeae45ecfa)
+	dockerCmd(c, "stop", id1)
+	dockerCmd(c, "stop", id2)
+	dockerCmd(c, "start", id1)
+	dockerCmd(c, "start", id2)
+
+	// Kill the containers, making sure they are stopped at the end of the test
+	dockerCmd(c, "kill", id1)
+	dockerCmd(c, "kill", id2)
+	err = waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout)
+	c.Assert(err, checker.IsNil)
+	err = waitInspect(id2, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout)
+	c.Assert(err, checker.IsNil)
+}
+
+func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) {
+	out := runSleepingContainer(c, "--rm")
+
+	id := strings.TrimSpace(string(out))
+	dockerCmd(c, "restart", id)
+	err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second)
+	c.Assert(err, checker.IsNil)
+
+	out, _ = dockerCmd(c, "ps")
+	c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out))
+
+	// Kill the container to make sure it will be removed
+	dockerCmd(c, "kill", id)
+}
diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go
new file mode 100644
index 0000000..d281704
--- /dev/null
+++ b/integration-cli/docker_cli_rm_test.go
@@ -0,0 +1,87 @@
+package main
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+
+	tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-")
+	if err != nil {
+		c.Fatalf("failed to create temporary directory: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true")
+
+	err = os.RemoveAll(tempDir)
+	c.Assert(err, check.IsNil)
+
+	dockerCmd(c, "rm", "-v", "losemyvolumes")
+}
+
+func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) {
+	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+
+	dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true")
+
+	dockerCmd(c, "rm", "-v", "foo")
+}
+
+func (s *DockerSuite) TestRmContainerRunning(c *check.C) {
+	createRunningContainer(c, "foo")
+
+	res, _, err 
:= dockerCmdWithError("rm", "foo") + c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) + c.Assert(res, checker.Contains, "cannot remove a running container") +} + +func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { + createRunningContainer(c, "foo") + + // Stop then remove with -f + dockerCmd(c, "rm", "-f", "foo") +} + +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["true"] + MAINTAINER Integration Tests` + + // build first dockerfile + buildImageSuccessfully(c, img, build.WithDockerfile(dockerfile1)) + img1 := getIDByName(c, img) + // run container on first image + dockerCmd(c, "run", img) + // rebuild dockerfile with a small addition at the end + buildImageSuccessfully(c, img, build.WithDockerfile(dockerfile2)) + // try to remove the image, should not error out. + out, _, err := dockerCmdWithError("rmi", img) + c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) + + // check if we deleted the first image + out, _ = dockerCmd(c, "images", "-q", "--no-trunc") + c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) + +} + +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { + out, _, err := dockerCmdWithError("rm", "unknown") + c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) + c.Assert(out, checker.Contains, "No such container") +} + +func createRunningContainer(c *check.C, name string) { + runSleepingContainer(c, "-dt", "--name", name) +} diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 0000000..afbc4c2 --- /dev/null +++ b/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,338 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringid" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { + errSubstr := "is using it" + + // create a container + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + // try to delete the image + out, _, err := dockerCmdWithError("rmi", "busybox") + // Container is using image, should not be able to rmi + c.Assert(err, checker.NotNil) + // Container is using image, error message should contain errSubstr + c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) + + // make sure it didn't delete the busybox name + images, _ := dockerCmd(c, "images") + // The name 'busybox' should not have been removed from images + c.Assert(images, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", 
imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest/docker:tag2") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } + dockerCmd(c, "rmi", "utest:tag1") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } +} + +func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'").Combined() + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if testEnv.DaemonPlatform() == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) + } + + cli.DockerCmd(c, "commit", containerID, "busybox-one") + + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() + // tag busybox to create 2 more images with same imageID + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) + + imgID := inspectField(c, "busybox-one:tag1", "Id") + + // run a container with the image + out = runSleepingContainerInImage(c, "busybox-one") + containerID = strings.TrimSpace(out) + + // first checkout without force it fails + // rmi tagged in multiple repos should have failed without force + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)), + }) + + cli.DockerCmd(c, "stop", containerID) + cli.DockerCmd(c, "rmi", "-f", imgID) + + imagesAfter = cli.DockerCmd(c, "images", "-a").Combined() + // rmi -f failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'").Combined() + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if testEnv.DaemonPlatform() == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) + } + + cli.DockerCmd(c, "commit", containerID, "busybox-test") + + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag1") + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag2") + cli.DockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + cli.DockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() + 
c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + imgID := inspectField(c, "busybox-test", "Id") + + // first checkout without force it fails + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "(must be forced) - image is referenced in multiple repositories", + }) + + cli.DockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() + // rmi failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) + } +} + +// See https://github.com/docker/docker/issues/14116 +func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { + dockerfile := "FROM busybox\nRUN echo test 14116\n" + buildImageSuccessfully(c, "test-14116", build.WithDockerfile(dockerfile)) + imgID := getIDByName(c, "test-14116") + + newTag := "newtag" + dockerCmd(c, "tag", imgID, newTag) + runSleepingContainerInImage(c, imgID) + + out, _, err := dockerCmdWithError("rmi", "-f", imgID) + // rmi -f should not delete image with running containers + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") +} + +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + dockerCmd(c, "tag", bb, newtag) + + dockerCmd(c, "run", "--name", container, bb, "/bin/true") + + out, _ := dockerCmd(c, "rmi", newtag) + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) +} + +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { + image := "busybox-clone" + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "--no-cache", "-t", image, "-"}, + Stdin: strings.NewReader(`FROM busybox +MAINTAINER foo`), + }).Assert(c, icmd.Success) + + dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") + + dockerCmd(c, "rmi", "-f", image) +} + +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { + newRepo := "127.0.0.1:5000/busybox" + oldRepo := "busybox" + newTag := "busybox:test" + dockerCmd(c, "tag", oldRepo, newRepo) + + dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/abcd") + + dockerCmd(c, "commit", "test", newTag) + + out, _ := dockerCmd(c, "rmi", newTag) + c.Assert(out, checker.Contains, "Untagged: "+newTag) +} + +func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { + imageName := "rmiimage" + tag1 := imageName + ":tag1" + tag2 := imageName + ":tag2" + + buildImageSuccessfully(c, tag1, build.WithDockerfile(`FROM busybox + MAINTAINER "docker"`)) + dockerCmd(c, "tag", tag1, tag2) + + out, _ := dockerCmd(c, "rmi", "-f", tag2) + c.Assert(out, checker.Contains, "Untagged: "+tag2) + c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) + + // Check built image still exists + images, _ := dockerCmd(c, "images", "-a") + c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) +} + +func (s *DockerSuite) TestRmiBlank(c *check.C) { + out, _, err := dockerCmdWithError("rmi", " ") + // Should have failed to delete ' ' image + c.Assert(err, checker.NotNil) + // Wrong error message generated + c.Assert(out, checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) + // Expected error message not generated + c.Assert(out, checker.Contains, "image name cannot be 
blank", check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { + // Build 2 images for testing. + imageNames := []string{"test1", "test2"} + imageIds := make([]string, 2) + for i, name := range imageNames { + dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + id := getIDByName(c, name) + imageIds[i] = id + } + + // Create a long-running container. + runSleepingContainerInImage(c, imageNames[0]) + + // Create a stopped container, and then force remove its image. + dockerCmd(c, "run", imageNames[1], "true") + dockerCmd(c, "rmi", "-f", imageIds[1]) + + // Try to remove the image of the running container and see if it fails as expected. + out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) + // The image of the running container should not be removed. + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) +} + +// #13422 +func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { + image := "tmp1" + // Build an image for testing. + dockerfile := `FROM busybox +MAINTAINER foo +RUN echo 0 #layer0 +RUN echo 1 #layer1 +RUN echo 2 #layer2 +` + buildImageSuccessfully(c, image, build.WithoutCache, build.WithDockerfile(dockerfile)) + out, _ := dockerCmd(c, "history", "-q", image) + ids := strings.Split(out, "\n") + idToTag := ids[2] + + // Tag layer0 to "tmp2". + newTag := "tmp2" + dockerCmd(c, "tag", idToTag, newTag) + // Create a container based on "tmp1". + dockerCmd(c, "run", "-d", image, "true") + + // See if the "tmp2" can be untagged. + out, _ = dockerCmd(c, "rmi", newTag) + // Expected 1 untagged entry + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) + + // Now let's add the tag again and create a container based on it. + dockerCmd(c, "tag", idToTag, newTag) + out, _ = dockerCmd(c, "run", "-d", newTag, "true") + cid := strings.TrimSpace(out) + + // At this point we have 2 containers, one based on layer2 and another based on layer0. + // Try to untag "tmp2" without the -f flag. + out, _, err := dockerCmdWithError("rmi", newTag) + // should not be untagged without the -f flag + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, cid[:12]) + c.Assert(out, checker.Contains, "(must force)") + + // Add the -f flag and test again. 
+ out, _ = dockerCmd(c, "rmi", "-f", newTag) + // should be allowed to untag with the -f flag + c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) +} + +func (*DockerSuite) TestRmiParentImageFail(c *check.C) { + buildImageSuccessfully(c, "test", build.WithDockerfile(` + FROM busybox + RUN echo hello`)) + + id := inspectField(c, "busybox", "ID") + out, _, err := dockerCmdWithError("rmi", id) + c.Assert(err, check.NotNil) + if !strings.Contains(out, "image has dependent child images") { + c.Fatalf("rmi should have failed because it's a parent image, got %s", out) + } +} + +func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "create", imageID) + cID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID = strings.TrimSpace(out) + + dockerCmd(c, "rmi", imageID) +} + +// #18873 +func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { + dockerCmd(c, "create", "busybox") + + imgID := inspectField(c, "busybox:latest", "Id") + + _, _, err := dockerCmdWithError("rmi", imgID[:12]) + c.Assert(err, checker.NotNil) + + // check that tag was not removed + imgID2 := inspectField(c, "busybox:latest", "Id") + c.Assert(imgID, checker.Equals, imgID2) +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go new file mode 100644 index 0000000..50cd513 --- /dev/null +++ b/integration-cli/docker_cli_run_test.go @@ -0,0 +1,4621 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/cli/build/fakecontext" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/go-check/check" + libcontainerUser "github.com/opencontainers/runc/libcontainer/user" +) + +// "test123" should be printed by docker run +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") + if out != "test123\n" { + c.Fatalf("container should've printed 'test123', got '%s'", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// docker run should not leak file descriptors. This test relies on Unix +// specific functionality and cannot run on Windows. 
+func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + c.Errorf("container should've printed '0 1 2 3', not: %s", out) + } +} + +// it should be possible to look up Google DNS +// this will fail when Internet access is unavailable +func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { + testRequires(c, Network, NotArm) + if testEnv.DaemonPlatform() == "windows" { + // nslookup isn't present in Windows busybox (it is built into Windows itself). Further, + // nslookup isn't present in nanoserver. Hence just use PowerShell... + dockerCmd(c, "run", testEnv.MinimalBaseImage(), "powershell", "Resolve-DNSName", "google.com") + } else { + dockerCmd(c, "run", "busybox", "nslookup", "google.com") + } + +} + +// the exit code should be 0 +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { + dockerCmd(c, "run", "busybox", "true") +} + +// the exit code should be 1 +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { + _, exitCode, err := dockerCmdWithError("run", "busybox", "false") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 1) +} + +// it should be possible to pipe in data via stdin to a process running in a container +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { + // TODO Windows: This needs some work to make compatible. + testRequires(c, DaemonIsLinux) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat"}, + Stdin: strings.NewReader("blahblah"), + }) + result.Assert(c, icmd.Success) + out := result.Stdout() + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + logsOut, _ := dockerCmd(c, "logs", out) + + containerLogs := strings.TrimSpace(logsOut) + if containerLogs != "blahblah" { + c.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + dockerCmd(c, "rm", out) +} + +// the container's ID should be printed when starting a container in detached mode +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + rmOut, _ := dockerCmd(c, "rm", out) + + rmOut = strings.TrimSpace(rmOut) + if rmOut != out { + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } +} + +// the working directory should be set correctly +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { + dir := "/root" + image := "busybox" + if testEnv.DaemonPlatform() == "windows" { + dir = `C:/Windows` + } + + // First with -w + out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("-w failed to set working directory") + } + + // Then with --workdir + out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("--workdir failed to set working directory") + } +} + +// pinging Google's DNS resolver should fail when we disable networking +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { + count := "-c" + image := "busybox" + if testEnv.DaemonPlatform() == "windows" { + count = "-n" + image = testEnv.MinimalBaseImage() + } + + // First using the long form --net + out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") + if err != nil && exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } +} + +// test that --link can use a container name as the link target +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") + + ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("failed to use container name as link target") + } +} + +// test that --link can use a container ID as the link target +func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") + + cID = strings.TrimSpace(cID) + ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("failed to use container ID as link target") + } +} + +func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in user-defined network udlinkNet with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping to third and its alias must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.NotNil) + + // start third container now + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") + c.Assert(waitRun("third"), check.IsNil) + + // ping to third and its alias must succeed now + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to
first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart second container + dockerCmd(c, "restart", "second") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + for _, net := range defaults { + out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Check if default short-id alias is added automatically + id := strings.TrimSpace(cid1) + aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check if default short-id alias is added automatically + id = strings.TrimSpace(cid2) + aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + // ping to first and its network-scoped aliases + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its network-scoped aliases must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", 
"ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) +} + +// Issue 9677. +func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { + out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown flag: --exec-opt") +} + +// Regression test for #4979 +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { + + var ( + out string + exitCode int + ) + + // Create a file in a volume + if testEnv.DaemonPlatform() == "windows" { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, testEnv.MinimalBaseImage(), "cmd", "/c", `echo hello > c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("1", out, exitCode) + } + + // Read the file from another container using --volumes-from to access the volume in the second container + if testEnv.DaemonPlatform() == "windows" { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", testEnv.MinimalBaseImage(), "cmd", "/c", `type c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("2", out, exitCode) + } +} + +// Volume path is a symlink which also exists on the host, and the host side is a file not a dir +// But the volume call is just a normal volume, not a bind mount +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink" + + dir, err := ioutil.TempDir("", name) + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + // In the case of Windows to Windows CI, if the machine is setup so that + // the temp directory is not the C: drive, this test is invalid and will + // not work. 
+ if testEnv.DaemonPlatform() == "windows" && strings.ToLower(dir[:1]) != "c" { + c.Skip("Requires TEMP to point to C: drive") + } + + f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) + if err != nil { + c.Fatal(err) + } + f.Close() + + if testEnv.DaemonPlatform() == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", testEnv.MinimalBaseImage(), dir, dir) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) + containerPath = "/test/test" + cmd = "true" + } + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +// Volume path is a symlink in the container +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink2" + + if testEnv.DaemonPlatform() == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", testEnv.MinimalBaseImage(), name, name) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) + containerPath = "/test/test" + cmd = "true" + } + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { + if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if testEnv.DaemonPlatform() == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + testRequires(c, DaemonIsLinux) + volumeDir = "/test" + fileInVol = `/test/file` + } + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + + if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +// Regression test for #1201 +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if testEnv.DaemonPlatform() == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + volumeDir = "/test" + fileInVol = "/test/file" + } + + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { + c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) + } + + dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) +} + +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := 
getPrefixAndSlashFromDaemonPlatform() + hostpath := testutil.RandomTmpDirPath("test", testEnv.DaemonPlatform()) + if err := os.MkdirAll(hostpath, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", hostpath, err) + } + defer os.RemoveAll(hostpath) + + dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this "rw" mode to be ignored since the inherited volume is "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this to be read-only since both are "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { + path1 := testutil.RandomTmpDirPath("test1", testEnv.DaemonPlatform()) + path2 := testutil.RandomTmpDirPath("test2", testEnv.DaemonPlatform()) + + someplace := ":/someplace" + if testEnv.DaemonPlatform() == "windows" { + // Windows requires that the source directory exists before calling HCS + testRequires(c, SameHostDaemon) + someplace = `:c:\someplace` + if err := os.MkdirAll(path1, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path1) + if err := os.MkdirAll(path2, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path2, err) + } + defer os.RemoveAll(path2) + } + mountstr1 := path1 + someplace + mountstr2 := path2 + someplace + + if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + + // Test for https://github.com/docker/docker/issues/22093 + volumename1 := "test1" + volumename2 := "test2" + volume1 := volumename1 + someplace + volume2 := volumename2 + someplace + if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + // even though the runs failed, one of volumename1 or volumename2 may still have been created, + // so remove whichever volume actually exists + out, _ := dockerCmd(c, "volume", "ls") + if strings.Contains(out, volumename1) { + dockerCmd(c, "volume", "rm", volumename1) + } else { + dockerCmd(c, "volume", "rm", volumename2) + } +} + +// Test for #1351 +func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { + prefix := "" + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") +} + +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { + prefix := "" + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") + dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") +} + +// this test verifies the ID format for the container +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { + out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") + if err != nil { + c.Fatal(err) + } + if exit != 0 { + c.Fatalf("expected exit code 0, received %d", exit) + } + + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatal(err) + } + if !match { + c.Fatalf("Invalid container ID: %s", out) + } +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { + prefix := "" + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { + // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) + testRequires(c, DaemonIsLinux) + workingDirectory, err := ioutil.TempDir("", "TestRunCreateVolumeWithSymlink") + c.Assert(err, checker.IsNil) + image := "docker-test-createvolumewithsymlink" + + buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /bar`) + buildCmd.Dir = workingDirectory + err = buildCmd.Run() + if err != nil { + c.Fatalf("could not build '%s': %v", image, err) + } + + _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") + c.Assert(err, checker.IsNil) + + _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") + if err != nil || exitCode != 0 { + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + _, err = os.Stat(volPath) + if !os.IsNotExist(err) { + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } +} + +// Tests that a volume path with a symlink in it is usable from a container mounting it with `--volumes-from`.
+func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { + // This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, DaemonIsLinux) + + workingDirectory, err := ioutil.TempDir("", "TestRunVolumesFromSymlinkPath") + c.Assert(err, checker.IsNil) + name := "docker-test-volumesfromsymlinkpath" + prefix := "" + dfContents := `FROM busybox + RUN ln -s home /foo + VOLUME ["/foo/bar"]` + + if testEnv.DaemonPlatform() == "windows" { + prefix = `c:` + dfContents = `FROM ` + testEnv.MinimalBaseImage() + ` + RUN mkdir c:\home + RUN mklink /D c:\foo c:\home + VOLUME ["c:/foo/bar"] + ENTRYPOINT c:\windows\system32\cmd.exe` + } + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = strings.NewReader(dfContents) + buildCmd.Dir = workingDirectory + err = buildCmd.Run() + if err != nil { + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) + if err != nil || exitCode != 0 { + c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) + } + + _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } +} + +func (s *DockerSuite) TestRunExitCode(c *check.C) { + var ( + exit int + err error + ) + + _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") + + if err == nil { + c.Fatal("expected a non-nil error") + } + if exit != 72 { + c.Fatalf("expected exit code 72, received %d", exit) + } +} + +func (s *DockerSuite) TestRunUserDefaults(c *check.C) { + expected := "uid=0(root) gid=0(root)" + if testEnv.DaemonPlatform() == "windows" { + expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" + } + out, _ := dockerCmd(c, "run", "busybox", "id") + if !strings.Contains(out, expected) { + c.Fatalf("expected '%s' got %s", expected, out) + } +} + +func (s *DockerSuite) TestRunUserByName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("expected daemon user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux, NotArm) + out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") + if err == nil { + c.Fatal("expected an error, but got none:", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") + if err == nil { + c.Fatal("expected an error, but got none:", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id") + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + c.Fatalf("expected root user, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") + if err == nil { + c.Fatal("unknown user should cause container to fail") + } +} + +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { + sleepTime := "2" + group := sync.WaitGroup{} + group.Add(2) + + errChan := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) + errChan <- err + }() + } + + group.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestRunEnvironment(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows, and this test currently relies on unix functionality. + testRequires(c, DaemonIsLinux) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env"}, + Env: append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ), + }) + result.Assert(c, icmd.Success) + + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + // The first two should not be tested here, those are "inherent" environment variables. This test validates + // the -e behavior, not the default environment variables (which could be subject to change) + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows, and this test currently relies on unix functionality.
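+	// Note: "-e FOO" without a value is expected to forward FOO from the
+	// client environment when it is set there, and to unset it in the
+	// container otherwise; the latter behavior is what is exercised here.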
+ testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // not set in our local env that they're removed (if present) in + // the container + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env"}, + Env: appendBaseEnv(true), + }) + result.Assert(c, icmd.Success) + + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows, and this test currently relies on unix functionality. + testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // already in the env, we override them + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env"}, + Env: appendBaseEnv(true, "HOSTNAME=bar"), + }) + result.Assert(c, icmd.Success) + + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root2", + "HOSTNAME=bar", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { + if testEnv.DaemonPlatform() == "windows" { + // Windows busybox does not have ping. Use built in ping instead. + dockerCmd(c, "run", testEnv.MinimalBaseImage(), "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + // TODO Windows: This is Linux specific as --link is not supported and + // this will be deprecated in favor of container networking model. + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "--name", "linked", "busybox", "true") + + _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") + if err == nil { + c.Fatal("Expected error") + } +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h <hostname>". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { + // TODO Windows: -h is not yet functional.
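+	// Inside the container, `hostname` should print the full "foo.bar.baz"
+	// passed via -h, not just the short name "foo".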
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := 
dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunGroupAdd(c *check.C) { + // Not applicable for Windows as there is no concept of --group-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") + + groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" + if actual := strings.Trim(out, "\r\n"); actual != groupsList { + c.Fatalf("expected output %s received %s", groupsList, actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotArm) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { + c.Fatal("sys should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { + c.Fatalf("sys should be writable in 
privileged container") + } +} + +func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { + c.Fatal("proc should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { + c.Fatalf("proc should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } +} + +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { + // Not applicable on Windows as it does not support chroot + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "busybox", "chroot", "/", "true") +} + +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + c.Fatalf("expected output /dev/nulo, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { + c.Fatalf("expected output /dev/zero, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") + if err == nil { + c.Fatalf("run container with device mode ro should fail") + } +} + +func (s *DockerSuite) TestRunModeHostname(c *check.C) { 
+ // Not applicable on Windows as Windows does not support -h + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") + + if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { + c.Fatalf("expected 'testhostname', but says: %q", actual) + } + + out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + hostname, err := os.Hostname() + if err != nil { + c.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + c.Fatalf("expected %q, but says: %q", hostname, actual) + } +} + +func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") + expected := "/\n" + if testEnv.DaemonPlatform() == "windows" { + expected = "C:" + expected + } + if out != expected { + c.Fatalf("pwd returned %q (expected %s)", s, expected) + } +} + +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + if testEnv.DaemonPlatform() == "windows" { + // Windows busybox will fail with Permission Denied on items such as pagefile.sys + dockerCmd(c, "run", "-v", `c:\:c:\host`, testEnv.MinimalBaseImage(), "cmd", "-c", "dir", `c:\host`) + } else { + dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") + } +} + +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { + mount := "/:/" + targetDir := "/host" + if testEnv.DaemonPlatform() == "windows" { + mount = `c:\:c\` + targetDir = "c:/host" // Forward slash as using busybox + } + out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) + if err == nil { + c.Fatal(out, err) + } +} + +// Verify that a container gets default DNS when only localhost resolvers exist +func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) { + // Not applicable on Windows as this is testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // preserve original resolv.conf for restoring after test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + // defer restored original conf + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost + // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by + // GetNameservers(), leading to a replacement of nameservers with the default set + tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + + actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + // check that the actual defaults are appended to the commented out + // localhost resolver (which should be preserved) + // NOTE: if we ever change the defaults from google dns, this will break + expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if actual != expected { + c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerSuite) TestRunDNSOptions(c *check.C) { + // Not applicable on Windows as Windows does not support --dns*, or + // the Unix-specific functionality of resolv.conf. 
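+	// For reference, the flags below should yield a container /etc/resolv.conf of:
+	//
+	//	search mydomain
+	//	nameserver 127.0.0.1
+	//	options ndots:9
+	//
+	// (the assertions flatten the newlines to spaces before comparing).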
+ testRequires(c, DaemonIsLinux) + result := cli.DockerCmd(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") + + // The client will get a warning on stderr when setting DNS to a localhost address; verify this: + if !strings.Contains(result.Stderr(), "Localhost DNS setting") { + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", result.Stderr()) + } + + actual := strings.Replace(strings.Trim(result.Stdout(), "\r\n"), "\n", " ", -1) + if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { + c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) + } + + out := cli.DockerCmd(c, "run", "--dns=1.1.1.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf").Combined() + + actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) + if actual != "nameserver 1.1.1.1 options ndots:3" { + c.Fatalf("expected 'nameserver 1.1.1.1 options ndots:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + out := cli.DockerCmd(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf").Stdout() + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { + c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) { + // Not applicable on Windows as testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP) + hostSearch := resolvconf.GetSearchDomains(origResolvConf) + + var out string + out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") + + if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" { + c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + } + + actualSearch := resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i]) + } + } + + out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP) + if len(actualNameservers) != len(hostNameservers) { + c.Fatalf("expected %d nameserver(s), but it has: %d", len(hostNameservers), len(actualNameservers)) + } + for i := range actualNameservers { + if actualNameservers[i] != hostNameservers[i] { + c.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i]) + } + } + + if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { + c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) + } + 
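+	// Next, replace the host's /etc/resolv.conf with a temporary copy and check
+	// that a fresh container inherits its non-localhost nameserver (12.34.56.78)
+	// while the localhost entry (127.0.0.1) is filtered out.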
+	// test with file
+	tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+	// put the old resolvconf back
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
+			c.Fatal(err)
+		}
+	}()
+
+	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if os.IsNotExist(err) {
+		c.Fatalf("/etc/resolv.conf does not exist")
+	}
+
+	hostSearch = resolvconf.GetSearchDomains(resolvConf)
+
+	out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
+	if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
+		c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
+	}
+
+	actualSearch = resolvconf.GetSearchDomains([]byte(out))
+	if len(actualSearch) != len(hostSearch) {
+		c.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
+	}
+	for i := range actualSearch {
+		if actualSearch[i] != hostSearch[i] {
+			c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
+		}
+	}
+}
+
+// Test to see if a non-root user can resolve a DNS name. Also
+// check if the container resolv.conf file has at least 0644 perm.
+func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
+	// Not applicable on Windows as Windows does not support --user
+	testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm)
+
+	dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org")
+
+	cID := getIDByName(c, "testperm")
+
+	fmode := (os.FileMode)(0644)
+	finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if (finfo.Mode() & fmode) != fmode {
+		c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String())
+	}
+}
+
+// Test if container resolv.conf gets updated the next time it restarts
+// if host /etc/resolv.conf has changed. This only applies if the container
+// uses the host's /etc/resolv.conf and does not have any dns options provided.
+func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
+	// Not applicable on Windows as testing Unix specific functionality
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+	c.Skip("Unstable test, to be re-activated once #19937 is resolved")
+
+	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
+	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
+
+	// take a copy of resolv.conf for restoring after test completes
+	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// This test case is meant to test monitoring resolv.conf when it is
+	// a regular file not a bind mount. So we unmount resolv.conf and replace
+	// it with a file containing the original settings.
+	mounted, err := mount.Mounted("/etc/resolv.conf")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if mounted {
+		icmd.RunCommand("umount", "/etc/resolv.conf").Assert(c, icmd.Success)
+	}
+
+	// cleanup
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+			c.Fatal(err)
+		}
+	}()
+
+	// 1. test that a restarting container gets an updated resolv.conf
+	dockerCmd(c, "run", "--name=first", "busybox", "true")
+	containerID1 := getIDByName(c, "first")
+
+	// replace resolv.conf with our temporary copy
+	bytesResolvConf := []byte(tmpResolvConf)
+	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again to pick up changes
+	dockerCmd(c, "start", "first")
+
+	// check for update in container
+	containerResolv := readContainerFile(c, containerID1, "resolv.conf")
+	if !bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
+	}
+
+	// 2. test that a restarting container does not receive resolv.conf updates
+	//    if it modified the container copy of the starting point resolv.conf
+	dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
+	containerID2 := getIDByName(c, "second")
+
+	// make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again
+	dockerCmd(c, "start", "second")
+
+	// check for update in container
+	containerResolv = readContainerFile(c, containerID2, "resolv.conf")
+	if bytes.Equal(containerResolv, resolvConfSystem) {
+		c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
+	}
+
+	// 3. test that a running container's resolv.conf is not modified while running
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	runningContainerID := strings.TrimSpace(out)
+
+	// replace resolv.conf
+	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// check for update in container
+	containerResolv = readContainerFile(c, runningContainerID, "resolv.conf")
+	if bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
+	}
+
+	// 4. test that a running container's resolv.conf is updated upon restart
+	//    (the above container is still running..)
+	dockerCmd(c, "restart", runningContainerID)
+
+	// check for update in container
+	containerResolv = readContainerFile(c, runningContainerID, "resolv.conf")
+	if !bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
+	}
+
+	// 5. test that additions of a localhost resolver are cleaned from
+	//    host resolv.conf before updating container's resolv.conf copies
+
+	// replace resolv.conf with a localhost-only nameserver copy
+	bytesResolvConf = []byte(tmpLocalhostResolvConf)
+	if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again to pick up changes
+	dockerCmd(c, "start", "first")
+
+	// our first exited container ID should have been updated, but with default DNS
+	// after the cleanup of resolv.conf found only a localhost nameserver:
+	containerResolv = readContainerFile(c, containerID1, "resolv.conf")
+	expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
+	if !bytes.Equal(containerResolv, []byte(expected)) {
+		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
+	}
+
+	// 6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
+	//    of containers' resolv.conf.
+
+	// Restore the original resolv.conf
+	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	// Run the container so it picks up the old settings
+	dockerCmd(c, "run", "--name=third", "busybox", "true")
+	containerID3 := getIDByName(c, "third")
+
+	// Create a modified resolv.conf.aside and override resolv.conf with it
+	bytesResolvConf = []byte(tmpResolvConf)
+	if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
+		c.Fatal(err)
+	}
+
+	err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// start the container again to pick up changes
+	dockerCmd(c, "start", "third")
+
+	// check for update in container
+	containerResolv = readContainerFile(c, containerID3, "resolv.conf")
+	if !bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
+	}
+
+	// cleanup: restoring the original resolv.conf happens in the deferred func
+}
+
+func (s *DockerSuite) TestRunAddHost(c *check.C) {
+	// Not applicable on Windows as it does not support --add-host
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
+
+	actual := strings.Trim(out, "\r\n")
+	if actual != "86.75.30.9\textra" {
+		c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
+	}
+}
+
+// Regression test for #6983
+func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) {
+	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true")
+	if exitCode != 0 {
+		c.Fatalf("Container should have exited with exit code 0")
+	}
+}
+
+// Regression test for #6983
+func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) {
+	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true")
+	if exitCode != 0 {
+		c.Fatalf("Container should have exited with exit code 0")
+	}
+}
+
+// Regression test for #6983
+func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
+	_, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
+	if exitCode != 0 {
+		c.Fatalf("Container should have exited with exit code 0")
+	}
+}
+
+// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
+// but using --attach instead of -a to make sure we read the flag correctly
+func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
+	icmd.RunCommand(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Error:    "exit status 1",
+		Err:      "Conflicting options: -a and -d",
+	})
+}
+
+func (s *DockerSuite) TestRunState(c *check.C) {
+	// TODO Windows: This needs some rework as Windows busybox does not support top
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	state := inspectField(c, id, "State.Running")
+	if state != "true" {
+		c.Fatal("Container state is 'not running'")
+	}
+	pid1 := inspectField(c, id, "State.Pid")
+	if pid1 == "0" {
+		c.Fatal("Container state Pid 0")
+	}
+
+	dockerCmd(c, "stop", id)
+	state = inspectField(c, id, "State.Running")
+	if state != "false" {
+		c.Fatal("Container state is 'running'")
+	}
+	pid2 := inspectField(c, id, "State.Pid")
+	if pid2 == pid1 {
+		c.Fatalf("Container state Pid %s, expected it to change after stop (was %s)", pid2, pid1)
+	}
+
+	dockerCmd(c, "start", id)
+	state = inspectField(c, id, "State.Running")
+	if state != "true" {
+		c.Fatal("Container state is 'not running'")
+	}
+	pid3 := inspectField(c, id, "State.Pid")
+	if pid3 == pid1 {
+		c.Fatalf("Container state Pid %s, expected it to change after restart (was %s)", pid3, pid1)
+	}
+}
+
+// Test for #1737
+func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) {
+	// Not applicable on Windows as it does not support uid or gid in this way
+	testRequires(c, DaemonIsLinux)
+	name := "testrunvolumesuidgid"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+		RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+		RUN echo 'dockerio:x:1001:' >> /etc/group
+		RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`))
+
+	// Test that the uid and gid is copied from the image to the volume
+	out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
+	out = strings.TrimSpace(out)
+	if out != "dockerio:dockerio" {
+		c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
+	}
+}
+
+// Test for #1582
+func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) {
+	// TODO Windows, post RS1. Windows does not yet support volume functionality
+	// that copies from the image to the volume.
+	testRequires(c, DaemonIsLinux)
+	name := "testruncopyvolumecontent"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+		RUN mkdir -p /hello/local && echo hello > /hello/local/world`))
+
+	// Test that the content is copied from the image to the volume
+	out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello")
+	if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
+		c.Fatal("Container failed to transfer content to volume")
+	}
+}
+
+func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
+	name := "testrunmdcleanuponentrypoint"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+		ENTRYPOINT ["echo"]
+		CMD ["testingpoint"]`))
+
+	out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name)
+	if exit != 0 {
+		c.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
+	}
+	out = strings.TrimSpace(out)
+	expected := "root"
+	if testEnv.DaemonPlatform() == "windows" {
+		if strings.Contains(testEnv.MinimalBaseImage(), "windowsservercore") {
+			expected = `user manager\containeradministrator`
+		} else {
+			expected = `ContainerAdministrator` // nanoserver
+		}
+	}
+	if out != expected {
+		c.Fatalf("Expected output %s, got %q. %s", expected, out, testEnv.MinimalBaseImage())
+	}
+}
%s", expected, out, testEnv.MinimalBaseImage()) + } +} + +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { + existingFile := "/bin/cat" + expected := "not a directory" + if testEnv.DaemonPlatform() == "windows" { + existingFile = `\windows\system32\ntdll.dll` + expected = `The directory name is invalid.` + } + + out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") + if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { + c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) + } +} + +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { + name := "testrunexitonstdinclose" + + meow := "/bin/cat" + delay := 60 + if testEnv.DaemonPlatform() == "windows" { + meow = "cat" + } + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) + + stdin, err := runCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + c.Fatalf("Output should be 'hello', got '%q'", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + finish := make(chan error) + go func() { + finish <- runCmd.Wait() + close(finish) + }() + select { + case err := <-finish: + c.Assert(err, check.IsNil) + case <-time.After(time.Duration(delay) * time.Second): + c.Fatal("docker run failed to exit on stdin close") + } + state := inspectField(c, name, "State.Running") + + if state != "false" { + c.Fatal("Container must be stopped after stdin closing") + } +} + +// Test run -i --restart xxx doesn't hang +func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { + name := "test-inter-restart" + + result := icmd.StartCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"}, + Stdin: bytes.NewBufferString("exit 11"), + }) + c.Assert(result.Error, checker.IsNil) + defer func() { + dockerCmdWithResult("stop", name).Assert(c, icmd.Success) + }() + + result = icmd.WaitOnCmd(60*time.Second, result) + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteSpecialFilesAndNotCommit(c *check.C) { + // Cannot run on Windows as this files are not present in Windows + testRequires(c, DaemonIsLinux) + + testRunWriteSpecialFilesAndNotCommit(c, "writehosts", "/etc/hosts") + testRunWriteSpecialFilesAndNotCommit(c, "writehostname", "/etc/hostname") + testRunWriteSpecialFilesAndNotCommit(c, "writeresolv", "/etc/resolv.conf") +} + +func testRunWriteSpecialFilesAndNotCommit(c *check.C, name, path string) { + command := fmt.Sprintf("echo test2267 >> %s && cat %s", path, path) + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", command) + if !strings.Contains(out, "test2267") { + c.Fatalf("%s should contain 'test2267'", path) + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func eqToBaseDiff(out string, c *check.C) bool { + name := "eqToBaseDiff" + 
stringutils.GenerateRandomAlphaOnlyString(32) + dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") + cID := getIDByName(c, name) + baseDiff, _ := dockerCmd(c, "diff", cID) + baseArr := strings.Split(baseDiff, "\n") + sort.Strings(baseArr) + outArr := strings.Split(out, "\n") + sort.Strings(outArr) + return sliceEq(baseArr, outArr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { + // Cannot run on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux) + name := "baddevice" + out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") + + if err == nil { + c.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { + name := "entrypoint" + + out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") + expected := "foobar" + + if out != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) + if testEnv.DaemonPlatform() == "linux" { + testRequires(c, DaemonIsLinux, NotUserNamespace) + } + + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", c) + + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") + } + + // test writing to bind mount + if testEnv.DaemonPlatform() == "windows" { + dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") + } else { + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + } + + readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + if err == nil { + c.Fatal("Container bind mounted illegal directory") + } + + // Windows does not (and likely never will) support mounting a single file + if testEnv.DaemonPlatform() != "windows" { + // test mount a file + dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, content) + } + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. 
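+	// A run that fails validation ("No command specified") should never write a
+	// container ID, so the daemon is expected to remove the empty --cidfile
+	// rather than leave a zero-length file behind.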
+	testRequires(c, DaemonIsLinux)
+
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+	tmpCidFile := path.Join(tmpDir, "cid")
+
+	image := "emptyfs"
+	if testEnv.DaemonPlatform() == "windows" {
+		// Windows can't support an emptyfs image. Just use the regular Windows image
+		image = testEnv.MinimalBaseImage()
+	}
+	out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image)
+	if err == nil {
+		c.Fatalf("Run without command must fail. out=%s", out)
+	} else if !strings.Contains(out, "No command specified") {
+		c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
+	}
+
+	if _, err := os.Stat(tmpCidFile); err == nil {
+		c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
+	}
+}
+
+// #2098 - Docker cidFiles only contain short version of the containerId
+// sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+// TestRunCidFileCheckIDLength tests that run --cidfile returns the long id
+func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) {
+	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+	if err != nil {
+		c.Fatal(err)
+	}
+	tmpCidFile := path.Join(tmpDir, "cid")
+	defer os.RemoveAll(tmpDir)
+
+	out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")
+
+	id := strings.TrimSpace(out)
+	buffer, err := ioutil.ReadFile(tmpCidFile)
+	if err != nil {
+		c.Fatal(err)
+	}
+	cid := string(buffer)
+	if len(cid) != 64 {
+		c.Fatalf("--cidfile should be a long id, not %q", cid)
+	}
+	if cid != id {
+		c.Fatalf("cid must be equal to %s, got %s", id, cid)
+	}
+}
+
+func (s *DockerSuite) TestRunSetMacAddress(c *check.C) {
+	mac := "12:34:56:78:9a:bc"
+	var out string
+	if testEnv.DaemonPlatform() == "windows" {
+		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'")
+		mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs
+	} else {
+		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'")
+	}
+
+	actualMac := strings.TrimSpace(out)
+	if actualMac != mac {
+		c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
+	}
+}
+
+func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
+	// TODO Windows. Network settings are not propagated back to inspect.
+	testRequires(c, DaemonIsLinux)
+	mac := "12:34:56:78:9a:bc"
+	out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress")
+	if inspectedMac != mac {
+		c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
+	}
+}
+
+// test docker run with an invalid mac address
+func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) {
+	out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
+	// using an invalid mac address should error out
+	if err == nil || !strings.Contains(out, "is not a valid mac address") {
+		c.Fatalf("run with an invalid --mac-address should error out")
+	}
+}
+
+func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
+	// TODO Windows. Network settings are not propagated back to inspect.
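+	// Delete the per-container ACCEPT rule from the DOCKER chain by hand, then
+	// remove the container; releasing the port binding should still succeed, so
+	// a second container can bind host port 23 afterwards.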
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out := cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top").Combined()
+
+	id := strings.TrimSpace(out)
+	ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress")
+	icmd.RunCommand("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
+		"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT").Assert(c, icmd.Success)
+
+	cli.DockerCmd(c, "rm", "-fv", id)
+
+	cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
+}
+
+func (s *DockerSuite) TestRunPortInUse(c *check.C) {
+	// TODO Windows. The duplicate NAT message returned by Windows will be
+	// changing, as it is currently completely undecipherable. This test also
+	// needs modifying to run sh rather than top, as top isn't in Windows busybox.
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	port := "1234"
+	dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top")
+
+	out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top")
+	if err == nil {
+		c.Fatalf("Binding on used port must fail")
+	}
+	if !strings.Contains(out, "port is already allocated") {
+		c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
+	}
+}
+
+// https://github.com/docker/docker/issues/12148
+func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
+	// TODO Windows. -P is not yet supported
+	testRequires(c, DaemonIsLinux)
+	// allocate a dynamic port to get the most recently allocated port
+	out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top")
+
+	id := strings.TrimSpace(out)
+	out, _ = dockerCmd(c, "port", id, "80")
+
+	strPort := strings.Split(strings.TrimSpace(out), ":")[1]
+	port, err := strconv.ParseInt(strPort, 10, 64)
+	if err != nil {
+		c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
+	}
+
+	// allocate a static port and a dynamic port together, with the static port
+	// taking the next port in the dynamic range.
+	dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top")
+}
+
+// Regression test for #7792
+func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
+	// TODO Windows: Post RS1. Windows does not support nested mounts.
+	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
+	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
+
+	tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir2)
+
+	// Create a temporary tmpfs mount.
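+	// Layout under test: /tmp and /tmp/tmp2 come from two host temp dirs, and
+	// fooDir is bind-mounted both at /tmp/foo and /tmp/tmp2/foo. For every
+	// touch-me file to be visible, the daemon is expected to apply parent
+	// mounts before the mounts nested inside them, regardless of the order of
+	// the -v flags (#7792).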
+ fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", + "-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir), + "busybox:latest", "sh", "-c", + "ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + // Not applicable on Windows as Windows does not support volumes + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") +} + +//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { + // While Windows supports volumes, it does not support --add-host hence + // this test is not applicable on Windows. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") + if !strings.Contains(out, "nameserver 127.0.0.1") { + c.Fatal("/etc volume mount hides /etc/resolv.conf") + } + + out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") + if !strings.Contains(out, "test123") { + c.Fatal("/etc volume mount hides /etc/hostname") + } + + out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") + out = strings.Replace(out, "\n", " ", -1) + if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { + c.Fatal("/etc volume mount hides /etc/hosts") + } +} + +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { + // TODO Windows (Post RS1). Windows does not support volumes which + // are pre-populated such as is built in the dockerfile used in this test. 
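+	// Image content is copied into a volume only when an anonymous volume is
+	// first created for a container from that image; --volumes-from and bind
+	// mounts reuse existing directories and should stay empty here.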
+ testRequires(c, DaemonIsLinux) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + buildImageSuccessfully(c, "dataimage", build.WithDockerfile(`FROM busybox + RUN ["mkdir", "-p", "/foo"] + RUN ["touch", "/foo/bar"]`)) + dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox") + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir := testutil.RandomTmpDirPath("docker_test_bind_mount_copy_data", testEnv.DaemonPlatform()) + if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } +} + +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + c.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + c.Fatalf("Stdout contains output from pull: %s", stdout) + } +} + +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + buildImageSuccessfully(c, "run_volumes_clean_paths", build.WithDockerfile(`FROM busybox + VOLUME `+prefix+`/foo/`)) + dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + + out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(testEnv.VolumesConfigPath())) { + c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(testEnv.VolumesConfigPath())) { + c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) + } +} + +// Regression test for #3631 +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + // TODO Windows: This should be able to run on Windows if can find an + // alternate to /dev/zero and /dev/stdout. + testRequires(c, DaemonIsLinux) + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + stdout, err := cont.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cont.Start(); err != nil { + c.Fatal(err) + } + n, err := testutil.ConsumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + c.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + c.Fatalf("Expected %d, got %d", expected, n) + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { + // TODO Windows: -P is not currently supported. Also network + // settings are not propagated back. 
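+	// With -P, every port in the exposed 3000-3003 range should be published to
+	// some ephemeral host port; the bindings are read back from the
+	// NetworkSettings.Ports map in the inspect output.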
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + var ports nat.PortMap + if err := json.Unmarshal([]byte(portstr), &ports); err != nil { + c.Fatal(err) + } + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatalf("Port is not mapped for the port %s", port) + } + } +} + +func (s *DockerSuite) TestRunExposePort(c *check.C) { + out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out")) + c.Assert(out, checker.Contains, "invalid range format for --expose") +} + +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc != out { + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc == out { + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if parentContainerIpc != out { + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) + } + + catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") + if catOutput != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) + } + + // check that /dev/mqueue is actually of mqueue type + grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") + if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { + c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) + } + + lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") + lsOutput = strings.Trim(lsOutput, "\n") + if lsOutput != "toto" { + c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { + // Not applicable on Windows as uses 
Unix-specific capabilities
+	testRequires(c, DaemonIsLinux)
+	out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
+	if !strings.Contains(out, "abcd1234") || err == nil {
+		c.Fatalf("run --ipc with a nonexistent container should error out mentioning the container ID")
+	}
+}
+
+func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "create", "busybox")
+
+	id := strings.TrimSpace(out)
+	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
+	if err == nil {
+		c.Fatalf("Run container with ipc mode container should fail when the target container is not running: %s\n%s", out, err)
+	}
+}
+
+func (s *DockerSuite) TestRunModePIDContainer(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top")
+
+	id := strings.TrimSpace(out)
+	state := inspectField(c, id, "State.Running")
+	if state != "true" {
+		c.Fatal("Container state is 'not running'")
+	}
+	pid1 := inspectField(c, id, "State.Pid")
+
+	parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1))
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid")
+	out = strings.Trim(out, "\n")
+	if parentContainerPid != out {
+		c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out)
+	}
+}
+
+func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, DaemonIsLinux)
+	out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top")
+	if !strings.Contains(out, "abcd1234") || err == nil {
+		c.Fatalf("run --pid with a nonexistent container should error out mentioning the container ID")
+	}
+}
+
+func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "create", "busybox")
+
+	id := strings.TrimSpace(out)
+	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox")
+	if err == nil {
+		c.Fatalf("Run container with pid mode container should fail when the target container is not running: %s\n%s", out, err)
+	}
+}
+
+func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) {
+	// Not applicable on Windows as uses Unix-specific capabilities
+	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
+
+	dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
+	defer os.Remove("/dev/mqueue/toto")
+	defer os.Remove("/dev/shm/test")
+	volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm")
+	c.Assert(err, checker.IsNil)
+	if volPath != "/dev/shm" {
+		c.Fatalf("volumePath should have been /dev/shm, was %s", volPath)
+	}
+
+	out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test")
+	if out != "test" {
+		c.Fatalf("Output of /dev/shm/test expected test but found: %s", out)
+	}
+
+	// Check that the mq was created
+	if _, err := os.Stat("/dev/mqueue/toto"); err != nil {
+		c.Fatalf("Failed to confirm 
'/dev/mqueue/toto' presence on host: %s", err.Error()) + } +} + +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if parentContainerNet != out { + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) + } +} + +func (s *DockerSuite) TestRunModePIDHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostPid, err := os.Readlink("/proc/1/ns/pid") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid != out { + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid == out { + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) + } +} + +func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + hostUTS, err := os.Readlink("/proc/1/ns/uts") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS != out { + c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS == out { + c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) + } + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error()) +} + +func (s *DockerSuite) TestRunTLSVerify(c *check.C) { + // Remote daemons use TLS and this test is not applicable when TLS is required. + testRequires(c, SameHostDaemon) + if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 { + c.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + result := dockerCmdWithResult("--tlsverify=false", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"}) + + result = dockerCmdWithResult("--tlsverify=true", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"}) +} + +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { + // TODO Windows. Once moved to libnetwork/CNM, this may be able to be + // re-instated. 
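+	// Probe the allocator's position by binding one dynamic port, occupy the
+	// next port with a plain TCP listener, and verify the daemon skips over
+	// the busy port instead of failing the second run.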
+ testRequires(c, DaemonIsLinux) + // first find allocator current position + out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id) + + out = strings.TrimSpace(out) + if out == "" { + c.Fatal("docker port command output is empty") + } + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + c.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id = strings.TrimSpace(out) + dockerCmd(c, "port", id) +} + +func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) { + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("run should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("container is running but should have failed") + } +} + +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { + addr := "00:16:3E:08:00:50" + args := []string{"run", "--mac-address", addr} + expected := addr + + if testEnv.DaemonPlatform() != "windows" { + args = append(args, "busybox", "ifconfig") + } else { + args = append(args, testEnv.MinimalBaseImage(), "ipconfig", "/all") + expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) + } + + if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) { + c.Fatalf("Output should have contained %q: %s", expected, out) + } +} + +func (s *DockerSuite) TestRunNetHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet == out { + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) + } +} + +func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { + // TODO Windows. As Windows networking evolves and converges towards + // CNM, this test may be possible to enable on Windows. 
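+	// --rm should release the container name when the first run exits, so a
+	// second --net=host run reusing the same name must not collide.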
+ testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") +} + +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Container should have host network namespace") + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { + // TODO Windows. This may be possible to enable in the future. However, + // Windows does not currently support --expose, or populate the network + // settings seen through inspect. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + + var ports nat.PortMap + err := json.Unmarshal([]byte(portstr), &ports) + c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr)) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatal("Port is not mapped for the port "+port, out) + } + } +} + +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { + runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy") + out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name") + if out != "no" { + c.Fatalf("Set default restart policy failed") + } +} + +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + timeout := 10 * time.Second + if testEnv.DaemonPlatform() == "windows" { + timeout = 120 * time.Second + } + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { + c.Fatal(err) + } + + count := inspectField(c, id, "RestartCount") + if count != "3" { + c.Fatalf("Container was restarted %s times, expected %d", count, 3) + } + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } +} + +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + testPriv := true + // don't test privileged mode subtest if user namespaces enabled + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + testPriv = false + } + testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") +} + +func (s *DockerSuite) 
TestPermissionsPtsReadonlyRootfs(c *check.C) { + // Not applicable on Windows due to use of Unix specific functionality, plus + // the use of --read-only which is not supported. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + // Ensure we have not broken writing /dev/pts + out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") + if status != 0 { + c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") + } + expected := "type devpts (rw," + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output to contain %s but contains %s", expected, out) + } +} + +func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) { + touch := "touch " + strings.Join(filenames, " ") + out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } + + if !testPriv { + return + } + + out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { + // Not applicable on Windows which does not support --link + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") + if !strings.Contains(string(out), "testlinked") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) { + // Not applicable on Windows which does not support either --read-only or --dns. 
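+	// /etc/resolv.conf is bind-mounted into the container separately from the
+	// rootfs, so the daemon should still be able to populate it from --dns even
+	// when the rootfs itself is mounted read-only.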
+ testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") + if !strings.Contains(string(out), "1.1.1.1") { + c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(string(out), "testreadonly") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") + } +} + +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo") + runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest") + + // Remove the main volume container and restart the consuming container + dockerCmd(c, "rm", "-f", "voltest") + + // This should not fail since the volumes-from were already applied + dockerCmd(c, "restart", "restarter") +} + +// run container with --rm should remove container if exit code != 0 +func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + name := "flowers" + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "ls", "/notexists")).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + name := "sparkles" + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "commandNotFound")).Assert(c, icmd.Expected{ + ExitCode: 127, + }) + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, NotUserNamespace) + name := "ibuildthecloud" + dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") + + c.Assert(waitRun(name), check.IsNil) + + errchan := make(chan error) + go func() { + if out, _, err := dockerCmdWithError("kill", name); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } +} + +func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { + // TODO Windows. 
This may be possible to enable once Windows supports + // memory limits on containers + testRequires(c, DaemonIsLinux) + // this memory limit is 1 byte less than the min, which is 4MB + // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 + out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") + if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { + c.Fatalf("expected run to fail when using too low a memory limit: %q", out) + } +} + +func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") + if err == nil || code == 0 { + c.Fatal("standard container should not be able to write to /proc/asound") + } +} + +func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + // some kernels don't have this configured so skip the test if this file is not found + // on the host running the tests. + if _, err := os.Stat("/proc/latency_stats"); err != nil { + c.Skip("kernel doesn't have latency_stats configured") + return + } + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testReadPaths := []string{ + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/kcore", + } + for i, filePath := range testReadPaths { + name := fmt.Sprintf("procsieve-%d", i) + shellCmd := fmt.Sprintf("exec 3<%s", filePath) + + out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if exitCode != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestMountIntoProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") + if err == nil || code == 0 { + c.Fatal("container should not be able to mount into /proc") + } +} + +func (s *DockerSuite) TestMountIntoSys(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + testRequires(c, NotUserNamespace) + dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") +} + +func (s *DockerSuite) TestRunUnshareProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, 
NotUserNamespace) + + // In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run. + errChan := make(chan error) + + go func() { + name := "acidburn" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") + if err == nil || + !(strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + go func() { + name := "cereal" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + /* Ensure still fails if running privileged with the default policy */ + go func() { + name := "crashoverride" + out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + var retErr error + for i := 0; i < 3; i++ { + err := <-errChan + if retErr == nil && err != nil { + retErr = err + } + } + if retErr != nil { + c.Fatal(retErr) + } +} + +func (s *DockerSuite) TestRunPublishPort(c *check.C) { + // TODO Windows: This may be possible once Windows moves to libnetwork and CNM + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") + out, _ := dockerCmd(c, "port", "test") + out = strings.Trim(out, "\r\n") + if out != "" { + c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) + } +} + +// Issue #10184. 
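+// TestDevicePermissions checks that the "mrw" permission modifiers passed to
+// --device are honored: the device node inside the container is expected to
+// show up as crw-rw-rw-.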
+func (s *DockerSuite) TestDevicePermissions(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + const permissions = "crw-rw-rw-" + out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") + if status != 0 { + c.Fatalf("expected status 0, got %d", status) + } + if !strings.HasPrefix(out, permissions) { + c.Fatalf("output should begin with %q, got %q", permissions, out) + } +} + +func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +// https://github.com/docker/docker/pull/14498 +func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") + + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") + + if testEnv.DaemonPlatform() != "windows" { + mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if mRO.RW { + c.Fatalf("Expected RO volume was RW") + } + } + + mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if !mRW.RW { + c.Fatalf("Expected RW volume was RO") + } +} + +func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testWritePaths := []string{ + /* modprobe and core_pattern should both be denied by generic + * policy of denials for /proc/sys/kernel. 
These files have been
+		 * picked to be checked as they are particularly sensitive to writes */
+		"/proc/sys/kernel/modprobe",
+		"/proc/sys/kernel/core_pattern",
+		"/proc/sysrq-trigger",
+		"/proc/kcore",
+	}
+	for i, filePath := range testWritePaths {
+		name := fmt.Sprintf("writeprocsieve-%d", i)
+
+		shellCmd := fmt.Sprintf("exec 3>%s", filePath)
+		out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
+		// The open must be denied for every path; a zero exit code means the
+		// profile failed to filter this path.
+		if code == 0 {
+			c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err)
+		}
+	}
+}
+
+func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	expected := "test123"
+
+	filename := createTmpFile(c, expected)
+	defer os.Remove(filename)
+
+	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
+
+	for i := range nwfiles {
+		actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i])
+		if actual != expected {
+			c.Fatalf("expected %s to be: %q, but was: %q", nwfiles[i], expected, actual)
+		}
+	}
+}
+
+func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	filename := createTmpFile(c, "test123")
+	defer os.Remove(filename)
+
+	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
+
+	for i := range nwfiles {
+		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i])
+		if err == nil || exitCode == 0 {
+			c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode)
+		}
+	}
+}
+
+func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount)
+
+	filename := createTmpFile(c, "test123")
+	defer os.Remove(filename)
+
+	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
+
+	for i := range nwfiles {
+		_, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i])
+		if exitCode != 0 {
+			c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
+		}
+	}
+
+	for i := range nwfiles {
+		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i])
+		if err == nil || exitCode == 0 {
+			c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
+		}
+	}
+}
+
+func (s *DockerTrustSuite) TestTrustedRun(c *check.C) {
+	// Windows does not support this functionality
+	testRequires(c, DaemonIsLinux)
+	repoName := s.setupTrustedImage(c, "trusted-run")
+
+	// Try run
+	cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging)
+	cli.DockerCmd(c, "rmi", repoName)
+
+	// Try untrusted run to ensure we pushed the tag to the registry
+	cli.Docker(cli.Args("run", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloadedOnStderr)
+}
+
+func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
+	// Windows does not support this functionality
+	testRequires(c, DaemonIsLinux)
+	repoName := 
fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + // Try trusted run on untrusted tag + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "does not have trust data for", + }) +} + +func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + + // Try run + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) + cli.DockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + + // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. + cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "could not rotate trust to a new trusted root", + }) +} + +func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + _, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux) + + // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace + // itself, but pid>1 should not be able to trace pid1. 
+	_, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net")
+	if exitCode == 0 {
+		c.Fatal("ptrace was not successfully restricted by AppArmor")
+	}
+}
+
+func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor)
+
+	_, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net")
+	if exitCode != 0 {
+		c.Fatal("ptrace of self failed.")
+	}
+}
+
+func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace)
+	_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo")
+	if exitCode == 0 {
+		// The chmod succeeded, so AppArmor failed to block it. Attempt to
+		// repair the host system before failing the test.
+		dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo")
+		c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.")
+	}
+}
+
+func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, DaemonIsLinux)
+
+	dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$")
+}
+
+// run create container failed should clean up the container
+func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) {
+	// TODO Windows. This may be possible to enable once link is supported
+	testRequires(c, DaemonIsLinux)
+	name := "unique_name"
+	_, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!"))
+
+	containerID, err := inspectFieldWithError(name, "Id")
+	c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID))
+	c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID))
+}
+
+func (s *DockerSuite) TestRunNamedVolume(c *check.C) {
+	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar")
+
+	out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
+	c.Assert(strings.TrimSpace(out), check.Equals, "hello")
+
+	out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
+	c.Assert(strings.TrimSpace(out), check.Equals, "hello")
+}
+
+func (s *DockerSuite) TestRunWithUlimits(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n")
+	ul := strings.TrimSpace(out)
+	if ul != "42" {
+		c.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
+	}
+}
+
+func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
+	// Not applicable on Windows as uses Unix specific functionality
+	testRequires(c, DaemonIsLinux)
+
+	// cgroup-parent relative path
+	testRunContainerWithCgroupParent(c, "test", "cgroup-test")
+
+	// cgroup-parent absolute path
+	testRunContainerWithCgroupParent(c, "/cgroup-parent/test", 
"cgroup-test-absolute") +} + +func testRunContainerWithCgroupParent(c *check.C, cgroupParent, name string) { + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := testutil.ParseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id := getIDByName(c, name) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + testRunInvalidCgroupParent(c, "../../../../../../../../SHOULD_NOT_EXIST", "SHOULD_NOT_EXIST", "cgroup-invalid-test") + + testRunInvalidCgroupParent(c, "/../../../../../../../../SHOULD_NOT_EXIST", "/SHOULD_NOT_EXIST", "cgroup-absolute-invalid-test") +} + +func testRunInvalidCgroupParent(c *check.C, cgroupParent, cleanCgroupParent, name string) { + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := testutil.ParseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id := getIDByName(c, name) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. 
Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + + filename := "/sys/fs/cgroup/devices/test123" + out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected cgroup mount point to be read-only, touch file should fail") + } + expected := "Read-only file system" + if !strings.Contains(out, expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { + c.Fatalf("run --net=container with --dns should error out") + } + + out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { + c.Fatalf("run --net=container with --mac-address should error out") + } + + out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { + c.Fatalf("run --net=container with --add-host should error out") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -p should error out") + } + + out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -P should error out") + } + + out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { + c.Fatalf("run --net=container with --expose should error out") + } +} + +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { + // Not applicable on Windows which does not support --net=container or --link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", 
"-d", "busybox", "top") + dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") + dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") +} + +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { + // TODO Windows: This may be possible to convert. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + c.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } +} + +// Issue #4681 +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { + if testEnv.DaemonPlatform() == "windows" { + dockerCmd(c, "run", "--net=none", testEnv.MinimalBaseImage(), "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + // Windows does not support --net=container + testRequires(c, DaemonIsLinux, ExecSupport) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") + out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + + if out1 != out { + c.Fatal("containers with shared net namespace should have same hostname") + } +} + +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { + // TODO Windows: Network settings are not currently propagated. This may + // be resolved in the future with the move to libnetwork and CNM. 
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top")
+	id := strings.TrimSpace(out)
+	res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress")
+	if res != "" {
+		c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
+	}
+}
+
+func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) {
+	// Not applicable as Windows does not support --net=host
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top")
+	dockerCmd(c, "stop", "first")
+	dockerCmd(c, "stop", "second")
+}
+
+func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork")
+	dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first")
+}
+
+func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	// Create 2 networks using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
+	// Run and connect containers to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	// Check connectivity between containers in testnetwork1
+	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	// Connect containers to testnetwork2
+	dockerCmd(c, "network", "connect", "testnetwork2", "first")
+	dockerCmd(c, "network", "connect", "testnetwork2", "second")
+	// Check connectivity between containers over testnetwork2
+	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
+}
+
+func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	// Create 2 networks using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
+	// Run 1 container in testnetwork1 and another in testnetwork2
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// Check isolation between containers: ping must fail
+	_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
+	c.Assert(err, check.NotNil)
+	// Connect first container to testnetwork2
+	dockerCmd(c, "network", "connect", "testnetwork2", "first")
+	// ping must succeed now
+	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
+	c.Assert(err, check.IsNil)
+
+	// Disconnect first container from testnetwork2
+	dockerCmd(c, "network", "disconnect", "testnetwork2", "first")
+	// ping must fail again
+	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestNetworkRmWithActiveContainers(c 
*check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	// Run and connect containers to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	// Network delete with active containers must fail
+	_, _, err := dockerCmdWithError("network", "rm", "testnetwork1")
+	c.Assert(err, check.NotNil)
+
+	// Removal must still fail while the second container is running
+	dockerCmd(c, "stop", "first")
+	_, _, err = dockerCmdWithError("network", "rm", "testnetwork1")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
+	// Create 2 networks using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
+
+	// Run and connect containers to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+	// Check connectivity between containers in testnetwork1
+	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	// Connect containers to testnetwork2
+	dockerCmd(c, "network", "connect", "testnetwork2", "first")
+	dockerCmd(c, "network", "connect", "testnetwork2", "second")
+	// Check connectivity between containers over testnetwork2
+	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
+
+	// Stop second container and test ping failures on both networks
+	dockerCmd(c, "stop", "second")
+	_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	c.Assert(err, check.NotNil)
+	_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2")
+	c.Assert(err, check.NotNil)
+
+	// Start second container and connectivity must be restored on both networks
+	dockerCmd(c, "start", "second")
+	dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
+	dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
+}
+
+func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	// Run a container with --net=host
+	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+
+	// Connecting to the user defined network must fail
+	_, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
+	c.Assert(err, check.NotNil)
+}
+
+func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+	// Run second container in first container's network namespace
+	dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+
+	// Connecting to the user defined network must 
fail
+	out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error())
+}
+
+func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// Create a network using bridge driver
+	dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
+
+	// Connecting to the user defined network must fail
+	out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error())
+
+	// create a container connected to testnetwork1
+	dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// Connecting the second container to the none network must fail as well
+	_, _, err = dockerCmdWithError("network", "connect", "none", "second")
+	c.Assert(err, check.NotNil)
+}
+
+// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited
+func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true")
+	in, err := cmd.StdinPipe()
+	c.Assert(err, check.IsNil)
+	defer in.Close()
+	stdout := bytes.NewBuffer(nil)
+	cmd.Stdout = stdout
+	cmd.Stderr = stdout
+	c.Assert(cmd.Start(), check.IsNil)
+
+	waitChan := make(chan error)
+	go func() {
+		waitChan <- cmd.Wait()
+	}()
+
+	select {
+	case err := <-waitChan:
+		c.Assert(err, check.IsNil, check.Commentf(stdout.String()))
+	case <-time.After(30 * time.Second):
+		c.Fatal("timeout waiting for command to exit")
+	}
+}
+
+func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) {
+	// TODO Windows: This needs validation (error out) in the daemon.
+	testRequires(c, DaemonIsLinux)
+	out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n"
+	if !(strings.Contains(out, expected) || exitCode == 125) {
+		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
+	}
+}
+
+func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) {
+	// TODO Windows: This needs validation (error out) in the daemon.
+	testRequires(c, DaemonIsLinux)
+	out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n"
+	if !(strings.Contains(out, expected) || exitCode == 125) {
+		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
+	}
+}
+
+// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127
+func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) {
+	name := "testNonExecutableCmd"
+	icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "foo").Assert(c, icmd.Expected{
+		ExitCode: 127,
+		Error:    "exit status 127",
+	})
+}
+
+// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127.
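+// (By shell convention, exit status 127 means "command not found", while 126,
+// used in TestCmdCannotBeInvoked below, means found but not executable.)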
+func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) {
+	name := "testNonExistingCmd"
+	icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "/bin/foo").Assert(c, icmd.Expected{
+		ExitCode: 127,
+		Error:    "exit status 127",
+	})
+}
+
+// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or
+// 127 on Windows. The difference is that on Windows the container must actually
+// be started, as that's when the check is made (by design).
+func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) {
+	expected := 126
+	if testEnv.DaemonPlatform() == "windows" {
+		expected = 127
+	}
+	name := "testCmdCannotBeInvoked"
+	icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "/etc").Assert(c, icmd.Expected{
+		ExitCode: expected,
+		Error:    fmt.Sprintf("exit status %d", expected),
+	})
+}
+
+// TestRunNonExistingImage checks that 'docker run foo' exits with status 125 and that the error message contains 'Unable to find image'
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestRunNonExistingImage(c *check.C) {
+	icmd.RunCommand(dockerBinary, "run", "foo").Assert(c, icmd.Expected{
+		ExitCode: 125,
+		Err:      "Unable to find image",
+	})
+}
+
+// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed
+// FIXME(vdemeester) should be a unit test
+func (s *DockerSuite) TestDockerFails(c *check.C) {
+	icmd.RunCommand(dockerBinary, "run", "-foo", "busybox").Assert(c, icmd.Expected{
+		ExitCode: 125,
+		Error:    "exit status 125",
+	})
+}
+
+// TestRunInvalidReference invokes docker run with a bad reference.
+func (s *DockerSuite) TestRunInvalidReference(c *check.C) {
+	out, exit, _ := dockerCmdWithError("run", "busybox@foo")
+	if exit == 0 {
+		c.Fatalf("expected non-zero exit code; received %d", exit)
+	}
+
+	if !strings.Contains(out, "invalid reference format") {
+		c.Fatalf(`Expected "invalid reference format" in output; got: %s`, out)
+	}
+}
+
+// Test fix for issue #17854
+func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) {
+	// Not applicable on Windows as it does not support Linux uid/gid ownership
+	testRequires(c, DaemonIsLinux)
+	name := "testetcfileownership"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
+		RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+		RUN echo 'dockerio:x:1001:' >> /etc/group
+		RUN chown dockerio:dockerio /etc`))
+
+	// Test that dockerio ownership of /etc is retained at runtime
+	out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc")
+	out = strings.TrimSpace(out)
+	if out != "dockerio:dockerio" {
+		c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out)
+	}
+}
+
+func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	expected := "642"
+	out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj")
+	oomScoreAdj := strings.TrimSpace(out)
+	if oomScoreAdj != expected {
+		c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj)
+	}
+}
+
+func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]."
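+	// The kernel only accepts oom_score_adj values in [-1000, 1000]; anything
+	// outside that range is expected to be rejected before the container starts.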
+ if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } + out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } +} + +func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + icmd.RunCommand("mount", "--bind", tmpDir, tmpDir).Assert(c, icmd.Success) + icmd.RunCommand("mount", "--make-private", "--make-shared", tmpDir).Assert(c, icmd.Success) + + dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") + + // Make sure a bind mount under a shared volume propagated to host. + if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { + c.Fatalf("Bind mount under shared volume did not propagate to host") + } + + mount.Unmount(path.Join(tmpDir, "mnt1")) +} + +func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Prepare a source directory with file in it. We will bind mount this + // directory and see if file shows up. + tmpDir2, err := ioutil.TempDir("", "volume-source2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + icmd.RunCommand("mount", "--bind", tmpDir, tmpDir).Assert(c, icmd.Success) + icmd.RunCommand("mount", "--make-private", "--make-shared", tmpDir).Assert(c, icmd.Success) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") + + // Bind mount tmpDir2/ onto tmpDir/mnt1. 
If mount propagates inside + // container then contents of tmpDir2/slave-testfile should become + // visible at "/volume-dest/mnt1/slave-testfile" + icmd.RunCommand("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")).Assert(c, icmd.Success) + + out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") + + mount.Unmount(path.Join(tmpDir, "mnt1")) + + if out != "Test" { + c.Fatalf("Bind mount under slave volume did not propagate to container") + } +} + +func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") + c.Assert(exitCode, checker.Not(checker.Equals), 0) + c.Assert(out, checker.Contains, "invalid mount config") +} + +func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { + testRequires(c, DaemonIsLinux) + + testImg := "testvolumecopy" + buildImageSuccessfully(c, testImg, build.WithDockerfile(` + FROM busybox + RUN mkdir -p /foo && echo hello > /foo/hello + `)) + + dockerCmd(c, "run", "-v", "foo:/foo", testImg) + out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") + + dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "rm", "-fv", "test") + dockerCmd(c, "volume", "inspect", "test") + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + + // Remove the parent so there are not other references to the volumes + dockerCmd(c, "rm", "-f", "parent") + // now remove the child and ensure the named volume (and only the named volume) still exists + dockerCmd(c, "rm", "-fv", "child") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + runSleepingContainer(c, "--name=test", "-p", "8000:8000") + + // Wait until container is fully up and running + c.Assert(waitRun("test"), check.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") + // We will need the following `inspect` to diagnose the issue if test fails (#21247) + out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") + out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") + c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", 
out, err1, out1, err2, out2)) + // check for windows error as well + // TODO Windows Post TP5. Fix the error message string + c.Assert(strings.Contains(string(out), "port is already allocated") || + strings.Contains(string(out), "were not connected because a duplicate name exists") || + strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || + strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) + dockerCmd(c, "rm", "-f", "test") + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Test for one character directory name case (#20122) +func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") + c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") +} + +func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume + buildImageSuccessfully(c, "volumecopy", build.WithDockerfile(`FROM busybox + RUN mkdir /foo && echo hello > /foo/bar + CMD cat /foo/bar`)) + dockerCmd(c, "volume", "create", "test") + + // test with the nocopy flag + out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // test default behavior which is to copy for non-binds + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + // error out when the volume is already populated + out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // do not error out when copy isn't explicitly set even though it's already populated + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // do not allow copy modes on volumes-from + dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true") + out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // do not allow copy modes on binds + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "nameserver 127.0.0.1" + expectedWarning := "Localhost DNS setting" + cli.DockerCmd(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + Err: expectedWarning, + }) + + expectedOutput = "nameserver 1.2.3.4" + cli.DockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) + + expectedOutput = "search example.com" + cli.DockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", 
"/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) + + expectedOutput = "options timeout:3" + cli.DockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) + + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out := cli.DockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf").Combined() + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "1.2.3.4\textra" + out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSuite) TestRunRmAndWait(c *check.C) { + dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2") + + out, code, err := dockerCmdWithError("wait", "test") + c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code)) + c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code)) + c.Assert(code, checker.Equals, 0) +} + +// Test that auto-remove is performed by the daemon (API 1.25 and above) +func (s *DockerSuite) TestRunRm(c *check.C) { + name := "miss-me-when-im-gone" + cli.DockerCmd(c, "run", "--name="+name, "--rm", "busybox") + + cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No such object: " + name, + }) +} + +// Test that auto-remove is performed by the client on API versions that do not support daemon-side api-remove (API < 1.25) +func (s *DockerSuite) TestRunRmPre125Api(c *check.C) { + name := "miss-me-when-im-gone" + envs := appendBaseEnv(false, "DOCKER_API_VERSION=1.24") + cli.Docker(cli.Args("run", "--name="+name, "--rm", "busybox"), cli.WithEnvironmentVariables(envs...)).Assert(c, icmd.Success) + + cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No such object: " + name, + }) +} + +// Test case for #23498 +func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + })) + defer ctx.Close() + + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "foo") + + // CMD will be reset as well (the same as setting a custom entrypoint) + cli.Docker(cli.Args("run", "--entrypoint=", "-t", name)).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "No command specified", + }) +} + 
+func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { + s.d.StartWithBusybox(c, "--debug", "--default-ulimit=nofile=65535") + + name := "test-A" + _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) + + out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=65535:65535]") + + name = "test-B" + _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=42:42]") +} + +func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { + c.Skip("Splunk log-driver isn't supported") + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Handles error conditions for --credentialspec. Validating E2E success cases +// requires additional infrastructure (AD for example) on CI servers. +func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { + testRequires(c, DaemonIsWindows) + attempts := []struct{ value, expectedError string }{ + {"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"file://", "no value supplied for file:// credential spec security option"}, + {"registry://", "no value supplied for registry:// credential spec security option"}, + {`file://c:\blah.txt`, "path cannot be absolute"}, + {`file://doesnotexist.txt`, "The system cannot find the file specified"}, + } + for _, attempt := range attempts { + _, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+attempt.value, "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", attempt.value)) + c.Assert(err.Error(), checker.Contains, attempt.expectedError, check.Commentf("%s expected %s got %s", attempt.value, attempt.expectedError, err)) + } +} + +// Windows specific test to validate credential specs with a well-formed spec. +// Note it won't actually do anything in CI configuration with the spec, but +// it should not fail to run a container. +func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + validCS := readFile(`fixtures\credentialspecs\valid.json`, c) + writeFile(filepath.Join(testEnv.DockerBasePath(), `credentialspecs\valid.json`), validCS, c) + dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") +} + +// Windows specific test to ensure that a servicing app container is started +// if necessary once a container exits. 
It does this by forcing a no-op +// servicing event and verifying the event from Hyper-V-Compute +func (s *DockerSuite) TestRunServicingContainer(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + + out := cli.DockerCmd(c, "run", "-d", testEnv.MinimalBaseImage(), "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255").Combined() + containerID := strings.TrimSpace(out) + cli.WaitExited(c, containerID, 60*time.Second) + + result := icmd.RunCommand("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) + result.Assert(c, icmd.Success) + out2 := result.Combined() + c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) + c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) + c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) +} + +func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + tmpFile, err := ioutil.TempFile("", "touch-me") + c.Assert(err, checker.IsNil) + defer tmpFile.Close() + + data := "touch-me-foo-bar\n" + if _, err := tmpFile.Write([]byte(data)); err != nil { + c.Fatal(err) + } + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /") + c.Assert(out, checker.Not(checker.Contains), "tmp:") + c.Assert(out, checker.Contains, data) + + out = inspectFieldJSON(c, name, "Config.Volumes") + c.Assert(out, checker.Contains, "null") +} + +func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") +} + +func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsProcess) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. 
CPU shares discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsHyperv) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +// Test for #25099 +func (s *DockerSuite) TestRunEmptyEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "invalid environment variable:" + + out, _, err := dockerCmdWithError("run", "-e", "", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=foo", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +// #28658 +func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { + name := "testslowstdinclosing" + repeat := 3 // regression happened 50% of the time + for i := 0; i < repeat; i++ { + cmd := icmd.Cmd{ + Command: []string{dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat"}, + Stdin: &delayedReader{}, + } + done := make(chan error, 1) + go func() { + err := icmd.RunCmd(cmd).Error + done <- err + }() + + select { + case <-time.After(15 * time.Second): + c.Fatal("running container timed out") // cleanup in teardown + case err := <-done: + c.Assert(err, checker.IsNil) + } + } +} + +type delayedReader struct{} + +func (s *delayedReader) Read([]byte) (int, error) { + time.Sleep(500 * time.Millisecond) + return 0, io.EOF +} + +// #28823 (originally #28639) +func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + emptyDir, err := ioutil.TempDir("", "test-read-only-dev-shm") + c.Assert(err, check.IsNil) + defer os.RemoveAll(emptyDir) + out, _, err := dockerCmdWithError("run", "--rm", "--read-only", + "-v", fmt.Sprintf("%s:/dev/shm:ro", emptyDir), + "busybox", "touch", "/dev/shm/foo") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Read-only file system") +} + +func (s *DockerSuite) TestRunMount(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // mnt1, mnt2, and testCatFooBar are commonly used in multiple test cases + tmpDir, err := ioutil.TempDir("", "mount") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + mnt1, mnt2 := path.Join(tmpDir, "mnt1"), path.Join(tmpDir, "mnt2") + if err := os.Mkdir(mnt1, 0755); err != nil { 
+		c.Fatal(err)
+	}
+	if err := os.Mkdir(mnt2, 0755); err != nil {
+		c.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(mnt1, "test1"), []byte("test1"), 0644); err != nil {
+		c.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(mnt2, "test2"), []byte("test2"), 0644); err != nil {
+		c.Fatal(err)
+	}
+	testCatFooBar := func(cName string) error {
+		out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1")
+		if out != "test1" {
+			return fmt.Errorf("%s not mounted on /foo", mnt1)
+		}
+		out, _ = dockerCmd(c, "exec", cName, "cat", "/bar/test2")
+		if out != "test2" {
+			return fmt.Errorf("%s not mounted on /bar", mnt2)
+		}
+		return nil
+	}
+
+	type testCase struct {
+		equivalents [][]string
+		valid       bool
+		// fn should be nil if valid==false
+		fn func(cName string) error
+	}
+	cases := []testCase{
+		{
+			equivalents: [][]string{
+				{
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/bar", mnt2),
+				},
+				{
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2),
+				},
+				{
+					"--volume", mnt1 + ":/foo",
+					"--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2),
+				},
+			},
+			valid: true,
+			fn:    testCatFooBar,
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2),
+				},
+				{
+					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2),
+				},
+			},
+			valid: false,
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2),
+				},
+				{
+					"--volume", mnt1 + ":/foo",
+					"--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2),
+				},
+			},
+			valid: false,
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--read-only",
+					"--mount", "type=volume,dst=/bar",
+				},
+			},
+			valid: true,
+			fn: func(cName string) error {
+				_, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere")
+				return err
+			},
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--read-only",
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
+					"--mount", "type=volume,dst=/bar",
+				},
+				{
+					"--read-only",
+					"--volume", fmt.Sprintf("%s:/foo", mnt1),
+					"--mount", "type=volume,dst=/bar",
+				},
+			},
+			valid: true,
+			fn: func(cName string) error {
+				out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1")
+				if out != "test1" {
+					return fmt.Errorf("%s not mounted on /foo", mnt1)
+				}
+				_, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere")
+				return err
+			},
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt2),
+				},
+				{
+					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
+					"--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2),
+				},
+				{
+					"--volume", fmt.Sprintf("%s:/foo", mnt1),
+					"--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2),
+				},
+			},
+			valid: false,
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--volume", fmt.Sprintf("%s:/foo", mnt1),
+					"--mount", fmt.Sprintf("type=volume,src=%s,target=/foo", mnt2),
+				},
+			},
+			valid: false,
+		},
+		{
+			equivalents: [][]string{
+				{
+					"--mount", "type=volume,target=/foo",
+					"--mount", "type=volume,target=/foo",
+				},
+			},
+			valid: false,
+		},
+	}
+
+	for i, testCase := range cases {
+		for j, opts := range testCase.equivalents {
+
cName := fmt.Sprintf("mount-%d-%d", i, j)
+			_, _, err := dockerCmdWithError(append([]string{"run", "-i", "-d", "--name", cName},
+				append(opts, []string{"busybox", "top"}...)...)...)
+			if testCase.valid {
+				c.Assert(err, check.IsNil,
+					check.Commentf("got error while creating a container with %v (%s)", opts, cName))
+				c.Assert(testCase.fn(cName), check.IsNil,
+					check.Commentf("got error while executing test for %v (%s)", opts, cName))
+				dockerCmd(c, "rm", "-f", cName)
+			} else {
+				c.Assert(err, checker.NotNil,
+					check.Commentf("got nil while creating a container with %v (%s)", opts, cName))
+			}
+		}
+	}
+}
+
+// Test that passing a FQDN as hostname properly sets both the hostname and
+// /etc/hostname. Test case for 29100
+func (s *DockerSuite) TestRunHostnameFQDN(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	expectedOutput := "foobar.example.com\nfoobar.example.com\nfoobar\nexample.com\nfoobar.example.com"
+	out, _ := dockerCmd(c, "run", "--hostname=foobar.example.com", "busybox", "sh", "-c", `cat /etc/hostname && hostname && hostname -s && hostname -d && hostname -f`)
+	c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput)
+
+	out, _ = dockerCmd(c, "run", "--hostname=foobar.example.com", "busybox", "sh", "-c", `cat /etc/hosts`)
+	expectedOutput = "foobar.example.com foobar"
+	c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput)
+}
+
+// Test case for 29129
+func (s *DockerSuite) TestRunHostnameInHostMode(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+
+	expectedOutput := "foobar\nfoobar"
+	out, _ := dockerCmd(c, "run", "--net=host", "--hostname=foobar", "busybox", "sh", "-c", `echo $HOSTNAME && hostname`)
+	c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput)
+}
+
+func (s *DockerSuite) TestRunAddDeviceCgroupRule(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	deviceRule := "c 7:128 rwm"
+
+	out, _ := dockerCmd(c, "run", "--rm", "busybox", "cat", "/sys/fs/cgroup/devices/devices.list")
+	if strings.Contains(out, deviceRule) {
+		c.Fatalf("%s shouldn't have been in devices.list", deviceRule)
+	}
+
+	out, _ = dockerCmd(c, "run", "--rm", fmt.Sprintf("--device-cgroup-rule=%s", deviceRule), "busybox", "grep", deviceRule, "/sys/fs/cgroup/devices/devices.list")
+	c.Assert(strings.TrimSpace(out), checker.Equals, deviceRule)
+}
+
+// Verifies that running as local system is operating correctly on Windows
+func (s *DockerSuite) TestWindowsRunAsSystem(c *check.C) {
+	testRequires(c, DaemonIsWindows)
+	if testEnv.DaemonKernelVersionNumeric() < 15000 {
+		c.Skip("Requires build 15000 or later")
+	}
+	out, _ := dockerCmd(c, "run", "--net=none", `--user=nt authority\system`, "--hostname=XYZZY", minimalBaseImage(), "cmd", "/c", `@echo %USERNAME%`)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "XYZZY$")
+}
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
new file mode 100644
index 0000000..b3d1b07
--- /dev/null
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -0,0 +1,1576 @@
+// +build !windows
+
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/sysinfo"
+	icmd
"github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #6509 +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { + checkRedirect := func(command string) { + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), checker.IsNil) + ch := make(chan error) + go func() { + ch <- cmd.Wait() + close(ch) + }() + + select { + case <-time.After(10 * time.Second): + c.Fatal("command timeout") + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("wait err")) + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") +} + +// Test recursive bind mount works by default +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { + // /tmp gets permission denied + testRequires(c, NotUserNamespace, SameHostDaemon) + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) + c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + c.Assert(err, checker.IsNil) + defer f.Close() + + out, _ := dockerCmd(c, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) +} + +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, err := os.Stat("/dev/snd"); err != nil { + c.Skip("Host does not have /dev/snd") + } + + out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) + + out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) +} + +// TestRunAttachDetach checks attaching and detaching with the default escape sequence. 
+func (s *DockerSuite) TestRunAttachDetach(c *check.C) {
+	name := "attach-detach"
+
+	dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat")
+
+	cmd := exec.Command(dockerBinary, "attach", name)
+	stdout, err := cmd.StdoutPipe()
+	c.Assert(err, checker.IsNil)
+	cpty, tty, err := pty.Open()
+	c.Assert(err, checker.IsNil)
+	defer cpty.Close()
+	cmd.Stdin = tty
+	c.Assert(cmd.Start(), checker.IsNil)
+	c.Assert(waitRun(name), check.IsNil)
+
+	_, err = cpty.Write([]byte("hello\n"))
+	c.Assert(err, checker.IsNil)
+
+	out, err := bufio.NewReader(stdout).ReadString('\n')
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "hello")
+
+	// escape sequence
+	_, err = cpty.Write([]byte{16})
+	c.Assert(err, checker.IsNil)
+	time.Sleep(100 * time.Millisecond)
+	_, err = cpty.Write([]byte{17})
+	c.Assert(err, checker.IsNil)
+
+	ch := make(chan struct{})
+	go func() {
+		cmd.Wait()
+		ch <- struct{}{}
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Second):
+		c.Fatal("timed out waiting for container to exit")
+	}
+
+	running := inspectField(c, name, "State.Running")
+	c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running"))
+
+	out, _ = dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container="+name)
+	// attach and detach event should be monitored
+	c.Assert(out, checker.Contains, "attach")
+	c.Assert(out, checker.Contains, "detach")
+}
+
+// TestRunAttachDetachFromFlag checks attaching and detaching with the escape sequence specified via flags.
+func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) {
+	name := "attach-detach"
+	keyCtrlA := []byte{1}
+	keyA := []byte{97}
+
+	dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat")
+
+	cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	cpty, tty, err := pty.Open()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer cpty.Close()
+	cmd.Stdin = tty
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	c.Assert(waitRun(name), check.IsNil)
+
+	if _, err := cpty.Write([]byte("hello\n")); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "hello" {
+		c.Fatalf("expected 'hello', got %q", out)
+	}
+
+	// escape sequence
+	if _, err := cpty.Write(keyCtrlA); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write(keyA); err != nil {
+		c.Fatal(err)
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		cmd.Wait()
+		ch <- struct{}{}
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Second):
+		c.Fatal("timed out waiting for container to exit")
+	}
+
+	running := inspectField(c, name, "State.Running")
+	c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running"))
+}
+
+// TestRunAttachDetachFromInvalidFlag checks that attaching with an invalid escape sequence specified via flags falls back to the default sequence.
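+// The test passes ctrl-A (upper-case), which the key parser rejects; the
+// client is expected to print a warning and keep the default ctrl-p,ctrl-q.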
+func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") + c.Assert(waitRun(name), check.IsNil) + + // specify an invalid detach key, container will ignore it and use default + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-A,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + bufReader := bufio.NewReader(stdout) + out, err := bufReader.ReadString('\n') + if err != nil { + c.Fatal(err) + } + // it should print a warning to indicate the detach key flag is invalid + errStr := "Invalid detach keys (ctrl-A,a) provided" + c.Assert(strings.TrimSpace(out), checker.Equals, errStr) +} + +// TestRunAttachDetachFromConfig checks attaching and detaching with the escape sequence specified via config file. +func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-a,a" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunAttachDetachKeysOverrideConfig checks attaching and detaching with the detach flags, making sure it overrides config file +func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + 
os.Setenv(homeKey, tmpDir)
+
+	data := `{
+		"detachKeys": "ctrl-e,e"
+	}`
+
+	err = ioutil.WriteFile(tmpCfg, []byte(data), 0600)
+	c.Assert(err, checker.IsNil)
+
+	// Then do the work
+	name := "attach-detach"
+	dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat")
+
+	cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	cpty, tty, err := pty.Open()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer cpty.Close()
+	cmd.Stdin = tty
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	c.Assert(waitRun(name), check.IsNil)
+
+	if _, err := cpty.Write([]byte("hello\n")); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "hello" {
+		c.Fatalf("expected 'hello', got %q", out)
+	}
+
+	// escape sequence
+	if _, err := cpty.Write(keyCtrlA); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write(keyA); err != nil {
+		c.Fatal(err)
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		cmd.Wait()
+		ch <- struct{}{}
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Second):
+		c.Fatal("timed out waiting for container to exit")
+	}
+
+	running := inspectField(c, name, "State.Running")
+	c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running"))
+}
+
+func (s *DockerSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) {
+	name := "attach-detach"
+	keyA := []byte{97}
+	keyB := []byte{98}
+
+	dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat")
+
+	cmd := exec.Command(dockerBinary, "attach", "--detach-keys=a,b,c", name)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	cpty, tty, err := pty.Open()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer cpty.Close()
+	cmd.Stdin = tty
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	c.Assert(waitRun(name), check.IsNil)
+
+	// Invalid escape sequence aba, should print aba in output
+	if _, err := cpty.Write(keyA); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write(keyB); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write(keyA); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write([]byte("\n")); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "aba" {
+		c.Fatalf("expected 'aba', got %q", out)
+	}
+}
+
+func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) {
+	testRequires(c, cpuCfsQuota)
+
+	file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
+	out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "8000")
+
+	out = inspectField(c, "test", "HostConfig.CpuQuota")
+	c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed"))
+}
+
+func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) {
+	testRequires(c, cpuCfsPeriod)
+
+	file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
+	out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "50000")
+
+	out, _ = dockerCmd(c, "run", "--cpu-period", "0", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals,
"100000") + + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) +} + +func (s *DockerSuite) TestRunWithInvalidCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + out, _, err := dockerCmdWithError("run", "--cpu-period", "900", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "2000000", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "-3", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + cli.DockerCmd(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file).Assert(c, icmd.Expected{ + Out: "52428800", + }) + + cli.InspectCmd(c, "test1", cli.Format(".HostConfig.KernelMemory")).Assert(c, icmd.Expected{ + Out: "52428800", + }) +} + +func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum kernel memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") + c.Assert(err, check.NotNil) + expected = "invalid size" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { + testRequires(c, cpuShare) + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { + testRequires(c, cpuShare) + testRequires(c, memoryLimitSupport) + cli.DockerCmd(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test").Assert(c, icmd.Expected{ + Out: "test\n", + }) +} + +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.cpus" + out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetCpus") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.mems" + out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetMems") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + + file := "/sys/fs/cgroup/blkio/blkio.weight" + out, _ := dockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", 
"cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "300") + + out = inspectField(c, "test", "HostConfig.BlkioWeight") + c.Assert(out, check.Equals, "300") +} + +func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "Range of blkio weight is from 10 to 1000" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + testRequires(c, memoryLimitSupport, swapMemorySupport) + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(600 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } +} + +func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + cli.DockerCmd(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file).Assert(c, icmd.Expected{ + Out: "33554432", + }) + cli.InspectCmd(c, "test", cli.Format(".HostConfig.Memory")).Assert(c, icmd.Expected{ + Out: "33554432", + }) +} + +// TestRunWithoutMemoryswapLimit sets memory limit and disables swap +// memory limit, this means the processes in the container can use +// 16M memory and as much swap memory as they need (if the host +// supports swap memory). 
+func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	testRequires(c, memoryLimitSupport)
+	testRequires(c, swapMemorySupport)
+	dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true")
+}
+
+func (s *DockerSuite) TestRunWithSwappiness(c *check.C) {
+	testRequires(c, memorySwappinessSupport)
+	file := "/sys/fs/cgroup/memory/memory.swappiness"
+	out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "0")
+
+	out = inspectField(c, "test", "HostConfig.MemorySwappiness")
+	c.Assert(out, check.Equals, "0")
+}
+
+func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) {
+	testRequires(c, memorySwappinessSupport)
+	out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Valid memory swappiness range is 0-100"
+	c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, got %q", expected, out))
+
+	out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, got %q", expected, out))
+}
+
+func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) {
+	testRequires(c, memoryReservationSupport)
+
+	file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"
+	out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "209715200")
+
+	out = inspectField(c, "test", "HostConfig.MemoryReservation")
+	c.Assert(out, check.Equals, "209715200")
+}
+
+func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) {
+	testRequires(c, memoryLimitSupport)
+	testRequires(c, memoryReservationSupport)
+	out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected := "Minimum memory limit can not be less than memory reservation limit"
+	c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation"))
+
+	out, _, err = dockerCmdWithError("run", "--memory-reservation", "1k", "busybox", "true")
+	c.Assert(err, check.NotNil)
+	expected = "Minimum memory reservation allowed is 4MB"
+	c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation"))
+}
+
+func (s *DockerSuite) TestStopContainerSignal(c *check.C) {
+	out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`)
+	containerID := strings.TrimSpace(out)
+
+	c.Assert(waitRun(containerID), checker.IsNil)
+
+	dockerCmd(c, "stop", containerID)
+	out, _ = dockerCmd(c, "logs", containerID)
+
+	c.Assert(out, checker.Contains, "exit trapped", check.Commentf("Expected `exit trapped` in the log"))
+}
+
+func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) {
+	testRequires(c, memoryLimitSupport)
+	testRequires(c, swapMemorySupport)
+	out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test")
+	expected := "Minimum memoryswap limit should be larger than memory limit"
+	c.Assert(err, check.NotNil)
+
+	c.Assert(out, checker.Contains, expected)
+}
+
+func (s
*DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { + testRequires(c, cgroupCpuset, SameHostDaemon) + + sysInfo := sysinfo.New(true) + cpus, err := parsers.ParseUintList(sysInfo.Cpus) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(cpus)+1; i++ { + if !cpus[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { + testRequires(c, cgroupCpuset) + + sysInfo := sysinfo.New(true) + mems, err := parsers.ParseUintList(sysInfo.Mems) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(mems)+1; i++ { + if !mems[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { + testRequires(c, cpuShare, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "The minimum allowed cpu-shares is 2" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "shares: invalid argument" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "The maximum allowed cpu-shares is" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm-default" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "67108864") +} + +func (s *DockerSuite) TestRunWithShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm" + out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "1073741824") +} + +func (s *DockerSuite) TestRunTmpfsMountsEnsureOrdered(c *check.C) { + tmpFile, err := ioutil.TempFile("", "test") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", "-v", tmpFile.Name()+":/run/test", "busybox", "ls", "/run") + c.Assert(out, checker.Contains, "test") +} + +func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows 
daemon as
+	// Windows does not support tmpfs mounts.
+	testRequires(c, DaemonIsLinux)
+	if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil {
+		c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out)
+	}
+	if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil {
+		c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out)
+	}
+	if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil {
+		c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out)
+	}
+	if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil {
+		c.Fatalf("/run mounted on tmpfs when it should have failed with an invalid mount option")
+	}
+	if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil {
+		c.Fatalf("Should have generated an error saying Duplicate mount points")
+	}
+}
+
+func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) {
+	name := "img-with-volumes"
+	buildImageSuccessfully(c, name, build.WithDockerfile(`
+	FROM busybox
+	VOLUME /run
+	RUN touch /run/stuff
+	`))
+	out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run")
+	c.Assert(out, checker.Not(checker.Contains), "stuff")
+}
+
+// Test case for #22420
+func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	expectedOptions := []string{"rw", "nosuid", "nodev", "noexec", "relatime"}
+	out, _ := dockerCmd(c, "run", "--tmpfs", "/tmp", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+	c.Assert(out, checker.Not(checker.Contains), "size=")
+
+	expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime"}
+	out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+	c.Assert(out, checker.Not(checker.Contains), "size=")
+
+	expectedOptions = []string{"rw", "nosuid", "nodev", "relatime", "size=8192k"}
+	out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,exec,size=8192k", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+
+	expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k"}
+	out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,size=8192k,exec,size=4096k,noexec", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'")
+	for _, option := range expectedOptions {
+		c.Assert(out, checker.Contains, option)
+	}
+
+	// We use debian:jessie as there is no findmnt in busybox. Also the output will be in the format of
+	// TARGET PROPAGATION
+	// /tmp   shared
+	// so we only capture `shared` here.
+ expectedOptions = []string{"shared"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:shared", "debian:jessie", "findmnt", "-o", "TARGET,PROPAGATION", "/tmp") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } +} + +func (s *DockerSuite) TestRunSysctls(c *check.C) { + testRequires(c, DaemonIsLinux) + var err error + + out, _ := dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=1", "--name", "test", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "1") + + out = inspectFieldJSON(c, "test", "HostConfig.Sysctls") + + sysctls := make(map[string]string) + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "1") + + out, _ = dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=0", "--name", "test1", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "0") + + out = inspectFieldJSON(c, "test1", "HostConfig.Sysctls") + + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") + + icmd.RunCommand(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", + "busybox", "cat", "/proc/sys/kernel/foobar").Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "invalid argument", + }) +} + +// TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "apparmor=unconfined", + "--security-opt", "seccomp="+tmpFile.Name(), + "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name":"fchmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name": "fchmodat", + "action":"SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), + "busybox", "chmod", "400", "/etc/hostname").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to +// deny unshare of a userns exits with operation not permitted. 
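+// The profile's argument filter matches unshare(2) only when its first argument
+// equals CLONE_NEWUSER (0x10000000 in sched.h), leaving other unshare flags allowed.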
+func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor)
+	// from sched.h
+	jsonData := fmt.Sprintf(`{
+	"defaultAction": "SCMP_ACT_ALLOW",
+	"syscalls": [
+		{
+			"name": "unshare",
+			"action": "SCMP_ACT_ERRNO",
+			"args": [
+				{
+					"index": 0,
+					"value": %d,
+					"op": "SCMP_CMP_EQ"
+				}
+			]
+		}
+	]
+}`, uint64(0x10000000))
+	tmpFile, err := ioutil.TempFile("", "profile.json")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer tmpFile.Close()
+
+	if _, err := tmpFile.Write([]byte(jsonData)); err != nil {
+		c.Fatal(err)
+	}
+	icmd.RunCommand(dockerBinary, "run",
+		"--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(),
+		"debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+// TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test'
+// with the default seccomp profile exits with operation not permitted.
+func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled)
+	ensureSyscallTest(c)
+
+	icmd.RunCommand(dockerBinary, "run", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "clone failed: Operation not permitted",
+	})
+}
+
+// TestRunSeccompUnconfinedCloneUserns checks that
+// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns.
+func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone)
+	ensureSyscallTest(c)
+
+	// make sure running with seccomp=unconfined is ok
+	icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined",
+		"syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
+		Out: "nobody",
+	})
+}
+
+// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test'
+// allows creating a userns.
+func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	// make sure running with --privileged is ok
+	icmd.RunCommand(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{
+		Out: "nobody",
+	})
+}
+
+// TestRunSeccompProfileAllow32Bit checks that 32 bit code can run on x86_64
+// with the default seccomp profile.
+func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64)
+	ensureSyscallTest(c)
+
+	icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test", "id").Assert(c, icmd.Success)
+}
+
+// TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds.
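+// ulimit is a shell builtin wrapping setrlimit(2), hence the bash -c invocation.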
+func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled)
+
+	// ulimit uses setrlimit, so we want to make sure we don't break it
+	icmd.RunCommand(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510").Assert(c, icmd.Success)
+}
+
+func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "Operation not permitted") {
+		c.Fatalf("test 0: expected Operation not permitted, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "Operation not permitted") {
+		c.Fatalf("test 1: expected Operation not permitted, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 2: expected No such file or directory, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 3: expected No such file or directory, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 4: expected No such file or directory, got: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0")
+	if err == nil || !strings.Contains(out, "Operation not permitted") {
+		c.Fatalf("test 0: expected Operation not permitted, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1")
+	if err != nil || !strings.Contains(out, "hello1") {
+		c.Fatalf("test 1: expected hello1, got: %s, %v", out, err)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2")
+	if err != nil || !strings.Contains(out, "hello2") {
+		c.Fatalf("test 2: expected hello2, got: %s, %v", out, err)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3")
+	if err != nil || !strings.Contains(out, "hello3") {
+		c.Fatalf("test 3: expected hello3, got: %s, %v", out, err)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test")
+	if err == nil || !strings.Contains(out, "No such file or directory") {
+		c.Fatalf("test 4: expected No such file or directory, got: %s", out)
+	}
+
+	out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4")
+	if err != nil || !strings.Contains(out, "hello4") {
+		c.Fatalf("test 5: expected hello4, got: %s, %v", out, err)
+	}
+}
+
+// TestRunNoNewPrivSetuid checks that --security-opt='no-new-privileges=true' prevents
+// effective uid transitions on executing setuid binaries.
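+// The option maps to the kernel's no_new_privs prctl flag, so an execve of a
+// setuid binary keeps the caller's effective uid.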
+func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
+	ensureNNPTest(c)
+
+	// test that running a setuid binary results in no effective uid transition
+	icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges=true", "--user", "1000",
+		"nnp-test", "/usr/bin/nnp-test").Assert(c, icmd.Expected{
+		Out: "EUID=1000",
+	})
+}
+
+// TestLegacyRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents
+// effective uid transitions on executing setuid binaries.
+func (s *DockerSuite) TestLegacyRunNoNewPrivSetuid(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
+	ensureNNPTest(c)
+
+	// test that running a setuid binary results in no effective uid transition
+	icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000",
+		"nnp-test", "/usr/bin/nnp-test").Assert(c, icmd.Expected{
+		Out: "EUID=1000",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChown(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_CHOWN
+	dockerCmd(c, "run", "busybox", "chown", "100", "/tmp")
+	// test that non root user does not have default capability CAP_CHOWN
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_CHOWN
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_DAC_OVERRIDE
+	dockerCmd(c, "run", "busybox", "sh", "-c", "echo test > /etc/passwd")
+	// test that non root user does not have default capability CAP_DAC_OVERRIDE
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Permission denied",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesFowner(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_FOWNER
+	dockerCmd(c, "run", "busybox", "chmod", "777", "/etc/passwd")
+	// test that non root user does not have default capability CAP_FOWNER
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// TODO test that root user can drop default capability CAP_FOWNER
+}
+
+// TODO CAP_KILL
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetuid(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_SETUID
+	dockerCmd(c, "run", "syscall-test", "setuid-test")
+	// test that non root user does not have default capability CAP_SETUID
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_SETUID
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "setuid", "syscall-test", "setuid-test").Assert(c, icmd.Expected{
+
ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetgid(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_SETGID
+	dockerCmd(c, "run", "syscall-test", "setgid-test")
+	// test that non root user does not have default capability CAP_SETGID
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_SETGID
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+// TODO CAP_SETPCAP
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_NET_BIND_SERVICE
+	dockerCmd(c, "run", "syscall-test", "socket-test")
+	// test that non root user does not have default capability CAP_NET_BIND_SERVICE
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Permission denied",
+	})
+	// test that root user can drop default capability CAP_NET_BIND_SERVICE
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Permission denied",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_NET_RAW
+	dockerCmd(c, "run", "syscall-test", "raw-test")
+	// test that non root user does not have default capability CAP_NET_RAW
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_NET_RAW
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChroot(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_SYS_CHROOT
+	dockerCmd(c, "run", "busybox", "chroot", "/", "/bin/true")
+	// test that non root user does not have default capability CAP_SYS_CHROOT
+	icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+	// test that root user can drop default capability CAP_SYS_CHROOT
+	icmd.RunCommand(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Err:      "Operation not permitted",
+	})
+}
+
+func (s *DockerSuite) TestUserNoEffectiveCapabilitiesMknod(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	ensureSyscallTest(c)
+
+	// test that a root user has default capability CAP_MKNOD
+	dockerCmd(c, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2")
+	// test that non root user does not have default capability CAP_MKNOD
+	icmd.RunCommand(dockerBinary, "run",
"--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) + // test that root user can drop default capability CAP_MKNOD + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) +} + +// TODO CAP_AUDIT_WRITE +// TODO CAP_SETFCAP + +func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) { + testRequires(c, SameHostDaemon, Apparmor) + + // running w seccomp unconfined tests the apparmor profile + result := icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") + result.Assert(c, icmd.Expected{ExitCode: 1}) + if !(strings.Contains(result.Combined(), "Permission denied") || strings.Contains(result.Combined(), "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", result.Combined(), result.Error) + } + + result = icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") + result.Assert(c, icmd.Expected{ExitCode: 1}) + if !(strings.Contains(result.Combined(), "Permission denied") || strings.Contains(result.Combined(), "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", result.Combined(), result.Error) + } +} + +// make sure the default profile can be successfully parsed (using unshare as it is +// something which we know is blocked in the default profile) +func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + out, _, err := dockerCmdWithError("run", "--security-opt", "seccomp=../profiles/seccomp/default.json", "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted") +} + +// TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271) +func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon) + if _, err := os.Stat("/dev/zero"); err != nil { + c.Skip("Host does not have /dev/zero") + } + + // Create a temporary directory to create symlink + tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a symbolic link to /dev/zero + symZero := filepath.Join(tmpDir, "zero") + err = os.Symlink("/dev/zero", symZero) + c.Assert(err, checker.IsNil) + + // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp", + // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp". 
+	tmpFile := filepath.Join(tmpDir, "temp")
+	err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666)
+	c.Assert(err, checker.IsNil)
+	symFile := filepath.Join(tmpDir, "file")
+	err = os.Symlink(tmpFile, symFile)
+	c.Assert(err, checker.IsNil)
+
+	// Create a symbolic link to /dev/zero, this time with a relative path (#22271)
+	err = os.Symlink("zero", "/dev/symzero")
+	if err != nil {
+		c.Fatal("/dev/symzero creation failed")
+	}
+	// We need to remove this symbolic link here as it is created in /dev/, not in the temporary directory as above
+	defer os.Remove("/dev/symzero")
+
+	// md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23
+	out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
+	c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23"))
+
+	// Using the symlink "tmpDir/file", which points at a regular file, should fail because the target is not a device node
+	out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
+	c.Assert(err, check.NotNil)
+	c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'"))
+
+	// md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 (this time the device is backed by a relative symlink, see #22271)
+	out, _ = dockerCmd(c, "run", "--device", "/dev/symzero:/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
+	c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23"))
+}
+
+// TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit
+func (s *DockerSuite) TestRunPIDsLimit(c *check.C) {
+	testRequires(c, pidsLimit)
+
+	file := "/sys/fs/cgroup/pids/pids.max"
+	out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "4")
+
+	out = inspectField(c, "skittles", "HostConfig.PidsLimit")
+	c.Assert(out, checker.Equals, "4", check.Commentf("setting the pids limit failed"))
+}
+
+func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+
+	file := "/sys/fs/cgroup/devices/devices.list"
+	out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file)
+	c.Logf("out: %q", out)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm")
+}
+
+func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	fi, err := os.Stat("/dev/snd/timer")
+	if err != nil {
+		c.Skip("Host does not have /dev/snd/timer")
+	}
+	stat, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		c.Skip("Could not stat /dev/snd/timer")
+	}
+
+	file := "/sys/fs/cgroup/devices/devices.list"
+	out, _ := dockerCmd(c, "run", "--device", "/dev/snd/timer:w", "busybox", "cat", file)
+	c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256))
+}
+
+func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled)
+
+	s.d.StartWithBusybox(c)
+
+	jsonData := `{
+	"defaultAction": "SCMP_ACT_ALLOW",
+	"syscalls": [
+		{
+			"names": ["chmod", "fchmod", "fchmodat"],
+			"action": "SCMP_ACT_ERRNO"
+		}
+	]
+}`
+	tmpFile, err := ioutil.TempFile("", "profile.json")
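+	// (Editorial aside: the profile above uses the newer plural "names"
+	// field; older profiles carried a singular "name" per rule, and the
+	// daemon rejects profiles that mix the two, as the next test exercises.
+	// An equivalent legacy-form rule, shown as a sketch under the documented
+	// profile schema:)
+	legacyRule := `{"name": "chmod", "action": "SCMP_ACT_ERRNO"}`
+	_ = legacyRule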
c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "names": ["fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + jsonData := `{ + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + } + ], + "architectures": [ + "SCMP_ARCH_X32" + ], + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") +} + +func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + s.d.StartWithBusybox(c) + + // 1) verify I can run containers with the Docker default shipped profile which allows chmod + _, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + // 2) restart the daemon and add a custom seccomp profile in which we deny chmod + s.d.Restart(c, "--seccomp-profile="+tmpFile.Name()) + + out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + out = inspectField(c, "test", "HostConfig.NanoCpus") + c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano 
CPUs failed")) + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") +} diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 0000000..3b14576 --- /dev/null +++ b/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,385 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/opencontainers/go-digest" +) + +// save a repo using gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + out, _ := dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + repoTarball, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + deleteImages(repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: strings.NewReader(repoTarball), + }).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-gz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: strings.NewReader(out), + }).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-single-tag-test" + dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedImageID := strings.TrimSpace(out) + + out, _, err := 
testutil.RunCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)),
+		exec.Command("tar", "t"),
+		exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID)))
+	c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err))
+}
+
+func (s *DockerSuite) TestSaveCheckTimes(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	repoName := "busybox:latest"
+	out, _ := dockerCmd(c, "inspect", repoName)
+	data := []struct {
+		ID      string
+		Created time.Time
+	}{}
+	err := json.Unmarshal([]byte(out), &data)
+	c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal inspect output from %q: %v", repoName, err))
+	c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to unmarshal any data from %q", repoName))
+	tarTvTimeFormat := "2006-01-02 15:04"
+	out, _, err = testutil.RunCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "save", repoName),
+		exec.Command("tar", "tv"),
+		exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex())))
+	c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and creation time: %s, %v", out, err))
+}
+
+func (s *DockerSuite) TestSaveImageId(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	repoName := "foobar-save-image-id-test"
+	dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName))
+
+	out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName)
+	cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:")
+
+	out, _ = dockerCmd(c, "images", "-q", repoName)
+	cleanedShortImageID := strings.TrimSpace(out)
+
+	// Make sure IDs are not empty
+	c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty."))
+	c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty."))
+
+	saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID)
+	tarCmd := exec.Command("tar", "t")
+
+	var err error
+	tarCmd.Stdin, err = saveCmd.StdoutPipe()
+	c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err))
+	grepCmd := exec.Command("grep", cleanedLongImageID)
+	grepCmd.Stdin, err = tarCmd.StdoutPipe()
+	c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err))
+
+	c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err))
+	c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err))
+	defer func() {
+		saveCmd.Wait()
+		tarCmd.Wait()
+		dockerCmd(c, "rmi", repoName)
+	}()
+
+	out, _, err = runCommandWithOutput(grepCmd)
+
+	c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID: %s, %v", out, err))
+}
+
+// save a repo and try to load it using flags
+func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	name := "test-save-and-load-repo-flags"
+	dockerCmd(c, "run", "--name", name, "busybox", "true")
+
+	repoName := "foobar-save-load-test"
+
+	deleteImages(repoName)
+	dockerCmd(c, "commit", name, repoName)
+
+	before, _ := dockerCmd(c, "inspect", repoName)
+
+	out, _, err := testutil.RunCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "save", repoName),
+		exec.Command(dockerBinary, "load"))
+	c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err))
+
+	after, _ := dockerCmd(c, "inspect", repoName)
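+	// (Editorial aside: comparing the full inspect JSON is a deliberately
+	// blunt round-trip check; any drift in IDs, layers or config would show
+	// up in the equality assert below. A narrower, ID-only comparison could
+	// look like this sketch, which assumes only inspect's documented
+	// JSON-array output, nothing daemon-internal:)
+	idsOf := func(inspectJSON string) ([]string, error) {
+		var docs []struct {
+			ID string `json:"Id"`
+		}
+		if err := json.Unmarshal([]byte(inspectJSON), &docs); err != nil {
+			return nil, err
+		}
+		ids := make([]string, 0, len(docs))
+		for _, doc := range docs {
+			ids = append(ids, doc.ID)
+		}
+		return ids, nil
+	}
+	_ = idsOf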
+	c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load"))
+}
+
+func (s *DockerSuite) TestSaveWithNoExistImage(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	imgName := "foobar-non-existing-image"
+
+	out, _, err := dockerCmdWithError("save", "-o", "test-img.tar", imgName)
+	c.Assert(err, checker.NotNil, check.Commentf("save image should fail for non-existing image"))
+	c.Assert(out, checker.Contains, fmt.Sprintf("No such image: %s", imgName))
+}
+
+func (s *DockerSuite) TestSaveMultipleNames(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	repoName := "foobar-save-multi-name-test"
+
+	// Tag a first image
+	dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName))
+
+	// Tag a second image
+	dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName))
+
+	out, _, err := testutil.RunCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)),
+		exec.Command("tar", "xO", "repositories"),
+		exec.Command("grep", "-q", "-E", "(-one|-two)"),
+	)
+	c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err))
+}
+
+func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	makeImage := func(from string, tag string) string {
+		var (
+			out string
+		)
+		out, _ = dockerCmd(c, "run", "-d", from, "true")
+		cleanedContainerID := strings.TrimSpace(out)
+
+		out, _ = dockerCmd(c, "commit", cleanedContainerID, tag)
+		imageID := strings.TrimSpace(out)
+		return imageID
+	}
+
+	repoName := "foobar-save-multi-images-test"
+	tagFoo := repoName + ":foo"
+	tagBar := repoName + ":bar"
+
+	idFoo := makeImage("busybox:latest", tagFoo)
+	idBar := makeImage("busybox:latest", tagBar)
+
+	deleteImages(repoName)
+
+	// create the archive
+	out, _, err := testutil.RunCommandPipelineWithOutput(
+		exec.Command(dockerBinary, "save", repoName, "busybox:latest"),
+		exec.Command("tar", "t"))
+	c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err))
+
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+	var actual []string
+	for _, l := range lines {
+		if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) {
+			actual = append(actual, strings.TrimSuffix(l, ".json"))
+		}
+	}
+
+	// make the list of expected layers
+	out = inspectField(c, "busybox:latest", "Id")
+	expected := []string{strings.TrimSpace(out), idFoo, idBar}
+
+	// prefixes are not in tar
+	for i := range expected {
+		expected[i] = digest.Digest(expected[i]).Hex()
+	}
+
+	sort.Strings(actual)
+	sort.Strings(expected)
+	c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contain the right layers: got %v, expected %v, output: %q", actual, expected, out))
+}
+
+// Issue #6722 #5892 ensure directories are included in changes
+func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
+	layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
+
+	name := "save-directory-permissions"
+	tmpDir, err := ioutil.TempDir("", "save-layers-with-directories")
+	c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err))
+	extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir")
+	os.Mkdir(extractionDirectory, 0777)
+
+	defer os.RemoveAll(tmpDir)
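+	// (Editorial aside: the AUFS listing above additionally contains the
+	// driver's whiteout bookkeeping entries ".wh..wh.aufs", ".wh..wh.orph/"
+	// and ".wh..wh.plnk/", which aufs stores inside every layer tar; other
+	// graph drivers emit only the plain directory entries, which is why the
+	// comparison further down accepts either listing.)
+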
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`)) + + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command("tar", "-xf", "-", "-C", extractionDirectory), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) + + dirs, err := ioutil.ReadDir(extractionDirectory) + c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) + + found := false + for _, entry := range dirs { + var entriesSansDev []string + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) + defer f.Close() + + entries, err := testutil.ListTar(f) + for _, e := range entries { + if !strings.Contains(e, "dev/") { + entriesSansDev = append(entriesSansDev, e) + } + } + c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) + + if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { + found = true + break + } + } + } + + c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) + +} + +// Test loading a weird image where one of the layers is of zero size. +// The layer.tar file is actually zero bytes, no padding or anything else. +// See issue: 18170 +func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") +} + +func (s *DockerSuite) TestSaveLoadParents(c *check.C) { + testRequires(c, DaemonIsLinux) + + makeImage := func(from string, addfile string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "touch", addfile) + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + imageID := strings.TrimSpace(out) + + dockerCmd(c, "rm", "-f", cleanedContainerID) + return imageID + } + + idFoo := makeImage("busybox", "foo") + idBar := makeImage(idFoo, "bar") + + tmpDir, err := ioutil.TempDir("", "save-load-parents") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + c.Log("tmpdir", tmpDir) + + outfile := filepath.Join(tmpDir, "out.tar") + + dockerCmd(c, "save", "-o", outfile, idBar, idFoo) + dockerCmd(c, "rmi", idBar) + dockerCmd(c, "load", "-i", outfile) + + inspectOut := inspectField(c, idBar, "Parent") + c.Assert(inspectOut, checker.Equals, idFoo) + + inspectOut = inspectField(c, idFoo, "Parent") + c.Assert(inspectOut, checker.Equals, "") +} + +func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "saveloadnotag" + + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV foo=bar")) + id := inspectField(c, name, "Id") + + // Test to make sure that save w/o name just shows imageID during load + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", id), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + // Should not show 'name' but should show the image ID during the load + c.Assert(out, checker.Not(checker.Contains), "Loaded image: ") + c.Assert(out, checker.Contains, 
"Loaded image ID:") + c.Assert(out, checker.Contains, id) + + // Test to make sure that save by name shows that name during load + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + c.Assert(out, checker.Contains, "Loaded image: "+name+":latest") + c.Assert(out, checker.Not(checker.Contains), "Loaded image ID:") +} diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go new file mode 100644 index 0000000..deb0616 --- /dev/null +++ b/integration-cli/docker_cli_save_load_unix_test.go @@ -0,0 +1,107 @@ +// +build !windows + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// save a repo and try to load it using stdout +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { + name := "test-save-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + before, _ := dockerCmd(c, "commit", name, repoName) + before = strings.TrimRight(before, "\n") + + tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") + c.Assert(err, check.IsNil) + defer os.Remove(tmpFile.Name()) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "save", repoName}, + Stdout: tmpFile, + }).Assert(c, icmd.Success) + + tmpFile, err = os.Open(tmpFile.Name()) + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + deleteImages(repoName) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: tmpFile, + }).Assert(c, icmd.Success) + + after := inspectField(c, repoName, "Id") + after = strings.TrimRight(after, "\n") + + c.Assert(after, check.Equals, before) //inspect is not the same after a save / load + + deleteImages(repoName) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), check.IsNil) + c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "cowardly refusing", check.Commentf("help output is not being yielded")) +} + +func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { + name := "test-load" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN touch aa + `)) + + tmptar := name + ".tar" + dockerCmd(c, "save", "-o", tmptar, name) + defer os.Remove(tmptar) + + dockerCmd(c, "rmi", name) + dockerCmd(c, "tag", "busybox", name) + out, _ := dockerCmd(c, "load", "-i", tmptar) + expected := fmt.Sprintf("The image %s:latest already exists, renaming the old one with ID", name) + c.Assert(out, checker.Contains, expected) +} + +// fail because load didn't receive data from stdin +func (s *DockerSuite) TestLoadNoStdinFail(c *check.C) { + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, dockerBinary, "load") + cmd.Stdin = tty + cmd.Stdout = tty + 
cmd.Stderr = tty
+	c.Assert(cmd.Run(), check.NotNil) // docker-load should fail
+
+	buf := make([]byte, 1024)
+
+	n, err := pty.Read(buf)
+	c.Assert(err, check.IsNil) //could not read tty output
+	c.Assert(string(buf[:n]), checker.Contains, "requested load from stdin, but stdin is empty")
+}
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
new file mode 100644
index 0000000..2c3312d
--- /dev/null
+++ b/integration-cli/docker_cli_search_test.go
@@ -0,0 +1,131 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+// search for repos named "busybox" on the central registry
+func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) {
+	testRequires(c, Network, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "search", "busybox")
+	c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("expected search output to include the 'Busybox base image.' description"))
+}
+
+func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
+	out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning"))
+
+	// -s --stars deprecated since Docker 1.13
+	out, _, err = dockerCmdWithError("search", "--stars=a", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning"))
+
+	// -s --stars deprecated since Docker 1.13
+	out, _, err = dockerCmdWithError("search", "-s=-1", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning"))
+}
+
+func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
+	testRequires(c, Network, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "search", "--help")
+	c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM")
+
+	outSearchCmd, _ := dockerCmd(c, "search", "busybox")
+	outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox")
+
+	c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("the --no-trunc option did not take effect"))
+
+	outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") // busybox is an official base image, not an AUTOMATED one
+	outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
+	for i := range outSearchCmdautomatedSlice {
+		c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("busybox should not be listed as an AUTOMATED image: %s", outSearchCmdautomated))
+	}
+
+	outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") // busybox is an official image, so it must not match is-official=false
+	outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n")
+	for i := range outSearchCmdNotOfficialSlice {
+		c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("busybox should not be listed among non-OFFICIAL images: %s", outSearchCmdNotOfficial))
+	}
+
+	outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") // busybox is an official image, so it must match is-official=true
+	outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n")
+	c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 result line, 1 trailing newline
+	c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("busybox should be listed as an OFFICIAL image: %s", outSearchCmdOfficial))
+
+	outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox")
+	c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("the number of images with stars should not exceed the total number of results: %s", outSearchCmdStars))
+
+	dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox")
+
+	// --automated deprecated since Docker 1.13
+	outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") // busybox is an official base image, not an AUTOMATED one
+	outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n")
+	for i := range outSearchCmdautomatedSlice1 {
+		c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("busybox should not be listed as an AUTOMATED image: %s", outSearchCmdautomated1))
+	}
+
+	// -s --stars deprecated since Docker 1.13
+	outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox")
+	c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("the number of images with stars should not exceed the total number of results: %s", outSearchCmdStars1))
+
+	// -s --stars deprecated since Docker 1.13
+	dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox")
+}
+
+// search for repos which start with "ubuntu-" on the central registry
+func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) {
+	testRequires(c, Network, DaemonIsLinux)
+
+	dockerCmd(c, "search", "ubuntu-")
+}
+
+// test case for #23055
+func (s *DockerSuite) TestSearchWithLimit(c *check.C) {
+	testRequires(c, Network, DaemonIsLinux)
+
+	limit := 10
+	out, _, err := dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker")
+	c.Assert(err, checker.IsNil)
+	outSlice := strings.Split(out, "\n")
+	c.Assert(outSlice, checker.HasLen, limit+2) // limit results + 1 header + 1 trailing newline
+
+	limit = 50
+	out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker")
+	c.Assert(err, checker.IsNil)
+	outSlice = strings.Split(out, "\n")
+	c.Assert(outSlice, checker.HasLen, limit+2) // limit results + 1 header + 1 trailing newline
+
+	limit = 100
+	out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker")
+	c.Assert(err, checker.IsNil)
+	outSlice = strings.Split(out, "\n")
+	c.Assert(outSlice, checker.HasLen, limit+2) // limit results + 1 header + 1 trailing newline
+
+	limit = 0
+	_, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker")
+	c.Assert(err, checker.Not(checker.IsNil))
+
+	limit = 200
+	_, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker")
+	c.Assert(err, checker.Not(checker.IsNil))
+}
diff --git a/integration-cli/docker_cli_secret_create_test.go b/integration-cli/docker_cli_secret_create_test.go
new file mode 100644
index 0000000..839c392
--- /dev/null
+++ b/integration-cli/docker_cli_secret_create_test.go
@@ -0,0 +1,131 @@
+// +build !windows
+
+package main
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	testName := "test_secret"
+	id := d.CreateSecret(c, swarm.SecretSpec{
+		Annotations: swarm.Annotations{
+			Name: testName,
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
+
+	secret := d.GetSecret(c, id)
+	c.Assert(secret.Spec.Name, checker.Equals, testName)
+}
+
+func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	testName := "test_secret"
+	id := d.CreateSecret(c, swarm.SecretSpec{
+		Annotations: swarm.Annotations{
+			Name: testName,
+			Labels: map[string]string{
+				"key1": "value1",
+				"key2": "value2",
+			},
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
+
+	secret := d.GetSecret(c,
id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + c.Assert(len(secret.Spec.Labels), checker.Equals, 2) + c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(secret.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + Data: []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + fake := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: id, + }, + Data: []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", fake)) + + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("secret", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("secret", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("secret", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake[:5]) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "secretCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_secret" + out, err := d.Cmd("secret", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} diff --git a/integration-cli/docker_cli_secret_inspect_test.go b/integration-cli/docker_cli_secret_inspect_test.go new file mode 100644 index 0000000..218463b --- /dev/null +++ b/integration-cli/docker_cli_secret_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: 
[]byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("secret", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: n, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.GetSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "secret", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 2) +} diff --git a/integration-cli/docker_cli_secret_ls_test.go b/integration-cli/docker_cli_secret_ls_test.go new file mode 100644 index 0000000..f3201f7 --- /dev/null +++ b/integration-cli/docker_cli_secret_ls_test.go @@ -0,0 +1,125 @@ +// +build !windows + +package main + +import ( + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretList(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName0 := "test0" + testName1 := "test1" + + // create secret test0 + id0 := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName0, + Labels: map[string]string{"type": "test"}, + }, + Data: []byte("TESTINGDATA0"), + }) + c.Assert(id0, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id0)) + + secret := d.GetSecret(c, id0) + c.Assert(secret.Spec.Name, checker.Equals, testName0) + + // create secret test1 + id1 := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName1, + Labels: map[string]string{"type": "production"}, + }, + Data: []byte("TESTINGDATA1"), + }) + c.Assert(id1, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id1)) + + secret = d.GetSecret(c, id1) + c.Assert(secret.Spec.Name, checker.Equals, testName1) + + // test by command `docker secret ls` + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by name `docker secret ls --filter name=xxx` + args := []string{ + "secret", + "ls", + "--filter", + "name=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + // test filter by id `docker secret ls --filter id=xxx` + args = []string{ + "secret", + "ls", + "--filter", + "id=" + id1, + } + out, err = d.Cmd(args...) 
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test filter by label `docker secret ls --filter label=xxx` + args = []string{ + "secret", + "ls", + "--filter", + "label=type", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + args = []string{ + "secret", + "ls", + "--filter", + "label=type=test", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, testName0) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName1) + + args = []string{ + "secret", + "ls", + "--filter", + "label=type=production", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), testName0) + c.Assert(strings.TrimSpace(out), checker.Contains, testName1) + + // test invalid filter `docker secret ls --filter noexisttype=xxx` + args = []string{ + "secret", + "ls", + "--filter", + "noexisttype=test0", + } + out, err = d.Cmd(args...) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + c.Assert(strings.TrimSpace(out), checker.Contains, "Error response from daemon: Invalid filter 'noexisttype'") +} diff --git a/integration-cli/docker_cli_service_create_test.go b/integration-cli/docker_cli_service_create_test.go new file mode 100644 index 0000000..6fc92c2 --- /dev/null +++ b/integration-cli/docker_cli_service_create_test.go @@ -0,0 +1,447 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "foo") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeVolume) + 
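+	// (Editorial aside: the "volume-nocopy" token in the --mount flag is
+	// expected to surface here as VolumeOptions.NoCopy on the container's
+	// HostConfig mount, i.e. the new volume is not pre-populated with the
+	// image's existing /foo contents.)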
c.Assert(mountConfig[0].VolumeOptions, checker.NotNil) + c.Assert(mountConfig[0].VolumeOptions.NoCopy, checker.True) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteSecret(c, testName) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *check.C) { + d := s.AddDaemon(c, true, true) + + testPaths := map[string]string{ + "app": "/etc/secret", + "test_secret": "test_secret", + "relative_secret": "relative/secret", + "escapes_in_container": "../secret", + } + + var secretFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secretFlags = append(secretFlags, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, secretFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) 
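+	// (Editorial aside: relative secret targets are mounted under
+	// /run/secrets inside the container, which the verification loop below
+	// encodes; a sketch of that resolution rule, derived from this test's
+	// own expectations rather than from a daemon API:)
+	containerSecretPath := func(target string) string {
+		if filepath.IsAbs(target) {
+			return target
+		}
+		return filepath.Join("/run/secrets", target)
+	}
+	_ = containerSecretPath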
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/run/secrets", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: "mysecret", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", "source=mysecret,target=target1", "--secret", "source=mysecret,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/run/secrets", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-config" + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + 
Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].ConfigName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteConfig(c, testName) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *check.C) { + d := s.AddDaemon(c, true, true) + + testPaths := map[string]string{ + "app": "/etc/config", + "test_config": "test_config", + "relative_config": "relative/config", + } + + var configFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + configFlags = append(configFlags, "--config", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, configFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) 
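+	// (Editorial aside: unlike secrets, which are rooted at /run/secrets,
+	// relative config targets resolve against the container's root
+	// directory; the filepath.Join("/", path) in the loop below encodes
+	// exactly that rule.)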
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "myconfig", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", "source=myconfig,target=target1", "--config", "source=myconfig,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f 
/dev/null") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mountConfig[0].TmpfsOptions, checker.NotNil) + c.Assert(mountConfig[0].TmpfsOptions.SizeBytes, checker.Equals, int64(1048576)) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mounts[0].Name, checker.Equals, "") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) + + out, err = s.nodeCmd(c, task.NodeID, "logs", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") + c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") +} + +func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--network=name=test_swarm_br,alias=srv_alias", "--name=alias_tst_container", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container alias config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .NetworkSettings.Networks.test_swarm_br.Aliases}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure the only alias 
+	c.Assert(mountConfig[0].Source, checker.Equals, "")
+	c.Assert(mountConfig[0].Target, checker.Equals, "/foo")
+	c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeTmpfs)
+	c.Assert(mountConfig[0].TmpfsOptions, checker.NotNil)
+	c.Assert(mountConfig[0].TmpfsOptions.SizeBytes, checker.Equals, int64(1048576))
+
+	// check the container's actual mounts
+	out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	var mounts []types.MountPoint
+	c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil)
+	c.Assert(mounts, checker.HasLen, 1)
+
+	c.Assert(mounts[0].Type, checker.Equals, mount.TypeTmpfs)
+	c.Assert(mounts[0].Name, checker.Equals, "")
+	c.Assert(mounts[0].Destination, checker.Equals, "/foo")
+	c.Assert(mounts[0].RW, checker.Equals, true)
+
+	out, err = s.nodeCmd(c, task.NodeID, "logs", task.Status.ContainerStatus.ContainerID)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k")
+}
+
+func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+	out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--network=name=test_swarm_br,alias=srv_alias", "--name=alias_tst_container", "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	id := strings.TrimSpace(out)
+
+	var tasks []swarm.Task
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		tasks = d.GetServiceTasks(c, id)
+		return len(tasks) > 0, nil
+	}, checker.Equals, true)
+
+	task := tasks[0]
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" {
+			task = d.GetTask(c, task.ID)
+		}
+		return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil
+	}, checker.Equals, true)
+
+	// check container alias config
+	out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .NetworkSettings.Networks.test_swarm_br.Aliases}}", task.Status.ContainerStatus.ContainerID)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	// Make sure the only alias seen is the container-id
+	var aliases []string
+	c.Assert(json.Unmarshal([]byte(out), &aliases), checker.IsNil)
+	c.Assert(aliases, checker.HasLen, 1)
+
+	c.Assert(task.Status.ContainerStatus.ContainerID, checker.Contains, aliases[0])
+}
diff --git a/integration-cli/docker_cli_service_health_test.go b/integration-cli/docker_cli_service_health_test.go
new file mode 100644
index 0000000..7898385
--- /dev/null
+++ b/integration-cli/docker_cli_service_health_test.go
@@ -0,0 +1,134 @@
+// +build !windows
+
+package main
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/daemon/cluster/executor/container"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+// start a service, then make its task unhealthy while it is running;
+// the unhealthy task should then be detected and killed
+func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) {
+	testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
+
+	d := s.AddDaemon(c, true, true)
+
+	// build an image with a health-check
+	// note: build with `d.BuildImageWithOut`, not `buildImage`, so the image
+	// ends up on this test daemon rather than the default one
+	imageName := "testhealth"
+	_, _, err := d.BuildImageWithOut(imageName,
+		`FROM busybox
+		RUN touch /status
+		HEALTHCHECK --interval=1s --timeout=1s --retries=1 \
+		  CMD cat /status`,
+		true)
+	c.Check(err, check.IsNil)
+
+	serviceName := "healthServiceRun"
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	id := strings.TrimSpace(out)
+
+	var tasks []swarm.Task
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		tasks = d.GetServiceTasks(c, id)
+		return tasks, nil
+	}, checker.HasLen, 1)
+
+	task := tasks[0]
+
+	// wait for task to start
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		task = d.GetTask(c, task.ID)
+		return task.Status.State, nil
+	}, checker.Equals, swarm.TaskStateRunning)
+	containerID := task.Status.ContainerStatus.ContainerID
+
+	// wait for container to be healthy
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID)
+		return strings.TrimSpace(out), nil
+	}, checker.Equals, "healthy")
+
+	// make the health check fail
+	d.Cmd("exec", containerID, "rm", "/status")
+	// wait for container to be unhealthy
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID)
+		return strings.TrimSpace(out), nil
+	}, checker.Equals, "unhealthy")
+
+	// Task should be terminated
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		task = d.GetTask(c, task.ID)
+		return task.Status.State, nil
+	}, checker.Equals, swarm.TaskStateFailed)
+
+	if !strings.Contains(task.Status.Err, container.ErrContainerUnhealthy.Error()) {
+		c.Fatal("unhealthy task exited with an unexpected error")
+	}
+}
+
+// start a service whose task is unhealthy at the beginning;
+// its tasks should be blocked in the starting stage until the health check passes
+func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
+	testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
+
+	d := s.AddDaemon(c, true, true)
+
+	// a service started from this image won't pass the health check
+	imageName := "testhealth"
+	_, _, err := d.BuildImageWithOut(imageName,
+		`FROM busybox
+		HEALTHCHECK --interval=1s --timeout=1s --retries=1024 \
+		  CMD cat /status`,
+		true)
+	c.Check(err, check.IsNil)
+
+	serviceName := "healthServiceStart"
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	id := strings.TrimSpace(out)
+
+	var tasks []swarm.Task
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		tasks = d.GetServiceTasks(c, id)
+		return tasks, nil
+	}, checker.HasLen, 1)
+
+	task := tasks[0]
+
+	// wait for task to start
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		task = d.GetTask(c, task.ID)
+		return task.Status.State, nil
+	}, checker.Equals, swarm.TaskStateStarting)
+
+	containerID := task.Status.ContainerStatus.ContainerID
+
+	// wait for the health check to start failing
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID)
+		failingStreak, _ := strconv.Atoi(strings.TrimSpace(out))
+		return failingStreak, nil
+	}, checker.GreaterThan, 0)
+
+	// task should be blocked at starting status
+	task = d.GetTask(c, task.ID)
+	c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting)
+
+	// make it healthy
+	d.Cmd("exec", containerID, "touch", "/status")
+
+	// Task should reach running status
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		task = d.GetTask(c, task.ID)
+		return task.Status.State, nil
+	}, checker.Equals, swarm.TaskStateRunning)
+}
diff --git a/integration-cli/docker_cli_service_logs_test.go b/integration-cli/docker_cli_service_logs_test.go
new file mode 100644
index 0000000..d38cdef
--- /dev/null
+++ b/integration-cli/docker_cli_service_logs_test.go
@@ -0,0 +1,387 @@
+// +build !windows
+
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/daemon"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+type logMessage struct {
+	err  error
+	data []byte
+}
+
+func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	// we have multiple services here for detecting the goroutine issue #28915
+	services := map[string]string{
+		"TestServiceLogs1": "hello1",
+		"TestServiceLogs2": "hello2",
+	}
+
+	for name, message := range services {
+		out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox",
+			"sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message))
+		c.Assert(err, checker.IsNil)
+		c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+	}
+
+	// make sure the tasks have been deployed
+	waitAndAssert(c, defaultReconciliationTimeout,
+		d.CheckRunningTaskImages, checker.DeepEquals,
+		map[string]int{"busybox": len(services)})
+
+	for name, message := range services {
+		out, err := d.Cmd("service", "logs", name)
+		c.Assert(err, checker.IsNil)
+		c.Logf("log for %q: %q", name, out)
+		c.Assert(out, checker.Contains, message)
+	}
+}
+
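+// countLogLines below is typically polled via waitAndAssert, e.g. (as in
+// TestServiceLogsCompleteness further down):
+//
+//	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6)
+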
+// countLogLines returns a closure that can be used with waitAndAssert to
+// verify that a minimum number of expected container log messages have been
+// output.
+func countLogLines(d *daemon.Swarm, name string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(c *check.C) (interface{}, check.CommentInterface) {
+		result := icmd.RunCmd(d.Command("service", "logs", "-t", "--raw", name))
+		result.Assert(c, icmd.Expected{})
+		// if this returns an empty string, trying to split it later will
+		// return an array containing an empty string. a valid log line will
+		// NEVER be an empty string because we ask for the timestamp.
+		if result.Stdout() == "" {
+			return 0, check.Commentf("Empty stdout")
+		}
+		lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
+		return len(lines), check.Commentf("output, %q", string(result.Stdout()))
+	}
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsCompleteness"
+
+	// make a service that prints 6 lines
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	// and make sure we have all the log lines
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6)
+
+	out, err = d.Cmd("service", "logs", name)
+	c.Assert(err, checker.IsNil)
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+
+	// there are anecdotal reports that logs may come back from the engine
+	// mis-ordered. if this test fails, consider the possibility that that
+	// might be occurring
+	for i, line := range lines {
+		c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i))
+	}
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsTail(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsTail"
+
+	// make a service that prints 6 lines
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6)
+
+	out, err = d.Cmd("service", "logs", "--tail=2", name)
+	c.Assert(err, checker.IsNil)
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+
+	for i, line := range lines {
+		// doing i+5 is hacky but not too fragile; it's good enough. if it
+		// flakes, something else is wrong
+		c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i+5))
+	}
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsSince(c *check.C) {
+	// See DockerSuite.TestLogsSince, which is where this comes from
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsSince"
+
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	// wait a sec for the logs to come in
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 3)
+
+	out, err = d.Cmd("service", "logs", "-t", name)
+	c.Assert(err, checker.IsNil)
+
+	log2Line := strings.Split(strings.Split(out, "\n")[1], " ")
+	t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // timestamp at which log2 was written
+	c.Assert(err, checker.IsNil)
+	u := t.Add(50 * time.Millisecond) // add .05s so log1 & log2 don't show up
+	since := u.Format(time.RFC3339Nano)
+
+	out, err = d.Cmd("service", "logs", "-t", fmt.Sprintf("--since=%v", since), name)
+	c.Assert(err, checker.IsNil)
+
+	unexpected := []string{"log1", "log2"}
+	expected := []string{"log3"}
+	for _, v := range unexpected {
+		c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", u))
+	}
+	for _, v := range expected {
+		c.Assert(out, checker.Contains, v, check.Commentf("expected log message %v, was not present, since=%v", v, u))
+	}
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsFollow"
+
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+
+	args := []string{"service", "logs", "-f", name}
+	cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...)
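+	// wire the command's combined output to a pipe and read it line by line
+	// from a goroutine, so the loop below can stop via `done` rather than
+	// blocking forever on a read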
+	r, w := io.Pipe()
+	cmd.Stdout = w
+	cmd.Stderr = w
+	c.Assert(cmd.Start(), checker.IsNil)
+
+	// Make sure pipe is written to
+	ch := make(chan *logMessage)
+	done := make(chan struct{})
+	go func() {
+		reader := bufio.NewReader(r)
+		for {
+			msg := &logMessage{}
+			msg.data, _, msg.err = reader.ReadLine()
+			select {
+			case ch <- msg:
+			case <-done:
+				return
+			}
+		}
+	}()
+
+	for i := 0; i < 3; i++ {
+		msg := <-ch
+		c.Assert(msg.err, checker.IsNil)
+		c.Assert(string(msg.data), checker.Contains, "log test")
+	}
+	close(done)
+
+	c.Assert(cmd.Process.Kill(), checker.IsNil)
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsTaskLogs"
+	replicas := 2
+
+	result := icmd.RunCmd(d.Command(
+		// create a service with the name
+		"service", "create", "--no-resolve-image", "--name", name,
+		// which has some number of replicas
+		fmt.Sprintf("--replicas=%v", replicas),
+		// which has the task id templated in as an environment variable
+		"--env", "TASK={{.Task.ID}}",
+		// and runs this command to print exactly 6 log lines
+		"busybox", "sh", "-c", "for line in $(seq 0 5); do echo $TASK log test $line; done; sleep 100000",
+	))
+	result.Assert(c, icmd.Expected{})
+	// ^^ verify that we get no error
+	// then verify that we have an id in stdout
+	id := strings.TrimSpace(result.Stdout())
+	c.Assert(id, checker.Not(checker.Equals), "")
+	// so, right here, we're basically inspecting by id and returning only
+	// the ID. if they don't match, the service doesn't exist.
+	result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id))
+	result.Assert(c, icmd.Expected{Out: id})
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas)
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6*replicas)
+
+	// get the task ids
+	result = icmd.RunCmd(d.Command("service", "ps", "-q", name))
+	result.Assert(c, icmd.Expected{})
+	// make sure we have two tasks
+	taskIDs := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
+	c.Assert(taskIDs, checker.HasLen, replicas)
+
+	for _, taskID := range taskIDs {
+		c.Logf("checking task %v", taskID)
+		result := icmd.RunCmd(d.Command("service", "logs", taskID))
+		result.Assert(c, icmd.Expected{})
+		lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
+
+		c.Logf("checking messages for %v", taskID)
+		for i, line := range lines {
+			// make sure the message is in order
+			c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i))
+			// make sure it contains the task id
+			c.Assert(line, checker.Contains, taskID)
+		}
+	}
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsTTY(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsTTY"
+
+	result := icmd.RunCmd(d.Command(
+		// create a service
+		"service", "create", "--no-resolve-image",
+		// name it $name
+		"--name", name,
+		// use a TTY
+		"-t",
+		// busybox image, shell string
+		"busybox", "sh", "-c",
+		// echo to stdout and stderr
+		"echo out; (echo err 1>&2); sleep 10000",
+	))
+
+	result.Assert(c, icmd.Expected{})
+	id := strings.TrimSpace(result.Stdout())
+	c.Assert(id, checker.Not(checker.Equals), "")
+	// so, right here, we're basically inspecting by id and returning only
+	// the ID. if they don't match, the service doesn't exist.
+	result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id))
+	result.Assert(c, icmd.Expected{Out: id})
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	// and make sure we have all the log lines
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2)
+
+	cmd := d.Command("service", "logs", "--raw", name)
+	result = icmd.RunCmd(cmd)
+	// the output contains carriage returns; this is expected with a TTY.
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: "out\r\nerr\r\n"})
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsNoHangDeletedContainer"
+
+	result := icmd.RunCmd(d.Command(
+		// create a service
+		"service", "create", "--no-resolve-image",
+		// name it $name
+		"--name", name,
+		// busybox image, shell string
+		"busybox", "sh", "-c",
+		// print a line to stdout every couple of seconds
+		"while true; do echo line; sleep 2; done",
+	))
+
+	// confirm that the command succeeded
+	c.Assert(result, icmd.Matches, icmd.Expected{})
+	// get the service id
+	id := strings.TrimSpace(result.Stdout())
+	c.Assert(id, checker.Not(checker.Equals), "")
+
+	// make sure task has been deployed.
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	// and make sure we have all the log lines
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2)
+
+	// now find and nuke the container
+	result = icmd.RunCmd(d.Command("ps", "-q"))
+	containerID := strings.TrimSpace(result.Stdout())
+	c.Assert(containerID, checker.Not(checker.Equals), "")
+	result = icmd.RunCmd(d.Command("stop", containerID))
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: containerID})
+	result = icmd.RunCmd(d.Command("rm", containerID))
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: containerID})
+
+	// run logs. use tail 2 to make sure we don't try to get a bunch of logs
+	// somehow and slow down execution time
+	cmd := d.Command("service", "logs", "--tail", "2", id)
+	// start the command and then wait for it to finish with a 3 second timeout
+	result = icmd.StartCmd(cmd)
+	result = icmd.WaitOnCmd(3*time.Second, result)
+
+	// then assert that the result matches expected. if the command timed out,
+	// result.Timeout will be true, but the Expected defaults to false
+	c.Assert(result, icmd.Matches, icmd.Expected{})
+}
+
+func (s *DockerSwarmSuite) TestServiceLogsDetails(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	name := "TestServiceLogsDetails"
+
+	result := icmd.RunCmd(d.Command(
+		// create a service
+		"service", "create", "--no-resolve-image",
+		// name it $name
+		"--name", name,
+		// add an environment variable
+		"--env", "asdf=test1",
+		// add a log driver (without explicitly setting a driver, log-opt doesn't work)
+		"--log-driver", "json-file",
+		// add a log option to print the environment variable
+		"--log-opt", "env=asdf",
+		// busybox image, shell string
+		"busybox", "sh", "-c",
+		// make a log line
+		"echo LogLine; while true; do sleep 1; done;",
+	))
+
+	result.Assert(c, icmd.Expected{})
+	id := strings.TrimSpace(result.Stdout())
+	c.Assert(id, checker.Not(checker.Equals), "")
+
+	// make sure task has been deployed
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
+	// and make sure we have all the log lines
+	waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 1)
+
+	// First, test without pretty printing
+	// call service logs with details. set raw to skip pretty printing
+	result = icmd.RunCmd(d.Command("service", "logs", "--raw", "--details", name))
+	// in this case, we should get details and we should get the log message,
+	// but there will also be context as details (which will fall after the
+	// detail we inserted, in alphabetical order)
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: "asdf=test1"})
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: "LogLine"})
+
+	// call service logs with details. this time, don't pass raw
+	result = icmd.RunCmd(d.Command("service", "logs", "--details", id))
+	// in this case, we should get the details and the log message separated by
+	// a space. the context is part of the pretty part of the logline
+	c.Assert(result, icmd.Matches, icmd.Expected{Out: "asdf=test1 LogLine"})
+}
diff --git a/integration-cli/docker_cli_service_scale_test.go b/integration-cli/docker_cli_service_scale_test.go
new file mode 100644
index 0000000..8fb84fe
--- /dev/null
+++ b/integration-cli/docker_cli_service_scale_test.go
@@ -0,0 +1,57 @@
+// +build !windows
+
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSwarmSuite) TestServiceScale(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	service1Name := "TestService1"
+	service1Args := append([]string{"service", "create", "--no-resolve-image", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...)
+
+	// global mode
+	service2Name := "TestService2"
+	service2Args := append([]string{"service", "create", "--no-resolve-image", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...)
+
+	// Create services
+	out, err := d.Cmd(service1Args...)
+	c.Assert(err, checker.IsNil)
+
+	out, err = d.Cmd(service2Args...)
+	c.Assert(err, checker.IsNil)
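+
+	// `docker service scale` only applies to replicated-mode services; the
+	// global-mode service created above exercises the error path further down.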
+
+	out, err = d.Cmd("service", "scale", "TestService1=2")
+	c.Assert(err, checker.IsNil)
+
+	out, err = d.Cmd("service", "scale", "TestService1=foobar")
+	c.Assert(err, checker.NotNil)
+
+	str := fmt.Sprintf("%s: invalid replicas value %s", service1Name, "foobar")
+	if !strings.Contains(out, str) {
+		c.Errorf("got: %s, expected substring: %s", out, str)
+	}
+
+	out, err = d.Cmd("service", "scale", "TestService1=-1")
+	c.Assert(err, checker.NotNil)
+
+	str = fmt.Sprintf("%s: invalid replicas value %s", service1Name, "-1")
+	if !strings.Contains(out, str) {
+		c.Errorf("got: %s, expected substring: %s", out, str)
+	}
+
+	// TestService2 is in global mode
+	out, err = d.Cmd("service", "scale", "TestService2=2")
+	c.Assert(err, checker.NotNil)
+
+	str = fmt.Sprintf("%s: scale can only be used with replicated mode\n", service2Name)
+	if out != str {
+		c.Errorf("got: %s, expected: %s", out, str)
+	}
+}
diff --git a/integration-cli/docker_cli_service_update_test.go b/integration-cli/docker_cli_service_update_test.go
new file mode 100644
index 0000000..086ae77
--- /dev/null
+++ b/integration-cli/docker_cli_service_update_test.go
@@ -0,0 +1,172 @@
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	serviceName := "TestServiceUpdatePort"
+	serviceArgs := append([]string{"service", "create", "--no-resolve-image", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...)
+
+	// Create a service with a port mapping of 8080:8081.
+	out, err := d.Cmd(serviceArgs...)
+	c.Assert(err, checker.IsNil)
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
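+
+	// note: --publish-rm takes the target (container-side) port of the old
+	// mapping, hence 8081 below rather than the published 8080.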
+	// Update the service: change the port mapping from 8080:8081 to 8082:8083.
+	_, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName)
+	c.Assert(err, checker.IsNil)
+
+	// Inspect the service and verify port mapping
+	expected := []swarm.PortConfig{
+		{
+			Protocol:      "tcp",
+			PublishedPort: 8082,
+			TargetPort:    8083,
+			PublishMode:   "ingress",
+		},
+	}
+
+	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName)
+	c.Assert(err, checker.IsNil)
+
+	var portConfig []swarm.PortConfig
+	if err := json.Unmarshal([]byte(out), &portConfig); err != nil {
+		c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out)
+	}
+	c.Assert(portConfig, checker.DeepEquals, expected)
+}
+
+func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name=test", "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	service := d.GetService(c, "test")
+	c.Assert(service.Spec.Labels, checker.HasLen, 0)
+
+	// add label to empty set
+	out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	service = d.GetService(c, "test")
+	c.Assert(service.Spec.Labels, checker.HasLen, 1)
+	c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
+
+	// add label to non-empty set
+	out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	service = d.GetService(c, "test")
+	c.Assert(service.Spec.Labels, checker.HasLen, 2)
+	c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar")
+
+	out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	service = d.GetService(c, "test")
+	c.Assert(service.Spec.Labels, checker.HasLen, 1)
+	c.Assert(service.Spec.Labels["foo2"], checker.Equals, "")
+
+	out, err = d.Cmd("service", "update", "test", "--label-rm", "foo")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	service = d.GetService(c, "test")
+	c.Assert(service.Spec.Labels, checker.HasLen, 0)
+	c.Assert(service.Spec.Labels["foo"], checker.Equals, "")
+
+	// now make sure we can add again
+	out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+	service = d.GetService(c, "test")
+	c.Assert(service.Spec.Labels, checker.HasLen, 1)
+	c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
+}
+
+func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+	testName := "test_secret"
+	id := d.CreateSecret(c, swarm.SecretSpec{
+		Annotations: swarm.Annotations{
+			Name: testName,
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
+	testTarget := "testing"
+	serviceName := "test"
+
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	// add secret
+	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
+	c.Assert(err, checker.IsNil)
+
+	var refs []swarm.SecretReference
+	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
+	c.Assert(refs, checker.HasLen, 1)
+
+	c.Assert(refs[0].SecretName, checker.Equals, testName)
+	c.Assert(refs[0].File, checker.Not(checker.IsNil))
+	c.Assert(refs[0].File.Name, checker.Equals, testTarget)
+
+	// remove
+	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
+	c.Assert(err, checker.IsNil)
+
+	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
+	c.Assert(refs, checker.HasLen, 0)
+}
+
+func (s *DockerSwarmSuite) TestServiceUpdateConfigs(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+	testName := "test_config"
+	id := d.CreateConfig(c, swarm.ConfigSpec{
+		Annotations: swarm.Annotations{
+			Name: testName,
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id))
+	testTarget := "/testing"
+	serviceName := "test"
+
+	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	// add config
+	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
+	c.Assert(err, checker.IsNil)
+
+	var refs []swarm.ConfigReference
+	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
+	c.Assert(refs, checker.HasLen, 1)
+
+	c.Assert(refs[0].ConfigName, checker.Equals, testName)
+	c.Assert(refs[0].File, checker.Not(checker.IsNil))
+	c.Assert(refs[0].File.Name, checker.Equals, testTarget)
+
+	// remove
+	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-rm", testName)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
+	c.Assert(err, checker.IsNil)
+
+	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
+	c.Assert(refs, checker.HasLen, 0)
+}
diff --git a/integration-cli/docker_cli_sni_test.go b/integration-cli/docker_cli_sni_test.go
new file mode 100644
index 0000000..fb896d5
--- /dev/null
+++ b/integration-cli/docker_cli_sni_test.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os/exec"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) {
+	c.Skip("Flaky test")
+	// there may be more than one hit to the server for each registry request
+	serverNameReceived := []string{}
+	var serverName string
+
+	virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		serverNameReceived = append(serverNameReceived, r.TLS.ServerName)
+	}))
+	defer virtualHostServer.Close()
+	// discard TLS handshake errors written by default to os.Stderr
+	virtualHostServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+
+	u, err := url.Parse(virtualHostServer.URL)
+	c.Assert(err, check.IsNil)
+	hostPort := u.Host
+	serverName = strings.Split(hostPort, ":")[0]
+
+	repoName := fmt.Sprintf("%v/dockercli/image:latest", hostPort)
+	cmd := exec.Command(dockerBinary, "pull", repoName)
+	cmd.Run()
+
+	// check that the fake server was hit at least once
+	c.Assert(len(serverNameReceived) > 0, check.Equals, true)
+	// check that for each hit the right server name was received
+	for _, item := range serverNameReceived {
+		c.Check(item, check.Equals, serverName)
+	}
+}
diff --git a/integration-cli/docker_cli_stack_test.go b/integration-cli/docker_cli_stack_test.go
new file mode 100644
index 0000000..91fe4d7
--- /dev/null
+++ b/integration-cli/docker_cli_stack_test.go
@@ -0,0 +1,206 @@
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/integration-cli/checker"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+var cleanSpaces = func(s string) string {
+	lines := strings.Split(s, "\n")
+	for i, line := range lines {
+		spaceIx := strings.Index(line, " ")
+		if spaceIx > 0 {
+			lines[i] = line[:spaceIx+1] + strings.TrimLeft(line[spaceIx:], " ")
+		}
+	}
+	return strings.Join(lines, "\n")
+}
+
+func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	stackArgs := []string{"stack", "remove", "UNKNOWN_STACK"}
+
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n")
+}
+
+func (s *DockerSwarmSuite) TestStackPSUnknown(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	stackArgs := []string{"stack", "ps", "UNKNOWN_STACK"}
+
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n")
+}
+
+func (s *DockerSwarmSuite) TestStackServicesUnknown(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	stackArgs := []string{"stack", "services", "UNKNOWN_STACK"}
+
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n")
+}
+
+func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	testStackName := "testdeploy"
+	stackArgs := []string{
+		"stack", "deploy",
+		"--compose-file", "fixtures/deploy/default.yaml",
+		testStackName,
+	}
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("stack", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n"+"testdeploy 2\n")
+
+	out, err = d.Cmd("stack", "rm", testStackName)
+	c.Assert(err, checker.IsNil)
+	out, err = d.Cmd("stack", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n")
+}
+
+func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	out, err := d.Cmd("secret", "create", "outside", "fixtures/secrets/default")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	testStackName := "testdeploy"
+	stackArgs := []string{
+		"stack", "deploy",
+		"--compose-file", "fixtures/deploy/secrets.yaml",
+		testStackName,
+	}
+	out, err = d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
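+
+	// the compose deployment prefixes its own secrets with the stack name
+	// (testdeploy_*), while the externally created secret keeps its bare name;
+	// the sorted references below reflect that.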
+	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", "testdeploy_web")
+	c.Assert(err, checker.IsNil)
+
+	var refs []swarm.SecretReference
+	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
+	c.Assert(refs, checker.HasLen, 3)
+
+	sort.Sort(sortSecrets(refs))
+	c.Assert(refs[0].SecretName, checker.Equals, "outside")
+	c.Assert(refs[1].SecretName, checker.Equals, "testdeploy_special")
+	c.Assert(refs[1].File.Name, checker.Equals, "special")
+	c.Assert(refs[2].SecretName, checker.Equals, "testdeploy_super")
+	c.Assert(refs[2].File.Name, checker.Equals, "foo.txt")
+	c.Assert(refs[2].File.Mode, checker.Equals, os.FileMode(0400))
+
+	// Deploy again to ensure there are no errors when the secret hasn't changed
+	out, err = d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+}
+
+func (s *DockerSwarmSuite) TestStackRemove(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	stackName := "testdeploy"
+	stackArgs := []string{
+		"stack", "deploy",
+		"--compose-file", "fixtures/deploy/remove.yaml",
+		stackName,
+	}
+	result := icmd.RunCmd(d.Command(stackArgs...))
+	result.Assert(c, icmd.Expected{
+		Err: icmd.None,
+		Out: "Creating service testdeploy_web",
+	})
+
+	result = icmd.RunCmd(d.Command("service", "ls"))
+	result.Assert(c, icmd.Success)
+	c.Assert(
+		strings.Split(strings.TrimSpace(result.Stdout()), "\n"),
+		checker.HasLen, 2)
+
+	result = icmd.RunCmd(d.Command("stack", "rm", stackName))
+	result.Assert(c, icmd.Success)
+	stderr := result.Stderr()
+	c.Assert(stderr, checker.Contains, "Removing service testdeploy_web")
+	c.Assert(stderr, checker.Contains, "Removing network testdeploy_default")
+	c.Assert(stderr, checker.Contains, "Removing secret testdeploy_special")
+}
+
+type sortSecrets []swarm.SecretReference
+
+func (s sortSecrets) Len() int           { return len(s) }
+func (s sortSecrets) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortSecrets) Less(i, j int) bool { return s[i].SecretName < s[j].SecretName }
+
+// testDAB is the DAB JSON used for testing.
+// TODO: Use text/template and substitute "Image" with the result of
+// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest`
+const testDAB = `{
+    "Version": "0.1",
+    "Services": {
+        "srv1": {
+            "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0",
+            "Command": ["top"]
+        },
+        "srv2": {
+            "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0",
+            "Command": ["tail"],
+            "Args": ["-f", "/dev/null"]
+        }
+    }
+}`
+
+func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) {
+	testRequires(c, ExperimentalDaemon)
+	// setup
+	testStackName := "test"
+	testDABFileName := testStackName + ".dab"
+	defer os.RemoveAll(testDABFileName)
+	err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444)
+	c.Assert(err, checker.IsNil)
+	d := s.AddDaemon(c, true, true)
+	// deploy
+	stackArgs := []string{
+		"stack", "deploy",
+		"--bundle-file", testDABFileName,
+		testStackName,
+	}
+	out, err := d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Loading bundle from test.dab\n")
+	c.Assert(out, checker.Contains, "Creating service test_srv1\n")
+	c.Assert(out, checker.Contains, "Creating service test_srv2\n")
+	// ls
+	stackArgs = []string{"stack", "ls"}
+	out, err = d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n"+"test 2\n")
+	// rm
+	stackArgs = []string{"stack", "rm", testStackName}
+	out, err = d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "Removing service test_srv1\n")
+	c.Assert(out, checker.Contains, "Removing service test_srv2\n")
+	// ls (empty)
+	stackArgs = []string{"stack", "ls"}
+	out, err = d.Cmd(stackArgs...)
+	c.Assert(err, checker.IsNil)
+	c.Assert(cleanSpaces(out), check.Equals, "NAME SERVICES\n")
+}
diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go
new file mode 100644
index 0000000..2dd5fdf
--- /dev/null
+++ b/integration-cli/docker_cli_start_test.go
@@ -0,0 +1,199 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+// Regression test for https://github.com/docker/docker/issues/7843
+func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) {
+	// Windows does not support link
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "--name", "test", "busybox")
+
+	// Expect this to fail because the above container is stopped; this is what we want
+	out, _, err := dockerCmdWithError("run", "--name", "test2", "--link", "test:test", "busybox")
+	// err shouldn't be nil because container test2 tries to link to the stopped container
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+
+	ch := make(chan error)
+	go func() {
+		// Attempt to start attached to the container that won't start
+		// This should return an error immediately since the container can't be started
+		if out, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil {
+			ch <- fmt.Errorf("Expected error but got none:\n%s", out)
+		}
+		close(ch)
+	}()
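+
+	// give the attach attempt a bounded amount of time to fail; if it hangs,
+	// the timeout arm below fails the test instead of deadlocking it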
+	select {
+	case err := <-ch:
+		c.Assert(err, check.IsNil)
+	case <-time.After(5 * time.Second):
+		c.Fatalf("Attach did not exit properly")
+	}
+}
+
+// gh#8555: Exit code should be passed through when using start -a
+func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1").Stdout()
+	out = strings.TrimSpace(out)
+
+	// make sure the container has exited before trying the "start -a"
+	cli.DockerCmd(c, "wait", out)
+
+	cli.Docker(cli.Args("start", "-a", out)).Assert(c, icmd.Expected{
+		ExitCode: 1,
+	})
+}
+
+func (s *DockerSuite) TestStartAttachSilent(c *check.C) {
+	name := "teststartattachsilent"
+	dockerCmd(c, "run", "--name", name, "busybox", "echo", "test")
+
+	// make sure the container has exited before trying the "start -a"
+	dockerCmd(c, "wait", name)
+
+	startOut, _ := dockerCmd(c, "start", "-a", name)
+	// start -a produced unexpected output
+	c.Assert(startOut, checker.Equals, "test\n")
+}
+
+func (s *DockerSuite) TestStartRecordError(c *check.C) {
+	// TODO Windows CI: Requires further porting work. Should be possible.
+	testRequires(c, DaemonIsLinux)
+	// when a container runs successfully, we should not have a state.Error
+	dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
+	stateErr := inspectField(c, "test", "State.Error")
+	// Expected to not have state error
+	c.Assert(stateErr, checker.Equals, "")
+
+	// Expect this to fail and record an error because of the port conflict
+	out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")
+	// err shouldn't be nil because docker run will fail
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+
+	stateErr = inspectField(c, "test2", "State.Error")
+	c.Assert(stateErr, checker.Contains, "port is already allocated")
+
+	// Expect the conflict to be resolved when we stop the initial container
+	dockerCmd(c, "stop", "test")
+	dockerCmd(c, "start", "test2")
+	stateErr = inspectField(c, "test2", "State.Error")
+	// Expected to not have state error but got one
+	c.Assert(stateErr, checker.Equals, "")
+}
+
+func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
+	// Windows does not support pausing containers
+	testRequires(c, IsPausable)
+
+	runSleepingContainer(c, "-d", "--name", "testing")
+
+	dockerCmd(c, "pause", "testing")
+
+	out, _, err := dockerCmdWithError("start", "testing")
+	// an error should have been shown that you cannot start a paused container
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+	c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.")
+}
+
+func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
+	// Windows does not support --link
+	testRequires(c, DaemonIsLinux)
+	// run a container named 'parent' and create two containers linked to `parent`
+	dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
+
+	for _, container := range []string{"child_first", "child_second"} {
+		dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top")
+	}
+
+	// stop 'parent' container
+	dockerCmd(c, "stop", "parent")
+
+	out := inspectField(c, "parent", "State.Running")
+	// Container should be stopped
+	c.Assert(out, checker.Equals, "false")
+
+	// start all three containers: `child_first` starts first and should fail,
+	// then 'parent' starts, and then 'child_second'
+	expOut := "Cannot link to a non running container"
+	expErr := "failed to start containers: [child_first]"
+	out, _, err := dockerCmdWithError("start", "child_first", "parent", "child_second")
+	// err shouldn't be nil because start will fail
+	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+	// output does not correspond to what was expected
+	if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) {
+		c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err)
+	}
+
+	for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} {
+		out := inspectField(c, container, "State.Running")
+		// Container running state wrong
+		c.Assert(out, checker.Equals, expected)
+	}
+}
+
+func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) {
+	// run multiple containers to test
+	for _, container := range []string{"test1", "test2", "test3"} {
+		runSleepingContainer(c, "--name", container)
+	}
+
+	// stop all the containers
+	for _, container := range []string{"test1", "test2", "test3"} {
+		dockerCmd(c, "stop", container)
+	}
+
+	// test starting and attaching multiple containers at once; an error is expected
+	for _, option := range []string{"-a", "-i", "-ai"} {
+		out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3")
+		// err shouldn't be nil because start will fail
+		c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
+		// output does not correspond to what was expected
+		c.Assert(out, checker.Contains, "you cannot start and attach multiple containers at once")
+	}
+
+	// confirm that all the containers are stopped
+	for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} {
+		out := inspectField(c, container, "State.Running")
+		// Container running state wrong
+		c.Assert(out, checker.Equals, expected)
+	}
+}
+
+// Test case for #23716
+func (s *DockerSuite) TestStartAttachWithRename(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	cli.DockerCmd(c, "create", "-t", "--name", "before", "busybox")
+	go func() {
+		cli.WaitRun(c, "before")
+		cli.DockerCmd(c, "rename", "before", "after")
+		cli.DockerCmd(c, "stop", "--time=2", "after")
+	}()
+	// FIXME(vdemeester) the intent is not clear and potentially racy
+	result := cli.Docker(cli.Args("start", "-a", "before")).Assert(c, icmd.Expected{
+		ExitCode: 137,
+	})
+	c.Assert(result.Stderr(), checker.Not(checker.Contains), "No such container")
+}
+
+func (s *DockerSuite) TestStartReturnCorrectExitCode(c *check.C) {
+	dockerCmd(c, "create", "--restart=on-failure:2", "--name", "withRestart", "busybox", "sh", "-c", "exit 11")
+	dockerCmd(c, "create", "--rm", "--name", "withRm", "busybox", "sh", "-c", "exit 12")
+
+	_, exitCode, err := dockerCmdWithError("start", "-a", "withRestart")
+	c.Assert(err, checker.NotNil)
+	c.Assert(exitCode, checker.Equals, 11)
+	_, exitCode, err = dockerCmdWithError("start", "-a", "withRm")
+	c.Assert(err, checker.NotNil)
+	c.Assert(exitCode, checker.Equals, 12)
+}
diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go
new file mode 100644
index 0000000..9d40ce0
--- /dev/null
+++ b/integration-cli/docker_cli_stats_test.go
@@ -0,0 +1,179 @@
+package main
+
+import (
+	"bufio"
+	"os/exec"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestStatsNoStream(c *check.C) {
+	// Windows does not support stats
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), checker.IsNil)
+
+	statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id)
+	type output struct {
+		out []byte
+		err error
+	}
+
+	ch := make(chan output)
+	go func() {
+		out, err := statsCmd.Output()
+		ch <- output{out, err}
+	}()
+
+	select {
+	case outerr := <-ch:
+		c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err))
+		c.Assert(string(outerr.out), checker.Contains, id) // running container wasn't present in output
+	case <-time.After(3 * time.Second):
+		statsCmd.Process.Kill()
+		c.Fatalf("stats did not return immediately when not streaming")
+	}
+}
+
+func (s *DockerSuite) TestStatsContainerNotFound(c *check.C) {
+	// Windows does not support stats
+	testRequires(c, DaemonIsLinux)
+
+	out, _, err := dockerCmdWithError("stats", "notfound")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out))
+
+	out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out))
+}
+
+func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) {
+	// Windows does not support stats
+	testRequires(c, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	id1 := strings.TrimSpace(out)[:12]
+	c.Assert(waitRun(id1), check.IsNil)
+	out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
+	id2 := strings.TrimSpace(out)[:12]
+	c.Assert(waitRun(id2), check.IsNil)
+	out, _ = dockerCmd(c, "run", "-d", "busybox", "top")
+	id3 := strings.TrimSpace(out)[:12]
+	c.Assert(waitRun(id3), check.IsNil)
+	dockerCmd(c, "stop", id3)
+
+	out, _ = dockerCmd(c, "stats", "--no-stream")
+	if !strings.Contains(out, id1) || !strings.Contains(out, id2) {
+		c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out)
+	}
+	if strings.Contains(out, id3) {
+		c.Fatalf("Did not expect %s in stats, got %s", id3, out)
+	}
check.Commentf("Expected to fail on not found container stats, got %q instead", out)) + + out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out)) +} + +func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id3 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id3), check.IsNil) + dockerCmd(c, "stop", id3) + + out, _ = dockerCmd(c, "stats", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + if strings.Contains(out, id3) { + c.Fatalf("Did not expect %s in stats, got %s", id3, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + // outLines[2] is id1's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) + // check stat result of id1 contains real data + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) +} + +func (s *DockerSuite) TestStatsAllNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + dockerCmd(c, "stop", id1) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + + out, _ = dockerCmd(c, "stats", "--all", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result of %s is empty: %s", id2, out)) + // check stat result of id1 contains all zero + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.IsNil, check.Commentf("stat result of %s should be empty : %s", id1, out)) +} + +func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + id := make(chan string) + addedChan := make(chan struct{}) + + runSleepingContainer(c, "-d") + statsCmd := exec.Command(dockerBinary, "stats") + stdout, err := statsCmd.StdoutPipe() + c.Assert(err, check.IsNil) + c.Assert(statsCmd.Start(), check.IsNil) + defer statsCmd.Process.Kill() + + go func() { + containerID := <-id + matchID := regexp.MustCompile(containerID) + + 
scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + switch { + case matchID.MatchString(scanner.Text()): + close(addedChan) + return + } + } + }() + + out := runSleepingContainer(c, "-d") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + id <- strings.TrimSpace(out)[:12] + + select { + case <-time.After(30 * time.Second): + c.Fatal("failed to observe new container created added to stats") + case <-addedChan: + // ignore, done + } +} + +func (s *DockerSuite) TestStatsFormatAll(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + cli.DockerCmd(c, "run", "-d", "--name=RunningOne", "busybox", "top") + cli.WaitRun(c, "RunningOne") + cli.DockerCmd(c, "run", "-d", "--name=ExitedOne", "busybox", "top") + cli.DockerCmd(c, "stop", "ExitedOne") + cli.WaitExited(c, "ExitedOne", 5*time.Second) + + out := cli.DockerCmd(c, "stats", "--no-stream", "--format", "{{.Name}}").Combined() + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Not(checker.Contains), "ExitedOne") + + out = cli.DockerCmd(c, "stats", "--all", "--no-stream", "--format", "{{.Name}}").Combined() + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Contains, "ExitedOne") +} diff --git a/integration-cli/docker_cli_stop_test.go b/integration-cli/docker_cli_stop_test.go new file mode 100644 index 0000000..1be4120 --- /dev/null +++ b/integration-cli/docker_cli_stop_test.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { + dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") + dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") + + c.Assert(waitRun("verifyRestart1"), checker.IsNil) + c.Assert(waitRun("verifyRestart2"), checker.IsNil) + + dockerCmd(c, "stop", "verifyRestart1") + dockerCmd(c, "stop", "verifyRestart2") +} diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go new file mode 100644 index 0000000..086a663 --- /dev/null +++ b/integration-cli/docker_cli_swarm_test.go @@ -0,0 +1,2205 @@ +// +build !windows + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/pkg/testutil" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + getSpec := func() swarm.Spec { + sw := d.GetSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, 
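+
+	// re-read the spec via getSpec() after each update rather than trusting
+	// the CLI output; the asserts below check the persisted values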
checker.Equals, 11*time.Second) + + // setting anything under 30m for cert-expiry is not allowed + out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "minimum certificate expiry time") + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + + // passing an external CA (this is without starting a root rotation) does not fail + cli.Docker(cli.Args("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + + spec = getSpec() + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) + + // passing an invalid external CA fails + tempFile := tempfile.NewTempFile(c, "testfile", "fakecert") + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "update", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://something.org,cacert=%s", tempFile.Name())), + cli.Daemon(d.Daemon)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) +} + +func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { + d := s.AddDaemon(c, false, false) + + getSpec := func() swarm.Spec { + sw := d.GetSwarm(c) + return sw.Spec + } + + // passing an invalid external CA fails + tempFile := tempfile.NewTempFile(c, "testfile", "fakecert") + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://somethingelse.org,cacert=%s", tempFile.Name())), + cli.Daemon(d.Daemon)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) + + cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) + + c.Assert(d.Leave(true), checker.IsNil) + cli.Docker(cli.Args("swarm", "init"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 5*time.Second) +} + +func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { + testRequires(c, IPv6) + d1 := s.AddDaemon(c, false, false) + cli.Docker(cli.Args("swarm", "init", "--listen-addr", "::1"), cli.Daemon(d1.Daemon)).Assert(c, icmd.Success) + + d2 := s.AddDaemon(c, false, false) + cli.Docker(cli.Args("swarm", "join", "::1"), cli.Daemon(d2.Daemon)).Assert(c, icmd.Success) + + out := cli.Docker(cli.Args("info"), 
cli.Daemon(d2.Daemon)).Assert(c, icmd.Success).Combined() + c.Assert(out, checker.Contains, "Swarm: active") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + out, err := d.Cmd("swarm", "init", "--advertise-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "advertise address must be a non-zero IP address") +} + +func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { + // init swarm mode and stop a daemon + d := s.AddDaemon(c, true, true) + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + d.Stop(c) + + // start a daemon with --cluster-store and --cluster-advertise + err = d.StartWithError("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.NotNil) + content, err := d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + + // start a daemon with --live-restore + err = d.StartWithError("--live-restore") + c.Assert(err, checker.NotNil) + content, err = d.ReadLogFile() + c.Assert(err, checker.IsNil) + c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") + // restart for teardown + d.Start(c) +} + +func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + containers := d.ActiveContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("templated hostname is invalid")) +} + +// Test case for #24270 +func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name1 := "redis-cluster-md5" + name2 := "redis-cluster" + name3 := "other-cluster" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name1, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name2, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name3, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter1 := "name=redis-cluster-md5" + filter2 := "name=redis-cluster" + + // We search with checker.Contains for `name+" "` so that a name does not also match as a mere prefix of a longer name. 
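+ // e.g. name2 "redis-cluster" is a prefix of name1 "redis-cluster-md5", so a bare Contains check on the name alone would match both rows.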
+ out, err = d.Cmd("service", "ls", "--filter", filter1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Not(checker.Contains), name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls", "--filter", filter2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Contains, name3+" ") +} + +func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + name := strings.TrimSpace(out) + + filter := "name=" + name[:4] + + out, err = d.Cmd("node", "ls", "--filter", filter) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("node", "ls", "--filter", "name=none") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 3) + + filter := "name=redis-cluster" + + out, err = d.Cmd("node", "ps", "--filter", filter, "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("node", "ps", "--filter", "name=none", "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") +} + +// Test case for #25375 +func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--label", "x=y", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "update", "--publish-add", "80:80", name) + c.Assert(err, checker.IsNil) + + out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) + c.Assert(err, checker.IsNil) + + out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) + c.Assert(err, checker.NotNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "[{ tcp 80 80 ingress}]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", 
"audio", "--group", "staff", "--group", "777", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + container := strings.TrimSpace(out) + + out, err = d.Cmd("exec", container, "id") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777") +} + +func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("run", "-id", "--restart=always", "--net=foo", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + d.Restart(c) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--net-alias=third-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // ping first container and its alias, also ping third and anonymous container by its alias + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "third-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "testnet") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + networkID := strings.TrimSpace(out) + + out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") + c.Assert(err, checker.IsNil) + cID := strings.TrimSpace(out) + d.WaitRun(cID) + + _, err = d.Cmd("rm", "-f", cID) + c.Assert(err, checker.IsNil) + + _, err = d.Cmd("network", "rm", "testnet") + c.Assert(err, checker.IsNil) + + checkNetwork := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls") + c.Assert(err, checker.IsNil) + return out, nil + } + + waitAndAssert(c, 3*time.Second, checkNetwork, 
checker.Not(checker.Contains), "testnet") +} + +func (s *DockerSwarmSuite) TestOverlayAttachable(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // validate attachable + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + // validate containers can attache to this overlay network + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // redo validation, there was a bug that the value of attachable changes after + // containers attach to the network + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create an attachable swarm network + nwName := "attovl" + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", nwName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Connect a container to the network + out, err = d.Cmd("run", "-d", "--network", nwName, "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Leave the swarm + err = d.Leave(true) + c.Assert(err, checker.IsNil) + + // Check the container is disconnected + out, err = d.Cmd("inspect", "c1", "--format", "{{.NetworkSettings.Networks."+nwName+"}}") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Check the network is gone + out, err = d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), nwName) +} + +func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create attachable network + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "--subnet", "10.10.9.0/24", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach a container with specific IP + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attempt to attach another container with same IP, must fail + _, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c2", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.NotNil) + + // Remove first container + out, err = d.Cmd("rm", "-f", "c1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Verify the network can be removed, no phantom network attachment task left over + out, err = d.Cmd("network", "rm", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Ingress network can be removed + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "ingress"), + ) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // And recreated + out, err = d.Cmd("network", "create", "-d", "overlay", "--ingress", "new-ingress") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // 
But only one is allowed + out, err = d.Cmd("network", "create", "-d", "overlay", "--ingress", "another-ingress") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "is already present") + + // It cannot be removed if it is being used + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv1", "-p", "9000:8000", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "new-ingress"), + ) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "ingress network cannot be removed because service") + + // But it can be removed once no more services depend on it + out, err = d.Cmd("service", "update", "--publish-rm", "9000:8000", "srv1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, _, err = testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "new-ingress"), + ) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // A service which needs the ingress network cannot be created if no ingress is present + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv2", "-p", "500:500", "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") + + // An existing service cannot be updated to use the ingress nw if the nw is not present + out, err = d.Cmd("service", "update", "--publish-add", "9000:8000", "srv1") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") + + // But services which do not need routing mesh can be created regardless + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmCreateServiceWithNoIngressNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Remove ingress network + out, _, err := testutil.RunCommandPipelineWithOutput( + exec.Command("echo", "Y"), + exec.Command("docker", "-H", d.Sock(), "network", "rm", "ingress"), + ) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Create an overlay network and launch a service on it + // Make sure nothing panics because the ingress network is missing + out, err = d.Cmd("network", "create", "-d", "overlay", "another-network") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv4", "--network", "another-network", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +// Test case for #24108, also the case from: +// https://github.com/docker/docker/pull/24620#issuecomment-233715656 +func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter := "name=redis-cluster" + + checkNumTasks := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + return len(strings.Split(out, "\n")) - 2, nil // includes header and 
nl in last line + } + + // wait until all tasks have been created + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 3) + + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name="+name+".1", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + name = "redis-cluster-sha1" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 1) + + filter = "name=redis-cluster" + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name="+name, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a bare container + out, err := d.Cmd("run", "-d", "--name=bare-container", "busybox", "top") + c.Assert(err, checker.IsNil) + bareID := strings.TrimSpace(out)[:12] + // Create a service + name := "busybox-top" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
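+ // d.CheckServiceRunningTasks(name) counts the service's running tasks; waitAndAssert polls it until it reaches 1.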
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(name), checker.Equals, 1) + + // Filter non-tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") + c.Assert(err, checker.IsNil) + psOut := strings.TrimSpace(out) + c.Assert(psOut, checker.Equals, bareID, check.Commentf("Expected id %s, got %s for is-task label, output %q", bareID, psOut, out)) + + // Filter tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=true") + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 1) + c.Assert(lines[0], checker.Not(checker.Equals), bareID, check.Commentf("Expected not %s, but got it for is-task label, output %q", bareID, out)) +} + +const globalNetworkPlugin = "global-network-plugin" +const globalIPAMPlugin = "global-ipam-plugin" + +func setupRemoteGlobalNetworkPlugin(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"global"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.AllocateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.FreeNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add 
veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressReleaseReq.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + 
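+ // ReleasePool is the final step in the IPAM flow; libnetwork should call it only after all addresses in the pool have been released.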
mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if poolReleaseReq.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteGlobalNetworkPlugin(c, mux, s.server.URL, globalNetworkPlugin, globalIPAMPlugin) + defer func() { + s.server.Close() + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) + }() + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", globalNetworkPlugin, "foo") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "not supported in swarm mode") +} + +// Test case for #24712 +func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + path := filepath.Join(d.Folder, "env.txt") + err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) + c.Assert(err, checker.IsNil) + + name := "worker" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // The complete env is [VAR1=A VAR2=A VAR1=B VAR1=C VAR2= VAR2] and duplicates will be removed => [VAR1=C VAR2] + out, err = d.Cmd("inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.Env }}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[VAR1=C VAR2]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + + ttyCheck := "if [ -t 0 ]; then echo TTY > /status && top; else echo none > /status && top; fi" + + // Without --tty + expectedOutput := "none" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + // Remove service + out, err = d.Cmd("service", "rm", name) + c.Assert(err, checker.IsNil) + // Make sure container has been destroyed. 
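+ // service rm is asynchronous; poll until the daemon reports no active containers.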
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) + + // With --tty + expectedOutput = "TTY" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "false") + + _, err = d.Cmd("service", "update", "--tty", name) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + result := icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "foo")) + result.Assert(c, icmd.Success) + fooNetwork := strings.TrimSpace(string(result.Combined())) + + result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "bar")) + result.Assert(c, icmd.Success) + barNetwork := strings.TrimSpace(string(result.Combined())) + + result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "baz")) + result.Assert(c, icmd.Success) + bazNetwork := strings.TrimSpace(string(result.Combined())) + + // Create a service + name := "top" + result = icmd.RunCmd(d.Command("service", "create", "--no-resolve-image", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top")) + result.Assert(c, icmd.Success) + + // Make sure task has been deployed. 
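+ // d.CheckRunningTaskNetworks maps each network ID to the number of running tasks attached to it.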
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{fooNetwork: 1, barNetwork: 1}) + + // Remove a network + result = icmd.RunCmd(d.Command("service", "update", "--network-rm", "foo", name)) + result.Assert(c, icmd.Success) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{barNetwork: 1}) + + // Add a network + result = icmd.RunCmd(d.Command("service", "update", "--network-add", "baz", name)) + result.Assert(c, icmd.Success) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{barNetwork: 1, bazNetwork: 1}) +} + +func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, err = d.Cmd("exec", id, "cat", "/etc/resolv.conf") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") +} + +func getNodeStatus(c *check.C, d *daemon.Swarm) swarm.LocalNodeState { + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + return info.LocalNodeState +} + +func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) { + d.Restart(c) + status := getNodeStatus(c, d) + if status == swarm.LocalNodeStateLocked { + // it must not have updated to be unlocked in time - unlock, wait 3 seconds, and try again + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + time.Sleep(3 * time.Second) + d.Restart(c) + } + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) +} + +func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Swarm) { + d.Restart(c) + status := getNodeStatus(c, d) + if status == swarm.LocalNodeStateActive { + // it must not have updated to be locked in time - wait 3 seconds, and try again + time.Sleep(3 * time.Second) + d.Restart(c) + } + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) +} + +func (s *DockerSwarmSuite) TestUnlockEngineAndUnlockedSwarm(c *check.C) { + d := s.AddDaemon(c, false, false) + + // unlocking a normal engine should return an error - it does not even ask for the key + cmd := d.Command("swarm", "unlock") + result := icmd.RunCmd(cmd) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + c.Assert(result.Combined(), checker.Contains, "Error: This node is not part of a swarm") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Please enter unlock key") + + _, err := d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil) + + // unlocking an unlocked swarm should return an error - it does not even ask for the key + cmd = d.Command("swarm", "unlock") + result = icmd.RunCmd(cmd) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + c.Assert(result.Combined(), checker.Contains, "Error: swarm is not locked") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Please enter unlock key") +} + +func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + // It starts off locked + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString("wrong-secret-key") + icmd.RunCmd(cmd).Assert(c, icmd.Expected{ + ExitCode: 1, 
Err: "invalid key", + }) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + checkSwarmLockedToUnlocked(c, d, unlockKey) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") +} + +func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // It starts off locked + d.Restart(c, "--swarm-default-advertise-addr=lo") + + info, err := d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + // `docker swarm leave` a locked swarm without --force will return an error + outs, _ = d.Cmd("swarm", "leave") + c.Assert(outs, checker.Contains, "Swarm is encrypted and locked.") + + // It is OK for user to leave a locked swarm with --force + outs, err = d.Cmd("swarm", "leave", "--force") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + outs, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.SwarmInfo() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // they start off unlocked + d2.Restart(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // stop this one so it does not get autolock info + d2.Stop(c) + + // enable autolock + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // The ones that got the cluster update should be set to locked + for _, d := range []*daemon.Swarm{d1, d3} { + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + } + + // d2 never got the cluster update, so it is still set to unlocked + d2.Start(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // d2 is now set to lock + 
checkSwarmUnlockedToLocked(c, d2) + + // leave it locked, and set the cluster to no longer autolock + outs, err = d1.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // the ones that got the update are now set to unlocked + for _, d := range []*daemon.Swarm{d1, d3} { + checkSwarmLockedToUnlocked(c, d, unlockKey) + } + + // d2 still locked + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateLocked) + + // unlock it + cmd := d2.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // once it's caught up, d2 is set to not be locked + checkSwarmLockedToUnlocked(c, d2, unlockKey) + + // managers who join now are never set to locked in the first place + d4 := s.AddDaemon(c, true, true) + d4.Restart(c) + c.Assert(getNodeStatus(c, d4), checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + // enable autolock + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // joined workers start off unlocked + d2 := s.AddDaemon(c, true, false) + d2.Restart(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // promote worker + outs, err = d1.Cmd("node", "promote", d2.Info.NodeID) + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Contains, "promoted to a manager in the swarm") + + // join new manager node + d3 := s.AddDaemon(c, true, true) + + // both new nodes are locked + for _, d := range []*daemon.Swarm{d2, d3} { + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + } + + // demote manager back to worker - workers are not locked + outs, err = d1.Cmd("node", "demote", d3.Info.NodeID) + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Contains, "demoted in the swarm") + + // Wait for it to actually be demoted, for the key and cert to be replaced. + // Then restart and assert that the node is not locked. 
If we don't wait for the cert + // to be replaced, then the node still has the manager TLS key which is still locked + // (because we never want a manager TLS key to be on disk unencrypted if the cluster + // is set to autolock) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False) + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + certBytes, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) + if err != nil { + return "", check.Commentf("error: %v", err) + } + certs, err := helpers.ParseCertificatesPEM(certBytes) + if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 { + return certs[0].Subject.OrganizationalUnit[0], nil + } + return "", check.Commentf("could not get organizational unit from certificate") + }, checker.Equals, "swarm-worker") + + // by now, it should *never* be locked on restart + d3.Restart(c) + c.Assert(getNodeStatus(c, d3), checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { + d := s.AddDaemon(c, true, true) + + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result := icmd.RunCmd(cmd) + + if result.Error == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. 
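+ // (The sleep below gives the pending KEK rotation time to finish before the next restart.)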
+ + time.Sleep(3 * time.Second) + + d.Restart(c) + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result = icmd.RunCmd(cmd) + } + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + unlockKey = newUnlockKey + } +} + +// This differs from `TestSwarmRotateUnlockKey` because that one rotates a single node, which is the leader. +// This one keeps the leader up, and asserts that other manager nodes in the cluster also have their unlock +// key rotated. +func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { + d1 := s.AddDaemon(c, true, true) // leader - don't restart this one, we don't want leader election delays + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d1.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + d2.Restart(c) + d3.Restart(c) + + for _, d := range []*daemon.Swarm{d2, d3} { + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ := d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result := icmd.RunCmd(cmd) + + if result.Error == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. 
+ + time.Sleep(3 * time.Second) + + d.Restart(c) + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result = icmd.RunCmd(cmd) + } + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + } + + unlockKey = newUnlockKey + } +} + +func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) { + d := s.AddDaemon(c, true, true) + + var unlockKey string + for i := 0; i < 2; i++ { + // set the swarm to autolock + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + c.Assert(outs, checker.Contains, "docker swarm unlock") + + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + checkSwarmLockedToUnlocked(c, d, unlockKey) + } +} + +func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. 
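+ // --host=example.com:1.2.3.4 should materialize as an "IP<tab>hostname" line in the task's /etc/hosts.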
+ expectedOutput := "1.2.3.4\texample.com" + out, err = d.Cmd("exec", id, "cat", "/etc/hosts") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + // Manager Addresses will always show Node 1's address + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.Port) + + out, err := d1.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d3.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--limit-cpu=0.5", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + expectedOutput := ` +Resources: + Limits: + CPU: 0.5` + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--network=foo", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "trusted" + cli.Docker(cli.Args("-D", "service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Err: "resolved image tag to", + }) + + out, err := d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service create on an untrusted tag. 
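+ // The tag below is pushed without signing, so notary has no trust data for it and the trusted create must fail.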
+ + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + name = "untrusted" + cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "myservice" + + // Create a service without content trust + cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) + + result := cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)) + c.Assert(result.Error, checker.IsNil, check.Commentf(result.Combined())) + // Daemon won't insert the digest because this is disabled by + // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. + c.Assert(result.Combined(), check.Not(checker.Contains), repoName+"@", check.Commentf(result.Combined())) + + cli.Docker(cli.Args("-D", "service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Err: "resolved image tag to", + }) + + cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + Out: repoName + "@", + }) + + // Try trusted service update on an untrusted tag. + + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + cli.Docker(cli.Args("service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: remote trust data does not exist", + }) +} + +// Test case for issue #27866, which did not allow a NW name that is a prefix of a swarm NW ID. +// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1".
+func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + ingressID := strings.TrimSpace(out) + c.Assert(ingressID, checker.Not(checker.Equals), "") + + // create a network whose name is a prefix of the ID of an overlay network + // (ingressID in this case) + newNetName := ingressID[0:2] + out, err = d.Cmd("network", "create", "--driver", "overlay", newNetName) + // In #27866, it was failing because of "network with name %s already exists" + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "rm", newNetName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) +} + +// Test case for https://github.com/docker/docker/pull/27938#issuecomment-265768303 +// This test creates two networks with the same name sequentially, with various drivers. +// Since the operations in this test are done sequentially, the 2nd call should fail with +// "network with name FOO already exists". +// Note that it is ok to have multiple networks with the same name if the operations are done +// in parallel. (#18864) +func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) { + d := s.AddDaemon(c, true, true) + drivers := []string{"bridge", "overlay"} + for i, driver1 := range drivers { + nwName := fmt.Sprintf("network-test-%d", i) + for _, driver2 := range drivers { + c.Logf("Creating a network named %q with %q, then %q", + nwName, driver1, driver2) + out, err := d.Cmd("network", "create", "--driver", driver1, nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "create", "--driver", driver2, nwName) + c.Assert(out, checker.Contains, + fmt.Sprintf("network with name %s already exists", nwName)) + c.Assert(err, checker.NotNil) + c.Logf("As expected, the attempt to create network %q with %q failed: %s", + nwName, driver2, out) + out, err = d.Cmd("network", "rm", nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + } + } +} + +func (s *DockerSwarmSuite) TestSwarmServicePsMultipleServiceIDs(c *check.C) { + d := s.AddDaemon(c, true, true) + + name1 := "top1" + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", name1, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + id1 := strings.TrimSpace(out) + + name2 := "top2" + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", name2, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + id2 := strings.TrimSpace(out) + + // make sure task has been deployed.
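+ // (two services with three replicas each, so six active containers in total)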
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 6) + + out, err = d.Cmd("service", "ps", name1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Not(checker.Contains), name2+".1") + c.Assert(out, checker.Not(checker.Contains), name2+".2") + c.Assert(out, checker.Not(checker.Contains), name2+".3") + + out, err = d.Cmd("service", "ps", name1, name2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Contains, name2+".1") + c.Assert(out, checker.Contains, name2+".2") + c.Assert(out, checker.Contains, name2+".3") + + // Name Prefix + out, err = d.Cmd("service", "ps", "to") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Contains, name2+".1") + c.Assert(out, checker.Contains, name2+".2") + c.Assert(out, checker.Contains, name2+".3") + + // Name Prefix (no hit) + out, err = d.Cmd("service", "ps", "noname") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "no such services: noname") + + out, err = d.Cmd("service", "ps", id1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Not(checker.Contains), name2+".1") + c.Assert(out, checker.Not(checker.Contains), name2+".2") + c.Assert(out, checker.Not(checker.Contains), name2+".3") + + out, err = d.Cmd("service", "ps", id1, id2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+".1") + c.Assert(out, checker.Contains, name1+".2") + c.Assert(out, checker.Contains, name1+".3") + c.Assert(out, checker.Contains, name2+".1") + c.Assert(out, checker.Contains, name2+".2") + c.Assert(out, checker.Contains, name2+".3") +} + +func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + // make sure task has been deployed. 
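+ // (all four published ports are backed by the single replica of this service)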
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Total len = 4, with 2 dynamic ports and 2 non-dynamic ports + // Dynamic ports are likely to be 30000 and 30001, but the exact values don't matter + out, err = d.Cmd("service", "inspect", "--format", "{{.Endpoint.Ports}} len={{len .Endpoint.Ports}}", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "len=4") + c.Assert(out, checker.Contains, "{ tcp 80 5005 ingress}") + c.Assert(out, checker.Contains, "{ tcp 80 5006 ingress}") +} + +func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Drain") + + out, err = d.Cmd("swarm", "join-token", "-q", "manager") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + d1 := s.AddDaemon(c, false, false) + + out, err = d1.Cmd("swarm", "join", "--availability=drain", "--token", token, d.ListenAddr) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") + + out, err = d1.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") +} + +func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--availability", "drain") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + out, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") +} + +func (s *DockerSwarmSuite) TestSwarmReadonlyRootfs(c *check.C) { + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--read-only", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed.
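+ // (the read-only service runs a single replica)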
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.ReadOnly }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + containers := d.ActiveContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.HostConfig.ReadonlyRootfs}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + Driver: "bridge", + }, + } + + var n1 types.NetworkCreateResponse + status, body, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + c.Assert(json.Unmarshal(body, &n1), checker.IsNil) + + // Full ID always works + out, err := d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + // Name works if it is unique + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + var n2 types.NetworkCreateResponse + status, body, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + c.Assert(json.Unmarshal(body, &n2), checker.IsNil) + + // Full ID always works + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n2.ID) + + // Name with duplicates + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "network foo is ambiguous (2 matches found based on name)") + + out, err = d.Cmd("network", "rm", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Duplicate name but with a different driver + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, body, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + c.Assert(json.Unmarshal(body, &n2), checker.IsNil) + + // Full ID always works + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n2.ID) + + // Name with duplicates + out, err = d.Cmd("network", "inspect",
"--format", "{{.ID}}", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "network foo is ambiguous (2 matches found based on name)") +} + +func (s *DockerSwarmSuite) TestSwarmStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP") + + containers := d.ActiveContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.StopSignal}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP") + + out, err = d.Cmd("service", "update", "--stop-signal=SIGUSR1", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGUSR1") +} + +func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "top2", "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 2) + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "top1") + c.Assert(out, checker.Contains, "top2") + c.Assert(out, checker.Not(checker.Contains), "localnet") + + out, err = d.Cmd("service", "ls", "--filter", "mode=global") + c.Assert(out, checker.Not(checker.Contains), "top1") + c.Assert(out, checker.Contains, "top2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "ls", "--filter", "mode=replicated") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "top1") + c.Assert(out, checker.Not(checker.Contains), "top2") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedDataPathAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") + + out, err = d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0:2000") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") +} + +func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("swarm", "join-token", "-q", "worker") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + // Verify that back to back join/leave does not cause panics + d1 := s.AddDaemon(c, false, false) + for i := 0; i < 10; i++ { + out, err = d1.Cmd("swarm", "join", "--token", token, d.ListenAddr) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d1.Cmd("swarm", "leave") + c.Assert(err, checker.IsNil) + } +} + +const defaultRetryCount = 10 + +func waitForEvent(c *check.C, d *daemon.Swarm, since string, filter string, event string, retry int) string { + if retry < 1 { + c.Fatalf("retry count %d is invalid. 
It should be no less than 1", retry) + return "" + } + var out string + for i := 0; i < retry; i++ { + until := daemonUnixTime(c) + var err error + if len(filter) > 0 { + out, err = d.Cmd("events", "--since", since, "--until", until, filter) + } else { + out, err = d.Cmd("events", "--since", since, "--until", until) + } + c.Assert(err, checker.IsNil, check.Commentf(out)) + if strings.Contains(out, event) { + return strings.TrimSpace(out) + } + // no need to sleep after last retry + if i < retry-1 { + time.Sleep(200 * time.Millisecond) + } + } + c.Fatalf("docker events output '%s' doesn't contain event '%s'", out, event) + return "" +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + // create a network + out, err := d1.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // d1, d2 are managers that can get swarm events + waitForEvent(c, d1, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + waitForEvent(c, d2, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // d3 is a worker, not able to get cluster events + out = waitForEvent(c, d3, "0", "-f scope=swarm", "", 1) + c.Assert(out, checker.Not(checker.Contains), "network create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // scope swarm filters cluster events + out = waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "container create ") + + // all events are returned if scope is not specified + waitForEvent(c, d, "0", "", "service create "+serviceID, 1) + waitForEvent(c, d, "0", "", "container create ", defaultRetryCount) + + // scope local only shows non-cluster events + out = waitForEvent(c, d, "0", "-f scope=local", "container create ", 1) + c.Assert(out, checker.Not(checker.Contains), "service create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // create a network + out, err = d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // filter by service + out = waitForEvent(c, d, "0", "-f type=service", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "network create") + + // filter by network + out = waitForEvent(c, d, "0", "-f type=network", "network create "+networkID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "service create") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := 
d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // validate service create event + waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err = d.Cmd("service", "update", "--force", "--detach=false", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // wait for service update start + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=updating") + + // allow service update complete. This is a service with 1 instance + time.Sleep(400 * time.Millisecond) + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=completed, updatestate.old=updating") + + // scale service + t2 := daemonUnixTime(c) + out, err = d.Cmd("service", "scale", "test=3") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out = waitForEvent(c, d, t2, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "replicas.new=3, replicas.old=1") + + // remove service + t3 := daemonUnixTime(c) + out, err = d.Cmd("service", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d, t3, "-f scope=swarm", "service remove "+serviceID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *check.C) { + d1 := s.AddDaemon(c, true, true) + s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d3ID := d3.NodeID + waitForEvent(c, d1, "0", "-f scope=swarm", "node create "+d3ID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err := d1.Cmd("node", "update", "--availability=pause", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by type + out = waitForEvent(c, d1, t1, "-f type=node", "node update "+d3ID, defaultRetryCount) + c.Assert(out, checker.Contains, "availability.new=pause, availability.old=active") + + t2 := daemonUnixTime(c) + out, err = d1.Cmd("node", "demote", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d1, t2, "-f type=node", "node update "+d3ID, defaultRetryCount) + + t3 := daemonUnixTime(c) + out, err = d1.Cmd("node", "rm", "-f", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by scope + waitForEvent(c, d1, t3, "-f scope=swarm", "node remove "+d3ID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a network + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + + waitForEvent(c, d, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // remove network + t1 := daemonUnixTime(c) + out, err = d.Cmd("network", "rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filtered by network + waitForEvent(c, d, t1, "-f type=network", "network remove "+networkID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, 
checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + waitForEvent(c, d, "0", "-f scope=swarm", "secret create "+id, defaultRetryCount) + + t1 := daemonUnixTime(c) + d.DeleteSecret(c, id) + // filtered by secret + waitForEvent(c, d, t1, "-f type=secret", "secret remove "+id, defaultRetryCount) +} diff --git a/integration-cli/docker_cli_swarm_unix_test.go b/integration-cli/docker_cli_swarm_unix_test.go new file mode 100644 index 0000000..cffabcc --- /dev/null +++ b/integration-cli/docker_cli_swarm_unix_test.go @@ -0,0 +1,104 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure task stays pending before plugin is available + waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) + + plugin := newVolumePlugin(c, "customvolumedriver") + defer plugin.Close() + + // create a dummy volume to trigger lazy loading of the plugin + out, err = d.Cmd("volume", "create", "-d", "customvolumedriver", "hello") + + // TODO(aaronl): It will take about 15 seconds for swarm to realize the + // plugin was loaded. Switching the test over to plugin v2 would avoid + // this long delay. + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "-f", "{{json .Mounts}}", containerID) + c.Assert(err, checker.IsNil) + + var mounts []struct { + Name string + Driver string + } + + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "my-volume") + c.Assert(mounts[0].Driver, checker.Equals, "customvolumedriver") +} + +// Test network plugin filter in swarm +func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *check.C) { + testRequires(c, IsAmd64) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + // install plugin on d1 and d2 + pluginName := "aragunathan/global-net-plugin:latest" + + _, err := d1.Cmd("plugin", "install", pluginName, "--grant-all-permissions") + c.Assert(err, checker.IsNil) + + _, err = d2.Cmd("plugin", "install", pluginName, "--grant-all-permissions") + c.Assert(err, checker.IsNil) + + // create network + networkName := "globalnet" + _, err = d1.Cmd("network", "create", "--driver", pluginName, networkName) + c.Assert(err, checker.IsNil) + + // create a global service to ensure that both nodes will have an instance + serviceName := "my-service" + _, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top") + c.Assert(err, checker.IsNil) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, 2) + 
+ + // remove service + _, err = d1.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + + // wait to ensure all containers have exited before removing the plugin. Else there's a + // possibility of container exits erroring out due to plugins being unavailable. + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, 0) + + // disable plugin on worker + _, err = d2.Cmd("plugin", "disable", "-f", pluginName) + c.Assert(err, checker.IsNil) + + time.Sleep(20 * time.Second) + + image := "busybox" + // create a new global service again. + _, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top") + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image: 1}) +} diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go new file mode 100644 index 0000000..907977f --- /dev/null +++ b/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// tagging a named image in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { + imageID := inspectField(c, "busybox", "Id") + dockerCmd(c, "tag", imageID, "testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} + + for _, repo := range invalidRepos { + out, _, err := dockerCmdWithError("tag", "busybox", repo) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) + } +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { + longTag := stringutils.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + out, _, err := dockerCmdWithError("tag", "busybox", repotag) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) + } +} + +// ensure we allow the use of valid tags +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} + + for _, repo := range validRepos { + _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) + if err != nil { + c.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + deleteImages(repo) + } +} + +// tagging an image with an existing tag name without the -f option should work +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { + dockerCmd(c,
"tag", "busybox:latest", "busybox:test") +} + +func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { + // test repository name begin with '-' + out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test namespace name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test index name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) +} + +// ensure tagging using official names works +// ensure all tags result in the same name +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") + if err != nil || exitCode != 0 { + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + continue + } + + // ensure we don't have multiple tag names. + out, _, err = dockerCmdWithError("images") + if err != nil { + c.Errorf("listing images failed with errors: %v, %s", err, out) + } else if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + deleteImages(name + ":latest") + } + } + + for _, name := range names { + _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") + if err != nil || exitCode != 0 { + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) + continue + } + deleteImages("fooo/bar:latest") + } +} + +// ensure tags can not match digests +func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { + digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) + if err == nil { + c.Fatal("digest tag a name should have failed") + } + // check that no new image matches the digest + _, _, err = dockerCmdWithError("inspect", digest) + if err == nil { + c.Fatal("inspecting by digest should have failed") + } +} + +func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") + if err == nil { + c.Fatal("tagging with image named \"sha256\" should have failed") + } +} + +// ensure tags cannot create ambiguity with image ids +func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { + buildImageSuccessfully(c, "notbusybox:latest", build.WithDockerfile(`FROM busybox + MAINTAINER dockerio`)) + imageID := getIDByName(c, "notbusybox:latest") + truncatedImageID := stringid.TruncateID(imageID) + truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) + + id := inspectField(c, truncatedTag, "Id") + + // Ensure inspect by image id returns image for image id + c.Assert(id, checker.Equals, imageID) + c.Logf("Built image: %s", imageID) + + // test setting tag fails + _, _, 
err := dockerCmdWithError("tag", "busybox:latest", truncatedTag) + if err != nil { + c.Fatalf("Error tagging with an image id: %s", err) + } + + id = inspectField(c, truncatedTag, "Id") + + // Ensure id is now busybox's ID, not the ID of the originally built image + c.Assert(id, checker.Not(checker.Equals), imageID) +} diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go new file mode 100644 index 0000000..ea32fc6 --- /dev/null +++ b/integration-cli/docker_cli_top_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + var expected icmd.Expected + switch testEnv.DaemonPlatform() { + case "windows": + expected = icmd.Expected{ExitCode: 1, Err: "Windows does not support arguments to top"} + default: + expected = icmd.Expected{Out: "PID"} + } + result := dockerCmdWithResult("top", cleanedContainerID, "-o", "pid") + c.Assert(result, icmd.Matches, expected) +} + +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + // Windows will list the name of the launched executable, which in this case is busybox.exe, without the parameters. + // Linux will display the command executed in the container + var lookingFor string + if testEnv.DaemonPlatform() == "windows" { + lookingFor = "busybox.exe" + } else { + lookingFor = "top" + } + + c.Assert(out1, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the first time", lookingFor)) + c.Assert(out2, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the second time", lookingFor)) +} + +// TestTopWindowsCoreProcesses validates that there are lines for the critical +// processes which are found in a Windows container. Note Windows is architecturally +// very different from Linux in this regard. +func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { + testRequires(c, DaemonIsWindows) + out := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + out1, _ := dockerCmd(c, "top", cleanedContainerID) + lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} + for i, s := range lookingFor { + c.Assert(out1, checker.Contains, s, check.Commentf("top should've listed `%s` in the process list, but failed.
Test case %d", s, i)) + } +} + +func (s *DockerSuite) TestTopPrivileged(c *check.C) { + // Windows does not support --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) + c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) +} diff --git a/integration-cli/docker_cli_update_test.go b/integration-cli/docker_cli_update_test.go new file mode 100644 index 0000000..c898690 --- /dev/null +++ b/integration-cli/docker_cli_update_test.go @@ -0,0 +1,43 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false").Combined() + timeout := 60 * time.Second + if testEnv.DaemonPlatform() == "windows" { + timeout = 180 * time.Second + } + + id := strings.TrimSpace(string(out)) + + // update restart policy to on-failure:5 + cli.DockerCmd(c, "update", "--restart=on-failure:5", id) + + cli.WaitExited(c, id, timeout) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "5") + + maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(maximumRetryCount, checker.Equals, "5") +} + +func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { + out := runSleepingContainer(c, "--rm") + id := strings.TrimSpace(out) + + // update restart policy for an AutoRemove container + cli.Docker(cli.Args("update", "--restart=always", id)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Restart policy cannot be updated because AutoRemove is enabled for the container", + }) +} diff --git a/integration-cli/docker_cli_update_unix_test.go b/integration-cli/docker_cli_update_unix_test.go new file mode 100644 index 0000000..2b75941 --- /dev/null +++ b/integration-cli/docker_cli_update_unix_test.go @@ -0,0 +1,327 @@ +// +build !windows + +package main + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" + "github.com/kr/pty" +) + +func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) 
TestUpdateRunningContainerWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + dockerCmd(c, "restart", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + out, _ := dockerCmd(c, "start", "-a", name) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") + dockerCmd(c, "pause", name) + dockerCmd(c, "update", "--cpu-shares", "500", name) + + c.Assert(inspectField(c, name, "HostConfig.CPUShares"), checker.Equals, "500") + + dockerCmd(c, "unpause", name) + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "500") +} + +func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + // Update memory without touching CPU shares; `cpu.shares` should still have the old value + out := inspectField(c, name, "HostConfig.CPUShares") + c.Assert(out, check.Equals, "800") + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "800") +} + +func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + out, _, err := dockerCmdWithError("update", "-m", "2M", name) + c.Assert(err, check.NotNil) + expected := "Minimum memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + _, _, err := dockerCmdWithError("update", name) + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") + dockerCmd(c, "update", "--kernel-memory", "100M", name) + + c.Assert(inspectField(c, name,
"HostConfig.KernelMemory"), checker.Equals, "104857600") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") +} + +func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + isNewKernel := kernel.CheckKernelVersion(4, 6, 0) + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) + // Update kernel memory to a running container without kernel memory initialized + // is not allowed before kernel version 4.6. + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "pause", name) + _, _, err = dockerCmdWithError("update", "--kernel-memory", "200M", name) + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + dockerCmd(c, "unpause", name) + + dockerCmd(c, "stop", name) + dockerCmd(c, "update", "--kernel-memory", "300M", name) + dockerCmd(c, "start", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "314572800") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") +} + +func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateInvalidSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + _, _, err := dockerCmdWithError("update", "--memory-swap", "200M", name) + // Update invalid swap memory should fail. 
+ // This will pass docker config validation, but fails at kernel validation + c.Assert(err, check.NotNil) + + // A failed update with invalid swap memory should not change HostConfig + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateStats(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuCfsQuota) + name := "foo" + dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") + + c.Assert(waitRun(name), checker.IsNil) + + getMemLimit := func(id string) uint64 { + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + return v.MemoryStats.Limit + } + preMemLimit := getMemLimit(name) + + dockerCmd(c, "update", "--cpu-quota", "2000", name) + + curMemLimit := getMemLimit(name) + + c.Assert(preMemLimit, checker.Equals, curMemLimit) + +} + +func (s *DockerSuite) TestUpdateMemoryWithSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "busybox", "top") + out, _, err := dockerCmdWithError("update", "--memory", "800M", name) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Memory limit should be smaller than already set memoryswap limit") + + dockerCmd(c, "update", "--memory", "800M", "--memory-swap", "1000M", name) +} + +func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *check.C) { + testRequires(c, DaemonIsLinux, cpuShare) + + out, _ := dockerCmd(c, "run", "-tid", "--restart=always", "busybox", "sh") + id := strings.TrimSpace(string(out)) + dockerCmd(c, "update", "--cpu-shares", "512", id) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + _, err = cpty.Write([]byte("exit\n")) + c.Assert(err, checker.IsNil) + + c.Assert(cmd.Wait(), checker.IsNil) + + // container should restart again and keep running + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(id), checker.IsNil) +} + +func (s *DockerSuite) TestUpdateWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + + out, _ := dockerCmd(c, "run", "-d", "--cpus", "0.5", "--name", "top", "busybox", "top") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + clt, err := client.NewEnvClient() + c.Assert(err,
checker.IsNil) + inspect, err := clt.ContainerInspect(context.Background(), "top") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(500000000)) + + out = inspectField(c, "top", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "top", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err = dockerCmdWithError("update", "--cpu-quota", "80000", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + + out, _ = dockerCmd(c, "update", "--cpus", "0.8", "top") + inspect, err = clt.ContainerInspect(context.Background(), "top") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(800000000)) + + out = inspectField(c, "top", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "top", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "80000\n100000") +} diff --git a/integration-cli/docker_cli_userns_test.go b/integration-cli/docker_cli_userns_test.go new file mode 100644 index 0000000..8311401 --- /dev/null +++ b/integration-cli/docker_cli_userns_test.go @@ -0,0 +1,99 @@ +// +build !windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +// user namespaces test: run daemon with remapped root setting +// 1. validate uid/gid maps are set properly +// 2. 
verify that files created are owned by remapped root +func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) + + s.d.StartWithBusybox(c, "--userns-remap", "default") + + tmpDir, err := ioutil.TempDir("", "userns") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Set a non-existent path + tmpDirNotExists := path.Join(os.TempDir(), "userns"+stringid.GenerateRandomID()) + defer os.RemoveAll(tmpDirNotExists) + + // we need to find the uid and gid of the remapped root from the daemon's root dir info + uidgid := strings.Split(filepath.Base(s.d.Root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.Root))) + uid, err := strconv.Atoi(uidgid[0]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) + gid, err := strconv.Atoi(uidgid[1]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) + + // writable by the remapped root UID/GID pair + c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "-v", tmpDirNotExists+":/donald", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user := s.findUser(c, "userns") + c.Assert(uidgid[0], checker.Equals, user) + + // check that the created directory is owned by remapped uid:gid + statNotExists, err := system.Stat(tmpDirNotExists) + c.Assert(err, checker.IsNil) + c.Assert(statNotExists.UID(), checker.Equals, uint32(uid), check.Commentf("Created directory not owned by remapped root UID")) + c.Assert(statNotExists.GID(), checker.Equals, uint32(gid), check.Commentf("Created directory not owned by remapped root GID")) + + pid, err := s.d.Cmd("inspect", "--format={{.State.Pid}}", "userns") + c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) + // check the uid and gid maps for the PID to ensure root is remapped + // (cmd = cat /proc/<pid>/uid_map | grep -E '0\s+9999\s+1') + out, rc1, err := testutil.RunCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) + c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) + + out, rc2, err := testutil.RunCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) + c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) + + // check that the touched file is owned by remapped uid:gid + stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) + + // use the host's user namespace + out, err = s.d.Cmd("run", "-d", "--name", "userns_skip", "--userns", "host", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user = s.findUser(c, "userns_skip") + // userns is skipped, so the user is root + c.Assert(user, checker.Equals, "root") +} + +// findUser finds the uid or name of the user of the first process that runs
in a container
+func (s *DockerDaemonSuite) findUser(c *check.C, container string) string {
+	out, err := s.d.Cmd("top", container)
+	c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out))
+	rows := strings.Split(out, "\n")
+	if len(rows) < 2 {
+		// No process rows found
+		c.FailNow()
+	}
+	return strings.Fields(rows[1])[0]
+}
diff --git a/integration-cli/docker_cli_v2_only_test.go b/integration-cli/docker_cli_v2_only_test.go
new file mode 100644
index 0000000..b82cdbd
--- /dev/null
+++ b/integration-cli/docker_cli_v2_only_test.go
@@ -0,0 +1,120 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/docker/docker/integration-cli/registry"
+	"github.com/go-check/check"
+)
+
+func makefile(path string, contents string) (string, error) {
+	f, err := ioutil.TempFile(path, "tmp")
+	if err != nil {
+		return "", err
+	}
+	err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm)
+	if err != nil {
+		return "", err
+	}
+	return f.Name(), nil
+}
+
+// TestV2Only ensures that a daemon by default does not
+// attempt to contact any v1 registry endpoints.
+func (s *DockerRegistrySuite) TestV2Only(c *check.C) {
+	reg, err := registry.NewMock(c)
+	defer reg.Close()
+	c.Assert(err, check.IsNil)
+
+	reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(404)
+	})
+
+	reg.RegisterHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) {
+		c.Fatal("V1 registry contacted")
+	})
+
+	repoName := fmt.Sprintf("%s/busybox", reg.URL())
+
+	s.d.Start(c, "--insecure-registry", reg.URL())
+
+	tmp, err := ioutil.TempDir("", "integration-cli-")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(tmp)
+
+	dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL()))
+	c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
+
+	s.d.Cmd("build", "--file", dockerfileName, tmp)
+
+	s.d.Cmd("run", repoName)
+	s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL())
+	s.d.Cmd("tag", "busybox", repoName)
+	s.d.Cmd("push", repoName)
+	s.d.Cmd("pull", repoName)
+}
+
+// TestV1 starts a daemon with legacy registries enabled
+// and ensures v1 endpoints are hit for the following operations:
+// login, push, pull, build & run
+func (s *DockerRegistrySuite) TestV1(c *check.C) {
+	reg, err := registry.NewMock(c)
+	defer reg.Close()
+	c.Assert(err, check.IsNil)
+
+	v2Pings := 0
+	reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) {
+		v2Pings++
+		// V2 ping 404 causes fallback to v1
+		w.WriteHeader(404)
+	})
+
+	v1Pings := 0
+	reg.RegisterHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) {
+		v1Pings++
+	})
+
+	v1Logins := 0
+	reg.RegisterHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) {
+		v1Logins++
+	})
+
+	v1Repo := 0
+	reg.RegisterHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) {
+		v1Repo++
+	})
+
+	reg.RegisterHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) {
+		v1Repo++
+	})
+
+	s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=false")
+
+	tmp, err := ioutil.TempDir("", "integration-cli-")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(tmp)
+
+	dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL()))
+	c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
+
+	s.d.Cmd("build", "--file", dockerfileName, tmp)
+	c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build"))
build")) + + repoName := fmt.Sprintf("%s/busybox", reg.URL()) + s.d.Cmd("run", repoName) + c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + + c.Assert(v1Repo, check.Equals, 2) + + s.d.Cmd("pull", repoName) + c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) +} diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go new file mode 100644 index 0000000..074a7db --- /dev/null +++ b/integration-cli/docker_cli_version_test.go @@ -0,0 +1,58 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +// ensure docker version works +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "version") + stringsToCheck := map[string]int{ + "Client:": 1, + "Server:": 1, + " Version:": 2, + " API version:": 2, + " Go version:": 2, + " Git commit:": 2, + " OS/Arch:": 2, + " Built:": 2, + } + + for k, v := range stringsToCheck { + c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) + } +} + +// ensure the Windows daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { + testRequires(c, DaemonIsWindows) + testVersionPlatform(c, "windows/amd64") +} + +// ensure the Linux daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { + testRequires(c, DaemonIsLinux) + testVersionPlatform(c, "linux") +} + +func testVersionPlatform(c *check.C, platform string) { + out, _ := dockerCmd(c, "version") + expected := "OS/Arch: " + platform + + split := strings.Split(out, "\n") + c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) + + // Verify the second 'OS/Arch' matches the platform. 
Experimental has + // more lines of output than 'regular' + bFound := false + for i := 14; i < len(split); i++ { + if strings.Contains(split[i], expected) { + bFound = true + break + } + } + c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) +} diff --git a/integration-cli/docker_cli_volume_test.go b/integration-cli/docker_cli_volume_test.go new file mode 100644 index 0000000..e8c837c --- /dev/null +++ b/integration-cli/docker_cli_volume_test.go @@ -0,0 +1,643 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { + dockerCmd(c, "volume", "create") + + _, _, err := dockerCmdWithError("volume", "create", "-d", "nosuchdriver") + c.Assert(err, check.NotNil) + + // test using hidden --name option + out, _ := dockerCmd(c, "volume", "create", "--name=test") + name := strings.TrimSpace(out) + c.Assert(name, check.Equals, "test") + + out, _ = dockerCmd(c, "volume", "create", "test2") + name = strings.TrimSpace(out) + c.Assert(name, check.Equals, "test2") +} + +func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { + c.Assert( + exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume inspect should error on non-existent volume"), + ) + + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", name) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", "test") + c.Assert(strings.TrimSpace(out), check.Equals, "test") +} + +func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { + dockerCmd(c, "volume", "create", "test1") + dockerCmd(c, "volume", "create", "test2") + dockerCmd(c, "volume", "create", "test3") + + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesntexist", "test3") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such volume: doesntexist", + }) + + out := result.Stdout() + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 3, check.Commentf("\n%s", out)) + + c.Assert(out, checker.Contains, "test1") + c.Assert(out, checker.Contains, "test2") + c.Assert(out, checker.Contains, "test3") +} + +func (s *DockerSuite) TestVolumeCLILs(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "aaa") + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "volume", "create", "soo") + dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") + + out, _ := dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + assertVolList(c, out, []string{"aaa", "soo", "test"}) +} + +func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + lines := 
strings.Split(strings.TrimSpace(string(out)), "\n")
+
+	expected := []string{"aaa", "soo", "test"}
+	var names []string
+	names = append(names, lines...)
+	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
+}
+
+func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) {
+	dockerCmd(c, "volume", "create", "aaa")
+	dockerCmd(c, "volume", "create", "test")
+	dockerCmd(c, "volume", "create", "soo")
+
+	config := `{
+		"volumesFormat": "{{ .Name }} default"
+}`
+	d, err := ioutil.TempDir("", "integration-cli-")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(d)
+
+	err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644)
+	c.Assert(err, checker.IsNil)
+
+	out, _ := dockerCmd(c, "--config", d, "volume", "ls")
+	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+
+	expected := []string{"aaa default", "soo default", "test default"}
+	var names []string
+	names = append(names, lines...)
+	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
+}
+
+// assertVolList checks that the volumes retrieved with the ls command
+// match the expected volume list
+// note: out should be `volume ls [option]` result
+func assertVolList(c *check.C, out string, expectVols []string) {
+	lines := strings.Split(out, "\n")
+	var volList []string
+	for _, line := range lines[1 : len(lines)-1] {
+		volFields := strings.Fields(line)
+		// collect all volume names in volList
+		volList = append(volList, volFields[1])
+	}
+
+	// volume ls should contain all expected volumes
+	c.Assert(volList, checker.DeepEquals, expectVols)
+}
+
+func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) {
+	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
+	dockerCmd(c, "volume", "create", "testnotinuse1")
+	dockerCmd(c, "volume", "create", "testisinuse1")
+	dockerCmd(c, "volume", "create", "testisinuse2")
+
+	// Make sure both "created" (but not started), and started
+	// containers are included in reference counting
+	dockerCmd(c, "run", "--name", "volume-test1", "-v", "testisinuse1:"+prefix+"/foo", "busybox", "true")
+	dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true")
+
+	out, _ := dockerCmd(c, "volume", "ls")
+
+	// No filter, all volumes should show
+	c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
+	c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
+	c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
+
+	out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false")
+
+	// Explicitly disabling dangling
+	c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("volume 'testnotinuse1' in output, but not expected"))
+	c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
+	c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
+
+	out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true")
+
+	// Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output
+	c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
+	c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected"))
+	c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected"))
+
+	out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1")
+	// Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accepts 1
+	c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
+	c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected"))
+	c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected"))
+
+	out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0")
+	// dangling=0 is the same as the dangling=false case
+	c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("volume 'testnotinuse1' in output, but not expected"))
+	c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
+	c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
+
+	out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin")
+	c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("volume 'testnotinuse1' in output, but not expected"))
+	c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
+	c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
+}
+
+func (s *DockerSuite) TestVolumeCLILsErrorWithInvalidFilterName(c *check.C) {
+	out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "Invalid filter")
+}
+
+func (s *DockerSuite) TestVolumeCLILsWithIncorrectFilterValue(c *check.C) {
+	out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid")
+	c.Assert(err, check.NotNil)
+	c.Assert(out, checker.Contains, "Invalid filter")
+}
+
+func (s *DockerSuite) TestVolumeCLIRm(c *check.C) {
+	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
+	out, _ := dockerCmd(c, "volume", "create")
+	id := strings.TrimSpace(out)
+
+	dockerCmd(c, "volume", "create", "test")
+	dockerCmd(c, "volume", "rm", id)
+	dockerCmd(c, "volume", "rm", "test")
+
+	out, _ = dockerCmd(c, "volume", "ls")
+	outArr := strings.Split(strings.TrimSpace(out), "\n")
+	c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out))
+
+	volumeID := "testing"
+	dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar")
+
+	icmd.RunCommand(dockerBinary, "volume", "rm", "testing").Assert(c, icmd.Expected{
+		ExitCode: 1,
+		Error: "exit status 1",
+	})
+
+	out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar")
+	c.Assert(strings.TrimSpace(out), check.Equals, "hello")
+	dockerCmd(c, "rm", "-fv", "test2")
+	dockerCmd(c, "volume", "inspect", volumeID)
+	dockerCmd(c, "rm", "-f", "test")
+
+	out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar")
+	c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed"))
+	dockerCmd(c, "rm", "test2")
+
+	dockerCmd(c, "volume", "rm", volumeID)
+	c.Assert(
+		exec.Command(dockerBinary, "volume", "rm", "doesntexist").Run(),
+		check.Not(check.IsNil),
+		check.Commentf("volume rm 
should fail with non-existent volume"), + ) +} + +// FIXME(vdemeester) should be a unit test in cli/command/volume package +func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { + out, _ := dockerCmd(c, "volume") + // no args should produce the cmd usage output + usage := "Usage: docker volume COMMAND" + c.Assert(out, checker.Contains, usage) + + // invalid arg should error and show the command usage on stderr + icmd.RunCommand(dockerBinary, "volume", "somearg").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: usage, + }) + + // invalid flag should error and show the flag error and cmd usage + result := icmd.RunCommand(dockerBinary, "volume", "--no-such-flag") + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Error: "exit status 125", + Err: usage, + }) + c.Assert(result.Stderr(), checker.Contains, "unknown flag: --no-such-flag") +} + +func (s *DockerSuite) TestVolumeCLIInspectTmplError(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + + out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) + c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestVolumeCLICreateWithOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "volume", "create", "-d", "local", "test", "--opt=type=tmpfs", "--opt=device=tmpfs", "--opt=o=size=1m,uid=1000") + out, _ := dockerCmd(c, "run", "-v", "test:/foo", "busybox", "mount") + + mounts := strings.Split(out, "\n") + var found bool + for _, m := range mounts { + if strings.Contains(m, "/foo") { + found = true + info := strings.Fields(m) + // tmpfs on type tmpfs (rw,relatime,size=1024k,uid=1000) + c.Assert(info[0], checker.Equals, "tmpfs") + c.Assert(info[2], checker.Equals, "/foo") + c.Assert(info[4], checker.Equals, "tmpfs") + c.Assert(info[5], checker.Contains, "uid=1000") + c.Assert(info[5], checker.Contains, "size=1024k") + break + } + } + c.Assert(found, checker.Equals, true) +} + +func (s *DockerSuite) TestVolumeCLICreateLabel(c *check.C) { + testVol := "testvolcreatelabel" + testLabel := "foo" + testValue := "bar" + + out, _, err := dockerCmdWithError("volume", "create", "--label", testLabel+"="+testValue, testVol) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+testLabel+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) +} + +func (s *DockerSuite) TestVolumeCLICreateLabelMultiple(c *check.C) { + testVol := "testvolcreatelabel" + + testLabels := map[string]string{ + "foo": "bar", + "baz": "foo", + } + + args := []string{ + "volume", + "create", + testVol, + } + + for k, v := range testLabels { + args = append(args, "--label", k+"="+v) + } + + out, _, err := dockerCmdWithError(args...) 
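+	// The command line built above is equivalent to (sketch; flag order may
+	// vary because testLabels is a map):
+	//   docker volume create testvolcreatelabel --label foo=bar --label baz=foo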
+ c.Assert(err, check.IsNil) + + for k, v := range testLabels { + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+k+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, v) + } +} + +func (s *DockerSuite) TestVolumeCLILsFilterLabels(c *check.C) { + testVol1 := "testvolcreatelabel-1" + out, _, err := dockerCmdWithError("volume", "create", "--label", "foo=bar1", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvolcreatelabel-2" + out, _, err = dockerCmdWithError("volume", "create", "--label", "foo=bar2", testVol2) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo") + + // filter with label=key + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, checker.Contains, "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=bar1") + + // filter with label=key=value + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, check.Not(checker.Contains), "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2 in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=non-exist") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=non-exist") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLILsFilterDrivers(c *check.C) { + // using default volume driver local to create volumes + testVol1 := "testvol-1" + out, _, err := dockerCmdWithError("volume", "create", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvol-2" + out, _, err = dockerCmdWithError("volume", "create", testVol2) + c.Assert(err, check.IsNil) + + // filter with driver=local + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") + c.Assert(out, checker.Contains, "testvol-1\n", check.Commentf("expected volume 'testvol-1' in output")) + c.Assert(out, checker.Contains, "testvol-2\n", check.Commentf("expected volume 'testvol-2' in output")) + + // filter with driver=invaliddriver + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invaliddriver") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + // filter with driver=loca + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loca") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + // filter with driver= + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "rm", "-f", id) + dockerCmd(c, "volume", "rm", "--force", "nonexist") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { + 
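+	// This test deletes the volume's backing directory on the host behind the
+	// daemon's back, then checks that a forced rm still removes the dangling
+	// reference so the same name can be created again.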
testRequires(c, SameHostDaemon, DaemonIsLinux)
+
+	name := "test"
+	out, _ := dockerCmd(c, "volume", "create", name)
+	id := strings.TrimSpace(out)
+	c.Assert(id, checker.Equals, name)
+
+	out, _ = dockerCmd(c, "volume", "inspect", "--format", "{{.Mountpoint}}", name)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
+	// Mountpoint is in the form of "/var/lib/docker/volumes/.../_data", removing `/_data`
+	path := strings.TrimSuffix(strings.TrimSpace(out), "/_data")
+	icmd.RunCommand("rm", "-rf", path).Assert(c, icmd.Success)
+
+	dockerCmd(c, "volume", "rm", "-f", name)
+	out, _ = dockerCmd(c, "volume", "ls")
+	c.Assert(out, checker.Not(checker.Contains), name)
+	dockerCmd(c, "volume", "create", name)
+	out, _ = dockerCmd(c, "volume", "ls")
+	c.Assert(out, checker.Contains, name)
+}
+
+// TestVolumeCLIRmForceInUse verifies that repeated `docker volume rm -f` calls do not remove a volume
+// if it is in use. Test case for https://github.com/docker/docker/issues/31446
+func (s *DockerSuite) TestVolumeCLIRmForceInUse(c *check.C) {
+	name := "testvolume"
+	out, _ := dockerCmd(c, "volume", "create", name)
+	id := strings.TrimSpace(out)
+	c.Assert(id, checker.Equals, name)
+
+	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+	out, e := dockerCmd(c, "create", "-v", "testvolume:"+prefix+slash+"foo", "busybox")
+	cid := strings.TrimSpace(out)
+
+	_, _, err := dockerCmdWithError("volume", "rm", "-f", name)
+	c.Assert(err, check.NotNil)
+	c.Assert(err.Error(), checker.Contains, "volume is in use")
+	out, _ = dockerCmd(c, "volume", "ls")
+	c.Assert(out, checker.Contains, name)
+
+	// The original issue did not _remove_ the volume from the list
+	// the first time, but a second call to `volume rm` did.
+	// Call `volume rm` a second time to confirm the volume is
+	// still not removed while the container references it.
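+	// (A rough client-side equivalent of this check, assuming the standard Go
+	// API client: err := cli.VolumeRemove(ctx, name, true), then assert the
+	// returned error contains "volume is in use".)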
+ _, _, err = dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) + + // Verify removing the volume after the container is removed works + _, e = dockerCmd(c, "rm", cid) + c.Assert(e, check.Equals, 0) + + _, e = dockerCmd(c, "volume", "rm", "-f", name) + c.Assert(e, check.Equals, 0) + + out, e = dockerCmd(c, "volume", "ls") + c.Assert(e, check.Equals, 0) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Without options + name := "test1" + dockerCmd(c, "volume", "create", "-d", "local", name) + out, _ := dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, "map[]") + + // With options + name = "test2" + k1, v1 := "type", "tmpfs" + k2, v2 := "device", "tmpfs" + k3, v3 := "o", "size=1m,uid=1000" + dockerCmd(c, "volume", "create", "-d", "local", name, "--opt", fmt.Sprintf("%s=%s", k1, v1), "--opt", fmt.Sprintf("%s=%s", k2, v2), "--opt", fmt.Sprintf("%s=%s", k3, v3)) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k1, v1)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k2, v2)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k3, v3)) +} + +// Test case (1) for 21845: duplicate targets for --volumes-from +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFrom(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + out, _, err := dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // Only the second volume will be referenced, this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Equals, data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (2) for 21845: duplicate targets for --volumes-from and -v (bind) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndBind(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, 
build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + // /tmp/data is automatically created, because we are not using the modern mount API here + out, _, err := dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-v", "/tmp/data:/tmp/data", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (3) for 21845: duplicate targets for --volumes-from and `Mounts` (API only) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndMounts(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + err := os.MkdirAll("/tmp/data", 0755) + c.Assert(err, checker.IsNil) + // Mounts is available in API + status, body, err := request.SockRequest("POST", "/containers/create?name=app", map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "HostConfig": map[string]interface{}{ + "VolumesFrom": []string{ + "data1", + "data2", + }, + "Mounts": []map[string]interface{}{ + { + "Type": "bind", + "Source": "/tmp/data", + "Target": "/tmp/data", + }, + }}, + }, daemonHost()) + + c.Assert(err, checker.IsNil, check.Commentf(string(body))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(body))) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Contains), data1)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2)
+
+	dockerCmd(c, "rm", "-f", "-v", "app")
+	dockerCmd(c, "rm", "-f", "-v", "data1")
+	dockerCmd(c, "rm", "-f", "-v", "data2")
+
+	// Both volumes should no longer exist
+	out, _ = dockerCmd(c, "volume", "ls", "-q")
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1)
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2)
+}
diff --git a/integration-cli/docker_cli_wait_test.go b/integration-cli/docker_cli_wait_test.go
new file mode 100644
index 0000000..6f45bf0
--- /dev/null
+++ b/integration-cli/docker_cli_wait_test.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+	"bytes"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+// non-blocking wait with 0 exit code
+func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true")
+	containerID := strings.TrimSpace(out)
+
+	err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second)
+	c.Assert(err, checker.IsNil) // Container should have stopped by now
+
+	out, _ = dockerCmd(c, "wait", containerID)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out))
+
+}
+
+// blocking wait with 0 exit code
+func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) {
+	// Windows busybox does not support trap in this way, nor sleep with sub-second
+	// granularity. It will always exit 0x40010004.
+	testRequires(c, DaemonIsLinux)
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done")
+	containerID := strings.TrimSpace(out)
+
+	c.Assert(waitRun(containerID), checker.IsNil)
+
+	chWait := make(chan string)
+	go func() {
+		chWait <- ""
+		out := icmd.RunCommand(dockerBinary, "wait", containerID).Combined()
+		chWait <- out
+	}()
+
+	<-chWait // make sure the goroutine is started
+	time.Sleep(100 * time.Millisecond)
+	dockerCmd(c, "stop", containerID)
+
+	select {
+	case status := <-chWait:
+		c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status))
+	case <-time.After(2 * time.Second):
+		c.Fatal("timeout waiting for `docker wait` to exit")
+	}
+
+}
+
+// non-blocking wait with random exit code
+func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99")
+	containerID := strings.TrimSpace(out)
+
+	err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second)
+	c.Assert(err, checker.IsNil) // Container should have stopped by now
+	out, _ = dockerCmd(c, "wait", containerID)
+	c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out))
+
+}
+
+// blocking wait with random exit code
+func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) {
+	// Cannot run on Windows as Windows busybox does not support trap in this way.
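+	// The blocking path below drives `docker wait` through exec.Command so the
+	// exit status written to stdout can be asserted once `docker stop` delivers
+	// SIGTERM and the busybox trap handler exits 99.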
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan error) + waitCmd := exec.Command(dockerBinary, "wait", containerID) + waitCmdOut := bytes.NewBuffer(nil) + waitCmd.Stdout = waitCmdOut + c.Assert(waitCmd.Start(), checker.IsNil) + go func() { + chWait <- waitCmd.Wait() + }() + + dockerCmd(c, "stop", containerID) + + select { + case err := <-chWait: + c.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String())) + status, err := waitCmdOut.ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) + case <-time.After(2 * time.Second): + waitCmd.Process.Kill() + c.Fatal("timeout waiting for `docker wait` to exit") + } +} diff --git a/integration-cli/docker_deprecated_api_v124_test.go b/integration-cli/docker_deprecated_api_v124_test.go new file mode 100644 index 0000000..a3a8fbd --- /dev/null +++ b/integration-cli/docker_deprecated_api_v124_test.go @@ -0,0 +1,229 @@ +// This file will be removed when we completely drop support for +// passing HostConfig to container start API. + +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/docker/docker/pkg/testutil" + "github.com/go-check/check" +) + +func formatV123StartAPIURL(url string) string { + return "/v1.23" + url +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartHostConfig(c *check.C) { + name := "test-deprecated-api-124" + dockerCmd(c, "create", "--name", name, "busybox") + config := map[string]interface{}{ + "Binds": []string{"/aa:/bb"}, + } + status, body, err := request.SockRequest("POST", "/containers/"+name+"/start", config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(body), checker.Contains, "was deprecated since v1.10") +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { + // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. 
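+	// All requests below go through formatV123StartAPIURL, i.e. they are pinned
+	// to the v1.23 API, where passing a HostConfig payload (Binds, VolumesFrom,
+	// links) to the start endpoint is still tolerated.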
+ testRequires(c, DaemonIsLinux) + path := "/foo" + if testEnv.DaemonPlatform() == "windows" { + path = `c:\foo` + } + name := "testing" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{path: {}}, + } + + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath := testutil.RandomTmpDirPath("test", testEnv.DaemonPlatform()) + config = map[string]interface{}{ + "Binds": []string{bindPath + ":" + path}, + } + status, _, err = request.SockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, path) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) +} + +// Test for GH#10618 +func (s *DockerSuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + name := "testdups" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath1 := testutil.RandomTmpDirPath("test1", testEnv.DaemonPlatform()) + bindPath2 := testutil.RandomTmpDirPath("test2", testEnv.DaemonPlatform()) + + config = map[string]interface{}{ + "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, + } + status, body, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + volName := "voltst" + volPath := "/tmp" + + dockerCmd(c, "run", "--name", volName, "-v", volPath, "busybox") + + name := "TestContainerAPIStartVolumesFrom" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{volPath: {}}, + } + + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config = map[string]interface{}{ + "VolumesFrom": []string{volName}, + } + status, _, err = request.SockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, volPath) + c.Assert(err, checker.IsNil) + pth2, err := inspectMountSourceField(volName, volPath) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) +} + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to 
overwrite (via passing in Binds on api start) an existing volume +func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") + + fooDir, err := inspectMountSourceField("one", "/foo") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") + + bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} + status, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + fooDir2, err := inspectMountSourceField("two", "/foo") + c.Assert(err, checker.IsNil) + c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) +} + +func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + + containerID := strings.TrimSpace(out) + + config := `{ + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + b, err2 := testutil.ReadBody(body) + c.Assert(err2, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, append([]string{"create", "--name", name, "busybox"}, sleepCommandForDaemonPlatform()...)...) + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. 
+ // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") + dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + id := strings.TrimSpace(out) + dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { + // TODO Windows: Add once DNS is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + containerID := strings.TrimSpace(out) + + config := `{"HostConfig": {"Dns": null}}` + + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() + + dns := inspectFieldJSON(c, containerID, "HostConfig.Dns") + c.Assert(dns, checker.Equals, "[]") +} diff --git a/integration-cli/docker_deprecated_api_v124_unix_test.go b/integration-cli/docker_deprecated_api_v124_unix_test.go new file mode 100644 index 0000000..5fc6c2d --- /dev/null +++ b/integration-cli/docker_deprecated_api_v124_unix_test.go @@ -0,0 +1,31 @@ +// +build !windows + +package main + +import ( + "fmt" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" + "github.com/go-check/check" +) + +// #19100 This is a deprecated feature test, it should be removed in Docker 1.12 +func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c *check.C) { + netName := "test" + conName := "foo" + dockerCmd(c, "network", "create", netName) + dockerCmd(c, "create", "--name", conName, "busybox", "top") + + config := map[string]interface{}{ + "HostConfig": map[string]interface{}{ + "NetworkMode": netName, + }, + } + _, _, err := request.SockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config, daemonHost()) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(conName), checker.IsNil) + networks := inspectField(c, conName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} diff --git a/integration-cli/docker_experimental_network_test.go b/integration-cli/docker_experimental_network_test.go new file mode 100644 index 0000000..1240392 --- /dev/null +++ 
b/integration-cli/docker_experimental_network_test.go
@@ -0,0 +1,533 @@
+// +build !windows
+
+package main
+
+import (
+	"strings"
+	"time"
+
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	icmd "github.com/docker/docker/pkg/testutil/cmd"
+	"github.com/go-check/check"
+)
+
+// ensure Kernel version is >= v3.9 for macvlan support
+func macvlanKernelSupport() bool {
+	return checkKernelMajorVersionGreaterOrEqualThen(3, 9)
+}
+
+// ensure Kernel version is >= v4.2 for ipvlan support
+func ipvlanKernelSupport() bool {
+	return checkKernelMajorVersionGreaterOrEqualThen(4, 2)
+}
+
+func checkKernelMajorVersionGreaterOrEqualThen(kernelVersion int, majorVersion int) bool {
+	kv, err := kernel.GetKernelVersion()
+	if err != nil {
+		return false
+	}
+	if kv.Kernel < kernelVersion || (kv.Kernel == kernelVersion && kv.Major < majorVersion) {
+		return false
+	}
+	return true
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) {
+	// verify the driver automatically provisions the 802.1q link (dm-dummy0.60)
+	testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon)
+
+	// master dummy interface 'dm' abbreviation represents 'docker macvlan'
+	master := "dm-dummy0"
+	// simulate the master link the vlan tagged subinterface parent link will use
+	createMasterDummy(c, master)
+	// create a network specifying the desired sub-interface name
+	dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist")
+	assertNwIsAvailable(c, "dm-persist")
+	// Restart docker daemon to test the config has persisted to disk
+	s.d.Restart(c)
+	// verify network is recreated from persistence
+	assertNwIsAvailable(c, "dm-persist")
+	// cleanup the master interface that also collects the slave dev
+	deleteInterface(c, "dm-dummy0")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) {
+	// verify the driver automatically provisions the 802.1q link (di-dummy0.70)
+	testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon)
+	// master dummy interface 'di' notation represents 'docker ipvlan'
+	master := "di-dummy0"
+	// simulate the master link the vlan tagged subinterface parent link will use
+	createMasterDummy(c, master)
+	// create a network specifying the desired sub-interface name
+	dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist")
+	assertNwIsAvailable(c, "di-persist")
+	// Restart docker daemon to test the config has persisted to disk
+	s.d.Restart(c)
+	// verify network is recreated from persistence
+	assertNwIsAvailable(c, "di-persist")
+	// cleanup the master interface that also collects the slave dev
+	deleteInterface(c, "di-dummy0")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) {
+	// verify the driver automatically provisions the 802.1q link (dm-dummy0.50)
+	testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon)
+	// master dummy interface 'dm' abbreviation represents 'docker macvlan'
+	master := "dm-dummy0"
+	// simulate the master link the vlan tagged subinterface parent link will use
+	createMasterDummy(c, master)
+	// create a network specifying the desired sub-interface name
+	dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface")
+	assertNwIsAvailable(c, "dm-subinterface")
"dm-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.50) + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + createMasterDummy(c, master) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + createMasterDummy(c, master) + createVlanInterface(c, master, "dm-dummy0.40", "40") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err := dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil, check.Commentf(out)) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + createMasterDummy(c, master) + createVlanInterface(c, master, "di-dummy0.30", "30") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err := dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil, check.Commentf(out)) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { + // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", + 
"--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackbridge") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.102.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", 
"NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", + "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl2") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + 
c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode + testRequires(c, DaemonIsLinux, IPv6, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254", + "--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl3") + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ip address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ip6 address second to first + _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ip address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ip6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", 
strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // Verify connectivity across disparate subnets which is unique to L3 mode only + _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { + // Ensure the default gateways, next-hops and default dev devices are properly set + testRequires(c, DaemonIsLinux, IPv6, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", + "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") + assertNwIsAvailable(c, "dualstackbridge") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") + // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err := dockerCmdWithError("exec", "first", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") + // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") + + // Verify ipvlan l2 mode sets the proper default gateway routes via netlink + // for either an explicitly set route by the user or inferred via default IPAM + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", + "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") + assertNwIsAvailable(c, "dualstackl2") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top") + // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err = dockerCmdWithError("exec", "second", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") + // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") + + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + dockerCmd(c, "network", "create", 
"--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", + "--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") + assertNwIsAvailable(c, "dualstackl3") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") + // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") + assertNwIsAvailable(c, "dm-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { + // macvlan bridge mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + cli.DockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") + assertNwIsAvailable(c, "dm-internal") + nr := getNetworkResource(c, "dm-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := cli.Docker(cli.Args("exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8"), cli.WithTimeout(time.Second)) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + // intra-network communications should succeed + cli.DockerCmd(c, "exec", "second", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { + // ipvlan l2 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") + assertNwIsAvailable(c, "di-nil-parent") + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, 
_, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { + // ipvlan l2 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + cli.DockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") + assertNwIsAvailable(c, "di-internal") + nr := getNetworkResource(c, "di-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := cli.Docker(cli.Args("exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8"), cli.WithTimeout(time.Second)) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + cli.DockerCmd(c, "exec", "second", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { + // ipvlan l3 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") + assertNwIsAvailable(c, "di-nil-parent-l3") + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { + // ipvlan l3 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, ipvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + cli.DockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") + assertNwIsAvailable(c, "di-internal-l3") + nr := getNetworkResource(c, "di-internal-l3") + c.Assert(nr.Internal, checker.True) + + // start two containers on separate subnets + cli.DockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + cli.DockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := cli.Docker(cli.Args("exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8"), cli.WithTimeout(time.Second)) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + cli.DockerCmd(c, "exec", "second", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) 
TestDockerNetworkMacVlanExistingParent(c *check.C) { + // macvlan bridge mode - a network created on a pre-existing parent interface must not remove that link when the network is deleted + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-parent-exists" + createMasterDummy(c, "dm-dummy0") + //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) + assertNwIsAvailable(c, netName) + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined link + linkExists(c, "dm-dummy0") + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { + // macvlan bridge mode - containers on an 802.1q vlan sub-interface parent can reach each other, and deleting the network must preserve the sub-interface + testRequires(c, DaemonIsLinux, macvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-subinterface" + createMasterDummy(c, "dm-dummy0") + createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) + assertNwIsAvailable(c, netName) + + // start containers on 802.1q tagged '-o parent' sub-interface + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // verify containers can communicate + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + + // remove the containers + dockerCmd(c, "rm", "-f", "first") + dockerCmd(c, "rm", "-f", "second") + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined sub-interface + linkExists(c, "dm-dummy0.20") + // delete the parent interface which also collects the slave + deleteInterface(c, "dm-dummy0") +} + +func createMasterDummy(c *check.C, master string) { + // ip link add <master> type dummy + icmd.RunCommand("ip", "link", "add", master, "type", "dummy").Assert(c, icmd.Success) + icmd.RunCommand("ip", "link", "set", master, "up").Assert(c, icmd.Success) +} + +func createVlanInterface(c *check.C, master, slave, id string) { + // ip link add link <master> name <master>.<VID> 
type vlan id + icmd.RunCommand("ip", "link", "add", "link", master, "name", slave, "type", "vlan", "id", id).Assert(c, icmd.Success) + // ip link set up + icmd.RunCommand("ip", "link", "set", slave, "up").Assert(c, icmd.Success) +} + +func linkExists(c *check.C, master string) { + // verify the specified link exists, ip link show + icmd.RunCommand("ip", "link", "show", master).Assert(c, icmd.Success) +} diff --git a/integration-cli/docker_hub_pull_suite_test.go b/integration-cli/docker_hub_pull_suite_test.go new file mode 100644 index 0000000..2633720 --- /dev/null +++ b/integration-cli/docker_hub_pull_suite_test.go @@ -0,0 +1,91 @@ +package main + +import ( + "os/exec" + "runtime" + "strings" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func init() { + // FIXME. Temporarily turning this off for Windows as GH16039 was breaking + // Windows to Linux CI @icecrime + if runtime.GOOS != "windows" { + check.Suite(newDockerHubPullSuite()) + } +} + +// DockerHubPullSuite provides an isolated daemon that doesn't have all the +// images that are baked into our 'global' test environment daemon (e.g., +// busybox, httpserver, ...). +// +// We use it for push/pull tests where we want to start fresh, and measure the +// relative impact of each individual operation. As part of this suite, all +// images are removed after each test. +type DockerHubPullSuite struct { + d *daemon.Daemon + ds *DockerSuite +} + +// newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. +func newDockerHubPullSuite() *DockerHubPullSuite { + return &DockerHubPullSuite{ + ds: &DockerSuite{}, + } +} + +// SetUpSuite starts the suite daemon. +func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: testEnv.ExperimentalDaemon(), + }) + s.d.Start(c) +} + +// TearDownSuite stops the suite daemon. +func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { + if s.d != nil { + s.d.Stop(c) + } +} + +// SetUpTest declares that all tests of this suite require network. +func (s *DockerHubPullSuite) SetUpTest(c *check.C) { + testRequires(c, Network) +} + +// TearDownTest removes all images from the suite daemon. +func (s *DockerHubPullSuite) TearDownTest(c *check.C) { + out := s.Cmd(c, "images", "-aq") + images := strings.Split(out, "\n") + images = append([]string{"rmi", "-f"}, images...) + s.d.Cmd(images...) + s.ds.TearDownTest(c) +} + +// Cmd executes a command against the suite daemon and returns the combined +// output. The function fails the test when the command returns an error. +func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { + out, err := s.CmdWithError(name, arg...) + c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) + return out +} + +// CmdWithError executes a command against the suite daemon and returns the +// combined output as well as any error. +func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { + c := s.MakeCmd(name, arg...) + b, err := c.CombinedOutput() + return string(b), err +} + +// MakeCmd returns an exec.Cmd command to run against the suite daemon. +func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { + args := []string{"--host", s.d.Sock(), name} + args = append(args, arg...) + return exec.Command(dockerBinary, args...) 
+} diff --git a/integration-cli/docker_utils_test.go b/integration-cli/docker_utils_test.go new file mode 100644 index 0000000..1488c93 --- /dev/null +++ b/integration-cli/docker_utils_test.go @@ -0,0 +1,502 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/registry" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/go-check/check" +) + +// Deprecated +func daemonHost() string { + return request.DaemonHost() +} + +func deleteImages(images ...string) error { + args := []string{dockerBinary, "rmi", "-f"} + return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmdWithError(args ...string) (string, int, error) { + result := cli.Docker(cli.Args(args...)) + if result.Error != nil { + return result.Combined(), result.ExitCode, result.Compare(icmd.Success) + } + return result.Combined(), result.ExitCode, result.Error +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmd(c *check.C, args ...string) (string, int) { + result := cli.DockerCmd(c, args...) + return result.Combined(), result.ExitCode +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmdWithResult(args ...string) *icmd.Result { + return cli.Docker(cli.Args(args...)) +} + +func findContainerIP(c *check.C, id string, network string) string { + out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) + return strings.Trim(out, " \r\n'") +} + +func getContainerCount(c *check.C) int { + const containers = "Containers:" + + result := icmd.RunCommand(dockerBinary, "info") + result.Assert(c, icmd.Success) + + lines := strings.Split(result.Combined(), "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + c.Assert(err, checker.IsNil) + return containerCount + } + } + return 0 +} + +func inspectFieldAndUnmarshall(c *check.C, name, field string, output interface{}) { + str := inspectFieldJSON(c, name, field) + err := json.Unmarshal([]byte(str), output) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) + } +} + +// Deprecated: use cli.Inspect +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + result := icmd.RunCommand(dockerBinary, "inspect", "-f", format, name) + if result.Error != nil || result.ExitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, result.Combined()) + } + return strings.TrimSpace(result.Combined()), nil +} + +// Deprecated: use cli.Inspect +func inspectFieldWithError(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +// Deprecated: use cli.Inspect +func inspectField(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectFieldJSON(c *check.C, 
name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("json .%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectFieldMap(c *check.C, name, path, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +// Deprecated: use cli.Inspect +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFilter(name, "json .Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +// Deprecated: use cli.Inspect +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := json.Unmarshal([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +// Deprecated: use cli.Inspect +func inspectImage(c *check.C, name, filter string) string { + args := []string{"inspect", "--type", "image"} + if filter != "" { + format := fmt.Sprintf("{{%s}}", filter) + args = append(args, "-f", format) + } + args = append(args, name) + result := icmd.RunCommand(dockerBinary, args...) + result.Assert(c, icmd.Success) + return strings.TrimSpace(result.Combined()) +} + +func getIDByName(c *check.C, name string) string { + id, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.IsNil) + return id +} + +// Deprecated: use cli.Build +func buildImageSuccessfully(c *check.C, name string, cmdOperators ...cli.CmdOperator) { + buildImage(name, cmdOperators...).Assert(c, icmd.Success) +} + +// Deprecated: use cli.Build +func buildImage(name string, cmdOperators ...cli.CmdOperator) *icmd.Result { + return cli.Docker(cli.Build(name), cmdOperators...) +} + +// Deprecated: use trustedcmd +func trustedBuild(cmd *icmd.Cmd) func() { + trustedCmd(cmd) + return nil +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Fail the test when error occurs. +func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + c.Assert(err, check.IsNil) + defer f.Close() + // Write content (truncate if it exists) + _, err = io.Copy(f, strings.NewReader(content)) + c.Assert(err, check.IsNil) +} + +// Return the contents of file at path `src`. +// Fail the test when error occurs. +func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + c.Assert(err, check.IsNil) + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join(testEnv.ContainerStoragePath(), containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. 
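+// A minimal usage sketch (hypothetical image and file values; the wrapped command must +// print a container ID, hence the detached run): +// content := runCommandAndReadContainerFile(c, "hosts", dockerBinary, "run", "-d", "busybox", "top")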
+func runCommandAndReadContainerFile(c *check.C, filename string, command string, args ...string) []byte { + result := icmd.RunCommand(command, args...) + result.Assert(c, icmd.Success) + contID := strings.TrimSpace(result.Combined()) + if err := waitRun(contID); err != nil { + c.Fatalf("%v: %q", contID, err) + } + return readContainerFile(c, contID, filename) +} + +func readContainerFile(c *check.C, containerID, filename string) []byte { + f, err := os.Open(containerStorageFile(containerID, filename)) + c.Assert(err, checker.IsNil) + defer f.Close() + + content, err := ioutil.ReadAll(f) + c.Assert(err, checker.IsNil) + return content +} + +func readContainerFileWithExec(c *check.C, containerID, filename string) []byte { + result := icmd.RunCommand(dockerBinary, "exec", containerID, "cat", filename) + result.Assert(c, icmd.Success) + return []byte(result.Combined()) +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if testEnv.LocalDaemon() { + return time.Now() + } + + status, body, err := request.SockRequest("GET", "/info", nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + type infoJSON struct { + SystemTime string + } + var info infoJSON + err = json.Unmarshal(body, &info) + c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) + return dt +} + +// daemonUnixTime returns the current time on the daemon host with nanoseconds precision. +// It returns the time formatted the way the client sends timestamps to the server. +func daemonUnixTime(c *check.C) string { + return parseEventTime(daemonTime(c)) +} + +func parseEventTime(t time.Time) string { + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) +} + +func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *registry.V2 { + reg, err := registry.NewV2(schema1, auth, tokenURL, privateRegistryURL) + c.Assert(err, check.IsNil) + + // Wait for registry to be ready to serve requests. + for i := 0; i != 50; i++ { + if err = reg.Ping(); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err)) + return reg +} + +func setupNotary(c *check.C) *testNotary { + ts, err := newTestNotary(c) + c.Assert(err, check.IsNil) + + return ts +} + +// appendBaseEnv appends to the given env list the minimum set of environment +// variables needed to exec the docker cli binary for testing with the correct +// configuration. +func appendBaseEnv(isTLS bool, env ...string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." 
+ "SystemRoot", + + // testing help text requires the $PATH to dockerd is set + "PATH", + } + if isTLS { + preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} + +func createTmpFile(c *check.C, content string) string { + f, err := ioutil.TempFile("", "testfile") + c.Assert(err, check.IsNil) + + filename := f.Name() + + err = ioutil.WriteFile(filename, []byte(content), 0644) + c.Assert(err, check.IsNil) + + return filename +} + +// waitRun will wait for the specified container to be running, maximum 5 seconds. +// Deprecated: use cli.WaitFor +func waitRun(contID string) error { + return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) +} + +// waitInspect will wait for the specified container to have the specified string +// in the inspect output. It will wait until the specified timeout (in seconds) +// is reached. +// Deprecated: use cli.WaitFor +func waitInspect(name, expr, expected string, timeout time.Duration) error { + return waitInspectWithArgs(name, expr, expected, timeout) +} + +// Deprecated: use cli.WaitFor +func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { + return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) +} + +func getInspectBody(c *check.C, version, id string) []byte { + endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) + status, body, err := request.SockRequest("GET", endpoint, nil, daemonHost()) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + return body +} + +// Run a long running idle task in a background container using the +// system-specific default image and command. +func runSleepingContainer(c *check.C, extraArgs ...string) string { + return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) +} + +// Run a long running idle task in a background container using the specified +// image and the system-specific command. +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) string { + args := []string{"run", "-d"} + args = append(args, extraArgs...) + args = append(args, image) + args = append(args, sleepCommandForDaemonPlatform()...) + return cli.DockerCmd(c, args...).Combined() +} + +// minimalBaseImage returns the name of the minimal base image for the current +// daemon platform. 
+func minimalBaseImage() string { + return testEnv.MinimalBaseImage() +} + +func getGoroutineNumber() (int, error) { + i := struct { + NGoroutines int + }{} + status, b, err := request.SockRequest("GET", "/info", nil, daemonHost()) + if err != nil { + return 0, err + } + if status != http.StatusOK { + return 0, fmt.Errorf("http status code: %d", status) + } + if err := json.Unmarshal(b, &i); err != nil { + return 0, err + } + return i.NGoroutines, nil +} + +func waitForGoroutines(expected int) error { + t := time.After(30 * time.Second) + for { + select { + case <-t: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n > expected { + return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) + } + default: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n <= expected { + return nil + } + time.Sleep(200 * time.Millisecond) + } + } +} + +// getErrorMessage returns the error message from an error API response +func getErrorMessage(c *check.C, body []byte) string { + var resp types.ErrorResponse + c.Assert(json.Unmarshal(body, &resp), check.IsNil) + return strings.TrimSpace(resp.Message) +} + +func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { + after := time.After(timeout) + for { + v, comment := f(c) + assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) + select { + case <-after: + assert = true + default: + } + if assert { + if comment != nil { + args = append(args, comment) + } + c.Assert(v, checker, args...) + return + } + time.Sleep(100 * time.Millisecond) + } +} + +type checkF func(*check.C) (interface{}, check.CommentInterface) +type reducer func(...interface{}) interface{} + +func reducedCheck(r reducer, funcs ...checkF) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + var values []interface{} + var comments []string + for _, f := range funcs { + v, comment := f(c) + values = append(values, v) + if comment != nil { + comments = append(comments, comment.CheckCommentString()) + } + } + return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) + } +} + +func sumAsIntegers(vals ...interface{}) interface{} { + var s int + for _, v := range vals { + s += v.(int) + } + return s +} diff --git a/integration-cli/environment/clean.go b/integration-cli/environment/clean.go new file mode 100644 index 0000000..21905f5 --- /dev/null +++ b/integration-cli/environment/clean.go @@ -0,0 +1,217 @@ +package environment + +import ( + "encoding/json" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/integration-cli/request" + icmd "github.com/docker/docker/pkg/testutil/cmd" +) + +type testingT interface { + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Clean the environment, preserving protected objects (images, containers, ...) +// and removing everything else. It's meant to run after each test so that tests +// don't depend on each other. 
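+// A typical call site is a suite teardown, e.g. (sketch; testEnv is the package-level +// *Execution shared by the integration tests): testEnv.Clean(c, dockerBinary)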
+func (e *Execution) Clean(t testingT, dockerBinary string) { + if (e.DaemonPlatform() != "windows") || (e.DaemonPlatform() == "windows" && e.Isolation() == "hyperv") { + unpauseAllContainers(t, dockerBinary) + } + deleteAllContainers(t, dockerBinary) + deleteAllImages(t, dockerBinary, e.protectedElements.images) + deleteAllVolumes(t, dockerBinary) + deleteAllNetworks(t, dockerBinary, e.DaemonPlatform()) + if e.DaemonPlatform() == "linux" { + // TODO FIXME + //deleteAllPlugins(t, dockerBinary) + } +} + +func unpauseAllContainers(t testingT, dockerBinary string) { + containers := getPausedContainers(t, dockerBinary) + if len(containers) > 0 { + icmd.RunCommand(dockerBinary, append([]string{"unpause"}, containers...)...).Assert(t, icmd.Success) + } +} + +func getPausedContainers(t testingT, dockerBinary string) []string { + result := icmd.RunCommand(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + result.Assert(t, icmd.Success) + return strings.Fields(result.Combined()) +} + +var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`) + +func deleteAllContainers(t testingT, dockerBinary string) { + containers := getAllContainers(t, dockerBinary) + if len(containers) > 0 { + result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, containers...)...) + if result.Error != nil { + // If the error is "No such container: ..." this means the container doesn't exist anymore, + // or if it is "... removal of container ... is already in progress" it will be removed eventually. + // We can safely ignore those. + if strings.Contains(result.Stderr(), "No such container") || alreadyExists.MatchString(result.Stderr()) { + return + } + t.Fatalf("error removing containers %v : %v (%s)", containers, result.Error, result.Combined()) + } + } +} + +func getAllContainers(t testingT, dockerBinary string) []string { + result := icmd.RunCommand(dockerBinary, "ps", "-q", "-a") + result.Assert(t, icmd.Success) + return strings.Fields(result.Combined()) +} + +func deleteAllImages(t testingT, dockerBinary string, protectedImages map[string]struct{}) { + result := icmd.RunCommand(dockerBinary, "images", "--digests") + result.Assert(t, icmd.Success) + lines := strings.Split(string(result.Combined()), "\n")[1:] + imgMap := map[string]struct{}{} + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" || fields[1] == "" { + if fields[2] != "" { + imgMap[fields[0]+"@"+fields[2]] = struct{}{} + } else { + imgMap[fields[3]] = struct{}{} + } + // continue + } else { + imgMap[imgTag] = struct{}{} + } + } + } + if len(imgMap) != 0 { + imgs := make([]string, 0, len(imgMap)) + for k := range imgMap { + imgs = append(imgs, k) + } + icmd.RunCommand(dockerBinary, append([]string{"rmi", "-f"}, imgs...)...).Assert(t, icmd.Success) + } +} + +func deleteAllVolumes(t testingT, dockerBinary string) { + volumes, err := getAllVolumes() + if err != nil { + t.Fatalf("%v", err) + } + var errs []string + for _, v := range volumes { + status, b, err := request.SockRequest("DELETE", "/volumes/"+v.Name, nil, request.DaemonHost()) + if err != nil { + errs = append(errs, err.Error()) + continue + } + if status != http.StatusNoContent { + errs = append(errs, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) + } + } + if len(errs) > 0 { + t.Fatalf("%v", strings.Join(errs, "\n")) + } +} + +func getAllVolumes() 
([]*types.Volume, error) { + var volumes volumetypes.VolumesListOKBody + _, b, err := request.SockRequest("GET", "/volumes", nil, request.DaemonHost()) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &volumes); err != nil { + return nil, err + } + return volumes.Volumes, nil +} + +func deleteAllNetworks(t testingT, dockerBinary string, daemonPlatform string) { + networks, err := getAllNetworks() + if err != nil { + t.Fatalf("%v", err) + } + var errs []string + for _, n := range networks { + if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { + continue + } + if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { + // nat is a pre-defined network on Windows and cannot be removed + continue + } + status, b, err := request.SockRequest("DELETE", "/networks/"+n.Name, nil, request.DaemonHost()) + if err != nil { + errs = append(errs, err.Error()) + continue + } + if status != http.StatusNoContent { + errs = append(errs, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) + } + } + if len(errs) > 0 { + t.Fatalf("%v", strings.Join(errs, "\n")) + } +} + +func getAllNetworks() ([]types.NetworkResource, error) { + var networks []types.NetworkResource + _, b, err := request.SockRequest("GET", "/networks", nil, request.DaemonHost()) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &networks); err != nil { + return nil, err + } + return networks, nil +} + +func deleteAllPlugins(t testingT, dockerBinary string) { + plugins, err := getAllPlugins() + if err != nil { + t.Fatalf("%v", err) + } + var errs []string + for _, p := range plugins { + pluginName := p.Name + status, b, err := request.SockRequest("DELETE", "/plugins/"+pluginName+"?force=1", nil, request.DaemonHost()) + if err != nil { + errs = append(errs, err.Error()) + continue + } + if status != http.StatusOK { + errs = append(errs, fmt.Sprintf("error deleting plugin %s: %s", p.Name, string(b))) + } + } + if len(errs) > 0 { + t.Fatalf("%v", strings.Join(errs, "\n")) + } +} + +func getAllPlugins() (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + _, b, err := request.SockRequest("GET", "/plugins", nil, request.DaemonHost()) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &plugins); err != nil { + return nil, err + } + return plugins, nil +} diff --git a/integration-cli/environment/environment.go b/integration-cli/environment/environment.go new file mode 100644 index 0000000..a8a1045 --- /dev/null +++ b/integration-cli/environment/environment.go @@ -0,0 +1,229 @@ +package environment + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + "golang.org/x/net/context" +) + +var ( + // DefaultClientBinary is the name of the docker binary + DefaultClientBinary = os.Getenv("TEST_CLIENT_BINARY") +) + +func init() { + if DefaultClientBinary == "" { + // TODO: to be removed once we no longer depend on the docker cli for integration tests + //panic("TEST_CLIENT_BINARY must be set") + DefaultClientBinary = "docker" + } +} + +// Execution holds information about the test execution environment. 
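+// Instances are created via New below, e.g. (sketch): env, err := environment.New(); +// callers typically keep the result in a shared variable rather than building the struct by hand.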
+type Execution struct { + daemonPlatform string + localDaemon bool + experimentalDaemon bool + daemonStorageDriver string + isolation container.Isolation + daemonPid int + daemonKernelVersion string + // For a local daemon on Linux, these values will be used for testing + // user namespace support as the standard graph path(s) will be + // appended with the root remapped uid.gid prefix + dockerBasePath string + volumesConfigPath string + containerStoragePath string + // baseImage is the name of the base image for testing + // Environment variable WINDOWS_BASE_IMAGE can override this + baseImage string + dockerBinary string + + protectedElements protectedElements +} + +// New creates a new Execution struct +func New() (*Execution, error) { + localDaemon := true + // Deterministically working out the environment in which CI is running + // to evaluate whether the daemon is local or remote is not possible through + // a build tag. + // + // For example Windows to Linux CI under Jenkins tests the 64-bit + // Windows binary build with the daemon build tag, but calls a remote + // Linux daemon. + // + // We can't just say if Windows then assume the daemon is local as at + // some point, we will be testing the Windows CLI against a Windows daemon. + // + // Similarly, it will be perfectly valid to also run CLI tests from + // a Linux CLI (built with the daemon tag) against a Windows daemon. + if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { + localDaemon = false + } + info, err := getDaemonDockerInfo() + if err != nil { + return nil, err + } + daemonPlatform := info.OSType + if daemonPlatform != "linux" && daemonPlatform != "windows" { + return nil, fmt.Errorf("Cannot run tests against platform: %s", daemonPlatform) + } + baseImage := "scratch" + volumesConfigPath := filepath.Join(info.DockerRootDir, "volumes") + containerStoragePath := filepath.Join(info.DockerRootDir, "containers") + // Make sure in context of daemon, not the local platform. Note we can't + // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. 
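+ // For example (illustrative values): a Windows daemon may report DockerRootDir as +// C:\ProgramData\docker; joined on a Linux test host that yields C:\ProgramData\docker/volumes, +// which the rewrite below normalizes to C:\ProgramData\docker\volumes.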
+ if daemonPlatform == "windows" { + volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) + + baseImage = "microsoft/windowsservercore" + if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 { + baseImage = os.Getenv("WINDOWS_BASE_IMAGE") + fmt.Println("INFO: Windows Base image is ", baseImage) + } + } else { + volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) + } + + var daemonPid int + dest := os.Getenv("DEST") + b, err := ioutil.ReadFile(filepath.Join(dest, "docker.pid")) + if err == nil { + if p, err := strconv.ParseInt(string(b), 10, 32); err == nil { + daemonPid = int(p) + } + } + + dockerBinary, err := exec.LookPath(DefaultClientBinary) + if err != nil { + return nil, err + } + + return &Execution{ + localDaemon: localDaemon, + daemonPlatform: daemonPlatform, + daemonStorageDriver: info.Driver, + daemonKernelVersion: info.KernelVersion, + dockerBasePath: info.DockerRootDir, + volumesConfigPath: volumesConfigPath, + containerStoragePath: containerStoragePath, + isolation: info.Isolation, + daemonPid: daemonPid, + experimentalDaemon: info.ExperimentalBuild, + baseImage: baseImage, + dockerBinary: dockerBinary, + protectedElements: protectedElements{ + images: map[string]struct{}{}, + }, + }, nil +} +func getDaemonDockerInfo() (types.Info, error) { + // FIXME(vdemeester) should be safe to use as is + client, err := client.NewEnvClient() + if err != nil { + return types.Info{}, err + } + return client.Info(context.Background()) +} + +// LocalDaemon is true if the daemon under test is on the same +// host as the CLI. +func (e *Execution) LocalDaemon() bool { + return e.localDaemon +} + +// DaemonPlatform is held globally so that tests can make intelligent +// decisions on how to configure themselves according to the platform +// of the daemon. This is initialized in docker_utils by sending +// a version call to the daemon and examining the response header. +func (e *Execution) DaemonPlatform() string { + return e.daemonPlatform +} + +// DockerBasePath is the base path of the docker folder (by default it is -/var/run/docker) +func (e *Execution) DockerBasePath() string { + return e.dockerBasePath +} + +// VolumesConfigPath is the path of the volume configuration for the testing daemon +func (e *Execution) VolumesConfigPath() string { + return e.volumesConfigPath +} + +// ContainerStoragePath is the path where the container are stored for the testing daemon +func (e *Execution) ContainerStoragePath() string { + return e.containerStoragePath +} + +// DaemonStorageDriver is held globally so that tests can know the storage +// driver of the daemon. This is initialized in docker_utils by sending +// a version call to the daemon and examining the response header. 
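+// A test might branch on it, e.g. (hypothetical sketch): +// if testEnv.DaemonStorageDriver() == "devicemapper" { c.Skip("not supported on devicemapper") }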
+func (e *Execution) DaemonStorageDriver() string { + return e.daemonStorageDriver +} + +// Isolation is the isolation mode of the daemon under test +func (e *Execution) Isolation() container.Isolation { + return e.isolation +} + +// DaemonPID is the pid of the main test daemon +func (e *Execution) DaemonPID() int { + return e.daemonPid +} + +// ExperimentalDaemon tells whether the main daemon has +// experimental features enabled or not +func (e *Execution) ExperimentalDaemon() bool { + return e.experimentalDaemon +} + +// MinimalBaseImage is the image used for minimal builds (it depends on the platform) +func (e *Execution) MinimalBaseImage() string { + return e.baseImage +} + +// DaemonKernelVersion is the kernel version of the daemon as a string, as returned +// by an INFO call to the daemon. +func (e *Execution) DaemonKernelVersion() string { + return e.daemonKernelVersion +} + +// DaemonKernelVersionNumeric is the kernel version of the daemon as an integer. +// Mostly useful on Windows where DaemonKernelVersion holds the full string such +// as `10.0 14393 (14393.447.amd64fre.rs1_release_inmarket.161102-0100)`, but +// integration tests really only need the `14393` piece to make decisions. +func (e *Execution) DaemonKernelVersionNumeric() int { + if e.daemonPlatform != "windows" { + return -1 + } + v, _ := strconv.Atoi(strings.Split(e.daemonKernelVersion, " ")[1]) + return v +} + +// DockerBinary returns the docker binary for this testing environment +func (e *Execution) DockerBinary() string { + return e.dockerBinary +} + +// DaemonHost returns the daemon host string for this test execution +func DaemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} diff --git a/integration-cli/environment/protect.go b/integration-cli/environment/protect.go new file mode 100644 index 0000000..2b0dd6d --- /dev/null +++ b/integration-cli/environment/protect.go @@ -0,0 +1,12 @@ +package environment + +// ProtectImage adds the specified image(s) to be protected in case of clean +func (e *Execution) ProtectImage(t testingT, images ...string) { + for _, image := range images { + e.protectedElements.images[image] = struct{}{} + } +} + +type protectedElements struct { + images map[string]struct{} +} diff --git a/integration-cli/events_utils_test.go b/integration-cli/events_utils_test.go new file mode 100644 index 0000000..9350edc --- /dev/null +++ b/integration-cli/events_utils_test.go @@ -0,0 +1,206 @@ +package main + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +// eventMatcher is a function that tries to match an event input. +// It returns true if the event matches and a map with +// a set of key/value to identify the match. +type eventMatcher func(text string) (map[string]string, bool) + +// eventMatchProcessor is a function to handle an event match. +// It receives a map of key/value with the information extracted in a match. +type eventMatchProcessor func(matches map[string]string) + +// eventObserver runs an events command and observes its output. 
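+// A sketch of the intended flow, using the helpers defined later in this file (error +// handling elided): +// observer, _ := newEventObserver(c) +// _ = observer.Start() +// defer observer.Stop() +// go observer.Match(matchEventLine(id, "container", actions), processEventMatch(actions))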
+type eventObserver struct { + buffer *bytes.Buffer + command *exec.Cmd + scanner *bufio.Scanner + startTime string + disconnectionError error +} + +// newEventObserver creates the observer and initializes the command +// without running it. Users must call `eventObserver.Start` to start the command. +func newEventObserver(c *check.C, args ...string) (*eventObserver, error) { + since := daemonTime(c).Unix() + return newEventObserverWithBacklog(c, since, args...) +} + +// newEventObserverWithBacklog creates a new observer changing the start time of the backlog to return. +func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) { + startTime := strconv.FormatInt(since, 10) + cmdArgs := []string{"events", "--since", startTime} + if len(args) > 0 { + cmdArgs = append(cmdArgs, args...) + } + eventsCmd := exec.Command(dockerBinary, cmdArgs...) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + return nil, err + } + + return &eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + }, nil +} + +// Start starts the events command. +func (e *eventObserver) Start() error { + return e.command.Start() +} + +// Stop stops the events command. +func (e *eventObserver) Stop() { + e.command.Process.Kill() + e.command.Process.Release() +} + +// Match tries to match the events output with a given matcher. +func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) { + for e.scanner.Scan() { + text := e.scanner.Text() + e.buffer.WriteString(text) + e.buffer.WriteString("\n") + + if matches, ok := match(text); ok { + process(matches) + } + } + + err := e.scanner.Err() + if err == nil { + err = io.EOF + } + + logrus.Debugf("EventObserver scanner loop finished: %v", err) + e.disconnectionError = err +} + +func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) { + var foundEvent bool + scannerOut := e.buffer.String() + + if e.disconnectionError != nil { + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + for _, e := range events { + if _, ok := match(e); ok { + foundEvent = true + break + } + } + scannerOut = out + } + if !foundEvent { + c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut) + } +} + +// matchEventLine matches a text with the event regular expression. +// It returns the matches and true if the regular expression matches with the given id and event type. +// It returns an empty map and false if there is no match. +func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher { + return func(text string) (map[string]string, bool) { + matches := eventstestutils.ScanMap(text) + if len(matches) == 0 { + return matches, false + } + + if matchIDAndEventType(matches, id, eventType) { + if _, ok := actions[matches["action"]]; ok { + return matches, true + } + } + return matches, false + } +} + +// processEventMatch closes an action channel when an event line matches the expected action. +func processEventMatch(actions map[string]chan bool) eventMatchProcessor { + return func(matches map[string]string) { + if ch, ok := actions[matches["action"]]; ok { + ch <- true + } + } +} + +// parseEventAction parses an event text and returns the action. +// It fails if the text is not in the event format. 
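+// An event line has roughly this shape (approximate, illustrative format): +// 2017-01-01T15:04:05.999999999Z container start abc123 (image=busybox, name=web) +// from which the extracted action would be "start".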
+func parseEventAction(c *check.C, text string) string {
+	matches := eventstestutils.ScanMap(text)
+	return matches["action"]
+}
+
+// eventActionsByIDAndType returns the actions for a given id and type.
+// It fails if the text is not in the event format.
+func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string {
+	var filtered []string
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		c.Assert(matches, checker.Not(checker.IsNil))
+		if matchIDAndEventType(matches, id, eventType) {
+			filtered = append(filtered, matches["action"])
+		}
+	}
+	return filtered
+}
+
+// matchIDAndEventType returns true if an event matches a given id and type.
+// It also resolves names in the event attributes if the id doesn't match.
+func matchIDAndEventType(matches map[string]string, id, eventType string) bool {
+	return matchEventID(matches, id) && matches["eventType"] == eventType
+}
+
+func matchEventID(matches map[string]string, id string) bool {
+	matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id)
+	if !matchID && matches["attributes"] != "" {
+		// try matching a name in the attributes
+		attributes := map[string]string{}
+		for _, a := range strings.Split(matches["attributes"], ", ") {
+			kv := strings.Split(a, "=")
+			attributes[kv[0]] = kv[1]
+		}
+		matchID = attributes["name"] == id
+	}
+	return matchID
+}
+
+func parseEvents(c *check.C, out, match string) {
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		matched, err := regexp.MatchString(match, matches["action"])
+		c.Assert(err, checker.IsNil)
+		c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"]))
+	}
+}
+
+func parseEventsWithID(c *check.C, out, match, id string) {
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	for _, event := range events {
+		matches := eventstestutils.ScanMap(event)
+		c.Assert(matchEventID(matches, id), checker.True)
+
+		matched, err := regexp.MatchString(match, matches["action"])
+		c.Assert(err, checker.IsNil)
+		c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"]))
+	}
+}
diff --git a/integration-cli/fixtures/auth/docker-credential-shell-test b/integration-cli/fixtures/auth/docker-credential-shell-test
new file mode 100755
index 0000000..97b3f14
--- /dev/null
+++ b/integration-cli/fixtures/auth/docker-credential-shell-test
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+set -e
+
+listFile=shell_test_list.json
+
+case $1 in
+	"store")
+		in=$(</dev/stdin)
+		server=$(echo "$in" | jq --raw-output ".ServerURL")
+		serverHash=$(echo "$server" | sha1sum - | awk '{print $1}')
+
+		username=$(echo "$in" | jq --raw-output ".Username")
+		password=$(echo "$in" | jq --raw-output ".Secret")
+		echo "{ \"Username\": \"${username}\", \"Secret\": \"${password}\" }" > $TEMP/$serverHash
+		# add the server to the list file
+		if [[ ! -f $TEMP/$listFile ]]; then
+			echo "{ \"${server}\": \"${username}\" }" > $TEMP/$listFile
+		else
+			list=$(<$TEMP/$listFile)
+			echo "$list" | jq ". + {\"${server}\": \"${username}\"}" > $TEMP/$listFile
+		fi
+		;;
+	"get")
+		in=$(</dev/stdin)
+		serverHash=$(echo "$in" | sha1sum - | awk '{print $1}')
+		if [[ ! -f $TEMP/$serverHash ]]; then
+			echo "credentials not found in native keychain"
+			exit 1
+		fi
+		payload=$(<$TEMP/$serverHash)
+		echo "$payload"
+		;;
+	"erase")
+		in=$(</dev/stdin)
+		serverHash=$(echo "$in" | sha1sum - | awk '{print $1}')
+		rm -f $TEMP/$serverHash
+
+		# remove the server from the list file
+		list=$(<$TEMP/$listFile)
+		echo "$list" | jq "del(.[\"${in}\"])" > $TEMP/$listFile
+		;;
+	"list")
+		if [[ ! -f $TEMP/$listFile ]]; then
+			echo "{}"
+		else
+			payload=$(<$TEMP/$listFile)
+			echo "$payload"
+		fi
+		;;
+	*)
+		echo "unknown credential option"
+		exit 1
+		;;
+esac
diff --git a/integration-cli/fixtures/credentialspecs/valid.json b/integration-cli/fixtures/credentialspecs/valid.json
new file mode 100644
index 0000000..28913e4
--- /dev/null
+++ b/integration-cli/fixtures/credentialspecs/valid.json
@@ -0,0 +1,25 @@
+{
+	"CmsPlugins": [
+		"ActiveDirectory"
+	],
+	"DomainJoinConfig": {
+		"Sid": "S-1-5-21-4288985-3632099173-1864715694",
+		"MachineAccountName": "MusicStoreAcct",
+		"Guid": "3705d4c3-0b80-42a9-ad97-ebc1801c74b9",
+		"DnsTreeName": "hyperv.local",
+		"DnsName": "hyperv.local",
+		"NetBiosName": "hyperv"
+	},
+	"ActiveDirectoryConfig": {
+		"GroupManagedServiceAccounts": [
+			{
+				"Name": "MusicStoreAcct",
+				"Scope": "hyperv.local"
+			},
+			{
+				"Name": "MusicStoreAcct",
+				"Scope": "hyperv"
+			}
+		]
+	}
+}
diff --git a/integration-cli/fixtures/deploy/default.yaml b/integration-cli/fixtures/deploy/default.yaml
new file mode 100644
index 0000000..f30c04f
--- /dev/null
+++ b/integration-cli/fixtures/deploy/default.yaml
@@ -0,0 +1,9 @@
+
+version: "3"
+services:
+  web:
+    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
+    command: top
+  db:
+    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
+    command: "tail -f /dev/null"
diff --git a/integration-cli/fixtures/deploy/remove.yaml b/integration-cli/fixtures/deploy/remove.yaml
new file mode 100644
index 0000000..4337581
--- /dev/null
+++ b/integration-cli/fixtures/deploy/remove.yaml
@@ -0,0 +1,11 @@
+
+version: "3.1"
+services:
+  web:
+    image: busybox:latest
+    command: top
+    secrets:
+      - special
+secrets:
+  special:
+    file: fixtures/secrets/default
diff --git a/integration-cli/fixtures/deploy/secrets.yaml b/integration-cli/fixtures/deploy/secrets.yaml
new file mode 100644
index 0000000..6ac92cd
--- /dev/null
+++ b/integration-cli/fixtures/deploy/secrets.yaml
@@ -0,0 +1,20 @@
+
+version: "3.1"
+services:
+  web:
+    image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0
+    command: top
+    secrets:
+      - special
+      - source: super
+        target: foo.txt
+        mode: 0400
+      - star
+secrets:
+  special:
+    file: fixtures/secrets/default
+  super:
+    file: fixtures/secrets/default
+  star:
+    external:
+      name: outside
diff --git a/integration-cli/fixtures/https/ca.pem b/integration-cli/fixtures/https/ca.pem
new file mode 100644
index 0000000..6825d6d
--- /dev/null
+++ b/integration-cli/fixtures/https/ca.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG
+A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI
+Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls
+QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx
+CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv
+MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD
+VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW
+EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn
+0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp
+AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5
+sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV
+HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09
+q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD
+QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/client-cert.pem b/integration-cli/fixtures/https/client-cert.pem new file mode 100644 index 0000000..c05ed47 --- /dev/null +++ b/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ 
+YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/client-key.pem b/integration-cli/fixtures/https/client-key.pem new file mode 100644 index 0000000..b5c15f8 --- /dev/null +++ b/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/https/client-rogue-cert.pem b/integration-cli/fixtures/https/client-rogue-cert.pem new file mode 100644 index 0000000..21ae4bd --- /dev/null +++ b/integration-cli/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 
Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/client-rogue-key.pem b/integration-cli/fixtures/https/client-rogue-key.pem new file mode 100644 index 0000000..53c122a --- /dev/null +++ b/integration-cli/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/https/server-cert.pem b/integration-cli/fixtures/https/server-cert.pem new file mode 100644 index 0000000..08abfd1 --- /dev/null +++ b/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: 
sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git 
a/integration-cli/fixtures/https/server-key.pem b/integration-cli/fixtures/https/server-key.pem new file mode 100644 index 0000000..c269320 --- /dev/null +++ b/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/https/server-rogue-cert.pem b/integration-cli/fixtures/https/server-rogue-cert.pem new file mode 100644 index 0000000..28feba6 --- /dev/null +++ b/integration-cli/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- 
+MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/https/server-rogue-key.pem b/integration-cli/fixtures/https/server-rogue-key.pem new file mode 100644 index 0000000..10f7c65 --- /dev/null +++ b/integration-cli/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/integration-cli/fixtures/load/emptyLayer.tar b/integration-cli/fixtures/load/emptyLayer.tar new file mode 100644 index 0000000000000000000000000000000000000000..beabb569ac1e7fea69848f79ffd77fb17b2487b0 GIT binary patch literal 30720 zcmeI4ZExeo5y$c* zovW*2`my-BS#-6VC#(TWylUoH;FtY>E>gSBdD(R?T)*vNRTtCjW{dQq*rhc~JiEJz zsl`J#iI?4n-S>-|RPWcb+PkO&p2Vj7>-wi+S}$j_7sXd|yt?(*H*av1 z+nj$j?T=M`^{U#O?d!U0Kio7`Pi=kdMg6yBHQQD7cjwb=bNHthMQp;4h~=l$E~*AE zuwWB8>5;iMTvuHR-Lmal{@LpHT4MUleZcZ73oou6S9)q>^d-k>oX_}Qcxyo^p;T#I z4yBjEs}MP78A@rGh|&6(B$v4q&IKEaPbcYiRLK(|Fm&Lbue#U-=g_}>K?m>u=o9SE z|2Wfh|Nm{EC(M}~AUk1=_?|0jAs(MS-F>HlGq1Fv!G%u}F zxq~RdI2NK#f|*#Nn~(V{vFLOO8UqEuV@wWH0lWbP;+XE!kN?B|zvfDCV+{KLI33XJ zG420+{r|!CKbik8jnddl(C`@lF&t?A{}{IbApid{FhAKg29N*Y{y*OI0q1{v<6mif z{ttd8A4C0_$Nx6||IluLzUGZ<*Y1BAOzlPXfBaM3-28d; 
z^3|(Ru4ZY9j6K(!m62PF7=SBvvY>6u9_MkwI>$54a7OCT$T3-m=lJE( zmUnm}hhi;&u@=Bq+_JlF`T)-5bsBwzUH;iw#FV;!sB`|AkTbLnLJ;j*XJ0q(@q+&r#AY7K_tidj`Je@64Y zt^SAcHvH+*phv{y_9nO&aXZ27@H1gNA7uuCn|I7u4AJ4h5%>!(#?o4@01sM9=?Y<}U-wbn}~IT=+q) zpnU-y`(0?Hj|bqk-Cup*+8>Y5g-CYfCvx)B0vO)01+SpM1Tko z0U|&IhyW2F0z`la5CI}U1c(3;cp3zTzMXo^%Y*&eXctdy*>?6A8Sq zp|r}Cz7&k%LWj}|qk_p@3pT0=6BEu*_dHZBTzvXOYO{wlC)G^^hyW2F0z`la5CI}U k1c(3;AOb{y2oM1xKm>>Y5g-CYfCvx)B0vO)z%wTBe{YUtx&QzG literal 0 HcmV?d00001 diff --git a/integration-cli/fixtures/load/frozen.go b/integration-cli/fixtures/load/frozen.go new file mode 100644 index 0000000..13cd393 --- /dev/null +++ b/integration-cli/fixtures/load/frozen.go @@ -0,0 +1,182 @@ +package load + +import ( + "bufio" + "bytes" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var frozenImgDir = "/docker-frozen-images" + +// FrozenImagesLinux loads the frozen image set for the integration suite +// If the images are not available locally it will download them +// TODO: This loads whatever is in the frozen image dir, regardless of what +// images were passed in. If the images need to be downloaded, then it will respect +// the passed in images +func FrozenImagesLinux(dockerBinary string, images ...string) error { + imgNS := os.Getenv("TEST_IMAGE_NAMESPACE") + var loadImages []struct{ srcName, destName string } + for _, img := range images { + if err := exec.Command(dockerBinary, "inspect", "--type=image", img).Run(); err != nil { + srcName := img + // hello-world:latest gets re-tagged as hello-world:frozen + // there are some tests that use hello-world:latest specifically so it pulls + // the image and hello-world:frozen is used for when we just want a super + // small image + if img == "hello-world:frozen" { + srcName = "hello-world:latest" + } + if imgNS != "" { + srcName = imgNS + "/" + srcName + } + loadImages = append(loadImages, struct{ srcName, destName string }{ + srcName: srcName, + destName: img, + }) + } + } + if len(loadImages) == 0 { + // everything is loaded, we're done + return nil + } + + fi, err := os.Stat(frozenImgDir) + if err != nil || !fi.IsDir() { + srcImages := make([]string, 0, len(loadImages)) + for _, img := range loadImages { + srcImages = append(srcImages, img.srcName) + } + if err := pullImages(dockerBinary, srcImages); err != nil { + return errors.Wrap(err, "error pulling image list") + } + } else { + if err := loadFrozenImages(dockerBinary); err != nil { + return err + } + } + + for _, img := range loadImages { + if img.srcName != img.destName { + if out, err := exec.Command(dockerBinary, "tag", img.srcName, img.destName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + if out, err := exec.Command(dockerBinary, "rmi", img.srcName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + } + } + return nil +} + +func loadFrozenImages(dockerBinary string) error { + tar, err := exec.LookPath("tar") + if err != nil { + return errors.Wrap(err, "could not find tar binary") + } + tarCmd := exec.Command(tar, "-cC", frozenImgDir, ".") + out, err := tarCmd.StdoutPipe() + if err != nil { + return errors.Wrap(err, "error getting stdout pipe for tar command") + } + + errBuf := bytes.NewBuffer(nil) + tarCmd.Stderr = errBuf + tarCmd.Start() + defer tarCmd.Wait() + + cmd := exec.Command(dockerBinary, "load") + cmd.Stdin = out + if out, err := cmd.CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + return nil +} + +func 
pullImages(dockerBinary string, images []string) error { + cwd, err := os.Getwd() + if err != nil { + return errors.Wrap(err, "error getting path to dockerfile") + } + dockerfile := os.Getenv("DOCKERFILE") + if dockerfile == "" { + dockerfile = "Dockerfile" + } + dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) + pullRefs, err := readFrozenImageList(dockerfilePath, images) + if err != nil { + return errors.Wrap(err, "error reading frozen image list") + } + + var wg sync.WaitGroup + chErr := make(chan error, len(images)) + for tag, ref := range pullRefs { + wg.Add(1) + go func(tag, ref string) { + defer wg.Done() + if out, err := exec.Command(dockerBinary, "pull", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "tag", ref, tag).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "rmi", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + }(tag, ref) + } + wg.Wait() + close(chErr) + return <-chErr +} + +func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { + f, err := os.Open(dockerfilePath) + if err != nil { + return nil, errors.Wrap(err, "error reading dockerfile") + } + defer f.Close() + ls := make(map[string]string) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + if len(line) < 3 { + continue + } + if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") { + continue + } + + frozenImgDir = line[2] + if line[2] == frozenImgDir { + frozenImgDir = filepath.Join(os.Getenv("DEST"), "frozen-images") + } + + for scanner.Scan() { + img := strings.TrimSpace(scanner.Text()) + img = strings.TrimSuffix(img, "\\") + img = strings.TrimSpace(img) + split := strings.Split(img, "@") + if len(split) < 2 { + break + } + + for _, i := range images { + if split[0] == i { + ls[i] = img + break + } + } + } + } + return ls, nil +} diff --git a/integration-cli/fixtures/notary/delgkey1.crt b/integration-cli/fixtures/notary/delgkey1.crt new file mode 100644 index 0000000..2218f23 --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey1.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAP2EcMN2UXPcMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvgewhaYs +Ke5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqIOdxWjYITgJuHrTwB4ZhBqWS7 +tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbWK9PPhGGkeR01c/Q932m92Hsn +fCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4ylPRxs0RrE/rP+bEGssKQSbeCZ +wazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdvBqrRdWnkOZClhlLgEQ5nK2yV +B6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW8oKHlBBl6pRxHIKzNN4VFbeB +vvYvrogrDrC/owIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUFoHfukRa6qGk1ncON64Z +ASKlZdkwDQYJKoZIhvcNAQELBQADggEBAEq9Adpd03CPmpbRtTAJGAkjjLFr60sV +2r+/l/m9R31ZCN9ymM9nxToQ8zfMdeAh/nnPcErziil2gDVqXueCNDkRj09tmDIE +Q1Oc92uyNZNgcECow77cKZCTZSTku+qsJrYaykH5vSnia8ltcKj8inJedIcpBR+p +608HEQvF0Eg5eaLPJwH48BCb0Gqdri1dJgrNnqptz7MDr8M+u7tHVulbAd3YxLlq 
+JH1W2bkVUx6esbn/MUE5HL5iTuOYREEINvBSmLdmmFkampmCnCB/bDEyJeL9bAkt +ZPIi0UNSnqFKLSP1Vf8AGLXt6iO7+1OGvtsDXEEYdXVOMsSXZtUuT7A= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/delgkey1.key b/integration-cli/fixtures/notary/delgkey1.key new file mode 100644 index 0000000..cb37efc --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey1.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI +OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW +K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl +PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv +BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW +8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7 +WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ +R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB +ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh +mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj +1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj +uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY +tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2 +OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr +QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r +I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2 +utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO +Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi +j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM +kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA +3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s +Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS +xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr +84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq +njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA== +-----END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/delgkey2.crt b/integration-cli/fixtures/notary/delgkey2.crt new file mode 100644 index 0000000..bec0847 --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey2.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAIq8naKlYAQfMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyY2EWYTW +5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aHfoOe8wGKg3Ohz7UCBdD5Mob/ +L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3AaawEUOw2rwwMDEjLnDDTSZM +z8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY8ioRbROCL2PGgqywWq2fThav +c70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFHVARXiUv/ILHk7ImYnSGJUcuk +JTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJDSiRP72nkg/cE4BqMl9FrMwK +9iS8xa9yMDLUvwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUvQzzFmh3Sv3HcdExY3wx +/1u6JLAwDQYJKoZIhvcNAQELBQADggEBAJcmDme2Xj/HPUPwaN/EyCmjhY73EiHO +x6Pm16tscg5JGn5A+u3CZ1DmxUYl8Hp6MaW/sWzdtL0oKJg76pynadCWh5EacFR8 +u+2GV/IcN9mSX6JQzvrqbjSqo5/FehqBD+W5h3euwwApWA3STAadYeyEfmdOA3SQ +W1vzrA1y7i8qgTqeJ7UX1sEAXlIhBK2zPYaMB+en+ZOiPyNxJYj6IDdGdD2paC9L +6H9wKC+GAUTSdCWp89HP7ETSXEGr94AXkrwU+qNsiN+OyK8ke0EMngEPh5IQoplw 
+/7zEZCth3oKxvR1/4S5LmTVaHI2ZlbU4q9bnY72G4tw8YQr2gcBGo4w= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/delgkey2.key b/integration-cli/fixtures/notary/delgkey2.key new file mode 100644 index 0000000..5ccabe9 --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey2.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAyY2EWYTW5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aH +foOe8wGKg3Ohz7UCBdD5Mob/L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3 +AaawEUOw2rwwMDEjLnDDTSZMz8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY +8ioRbROCL2PGgqywWq2fThavc70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFH +VARXiUv/ILHk7ImYnSGJUcukJTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJ +DSiRP72nkg/cE4BqMl9FrMwK9iS8xa9yMDLUvwIDAQABAoIBAHmffvzx7ydESWwa +zcfdu26BkptiTvjjfJrqEd4wSewxWGPKqJqMXE8xX99A2KTZClZuKuH1mmnecQQY +iRXGrK9ewFMuHYGeKEiLlPlqR8ohXhyGLVm+t0JDwaXMp5t9G0i73O5iLTm5fNGd +FGxa9YnVW20Q8MqNczbVGH1D1zInhxzzOyFzBd4bBBJ8PdrUdyLpd7+RxY2ghnbT +p9ZANR2vk5zmDLJgZx72n/u+miJWuhY6p0v3Vq4z/HHgdhf+K6vpDdzTcYlA0rO4 +c/c+RKED3ZadGUD5QoLsmEN0e3FVSMPN1kt4ZRTqWfH8f2X4mLz33aBryTjktP6+ +1rX6ThECgYEA74wc1Tq23B5R0/GaMm1AK3Ko2zzTD8wK7NSCElh2dls02B+GzrEB +aE3A2GMQSuzb+EA0zkipwANBaqs3ZemH5G1pu4hstQsXCMd4jAJn0TmTXlplXBCf +PSc8ZUU6XcJENRr9Q7O9/TGlgahX+z0ndxYx/CMCsSu7XsMg4IZsbAcCgYEA12Vb +wKOVG15GGp7pMshr+2rQfVimARUP4gf3JnQmenktI4PfdnMW3a4L3DEHfLhIerwT +6lRp/NpxSADmuT4h1UO1l2lc+gmTVPw0Vbl6VwHpgS5Kfu4ZyM6n3S66f/dE4nu7 +hQF9yZz7vn5Agghak4p6a1wC1gdMzR1tvxFzk4kCgYByBMTskWfcWeok8Yitm+bB +R3Ar+kWT7VD97SCETusD5uG+RTNLSmEbHnc+B9kHcLo67YS0800pAeOvPBPARGnU +RmffRU5I1iB+o0MzkSmNItSMQoagTaEd4IEUyuC/I+qHRHNsOC+kRm86ycAm67LP +MhdUpe1wGxqyPjp15EXTHQKBgDKzFu+3EWfJvvKRKQ7dAh3BvKVkcl6a2Iw5l8Ej +YdM+JpPPfI/i8yTmzL/dgoem0Nii4IUtrWzo9fUe0TAVId2S/HFRSaNJEbbVTnRH +HjbQqmfPv5U08jjD+9siHp/0UfCFc1QRT8xe+RqTmReCY9+KntoaZEiAm2FEZgqt +TukRAoGAf7QqbTP5/UH1KSkX89F5qy/6GS3pw6TLj9Ufm/l/NO8Um8gag6YhEKWR +7HpkpCqjfWj8Av8ESR9cqddPGrbdqXFm9z7dCjlAd5T3Q3h/h+v+JzLQWbsI6WOb +SsOSWNyE006ZZdIiFwO6GfxpLI24sVtYKgyob6Q71oxSqfnrnT0= +-----END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/delgkey3.crt b/integration-cli/fixtures/notary/delgkey3.crt new file mode 100644 index 0000000..f434b45 --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey3.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAKHt/jxiWqMtMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqfbJk2Dk +C9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJzetsclsV/95nBhinIGcSmPQA +l318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCeS86SOyLNTpMD9gsF0S8nR1RN +h0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5PhyrMZgNip4IrG46umCkFlrw +zMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKorIJQbPtHVYdr4UxYnNmk6fbU +biEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj9fZ7Viw0t5IKXZPsxMhwknUT +9vmPzIJO6NiniwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUdTXRP1EzxQ+UDZSoheVo +Mobud1cwDQYJKoZIhvcNAQELBQADggEBADV9asTWWdbmpkeRuKyi0xGho39ONK88 +xxkFlco766BVgemo/rGQj3oPuw6M6SzHFoJ6JUPjmLiAQDIGEU/2/b6LcOuLjP+4 +YejCcDTY3lSW/HMNoAmzr2foo/LngNGfe/qhVFUqV7GjFT9+XzFFBfIZ1cQiL2ed +kc8rgQxFPwWXFCSwaENWeFnMDugkd+7xanoAHq8GsJpg5fTruDTmJkUqC2RNiMLn +WM7QaqW7+lmUnMnc1IBoz0hFhgoiadWM/1RQxx51zTVw6Au1koIm4ZXu5a+/WyC8 +K1+HyUbc0AVaDaRBpRSOR9aHRwLGh6WQ4aUZQNyJroc999qfYrDEEV8= +-----END CERTIFICATE----- 
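The delgkey*.crt/.key pairs in this group of fixtures are self-signed
delegation certificates and private keys consumed by the content-trust
helpers at the end of this patch. A minimal sketch of how a test wires one
pair into a notary delegation (the repository name and role here are
illustrative assumptions, not taken from the patch):

	// illustrative only: register fixture key pair 1 as a delegation
	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
	s.notaryInitRepo(c, repoName)
	s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public)
	s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private)
	s.notaryPublish(c, repoName)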
diff --git a/integration-cli/fixtures/notary/delgkey3.key b/integration-cli/fixtures/notary/delgkey3.key new file mode 100644 index 0000000..a61d18c --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey3.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAqfbJk2DkC9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJ +zetsclsV/95nBhinIGcSmPQAl318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCe +S86SOyLNTpMD9gsF0S8nR1RNh0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5 +PhyrMZgNip4IrG46umCkFlrwzMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKo +rIJQbPtHVYdr4UxYnNmk6fbUbiEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj +9fZ7Viw0t5IKXZPsxMhwknUT9vmPzIJO6NiniwIDAQABAoIBAQCAr/ed3A2umO7T +FDYZik3nXBiiiW4t7r+nGGgZ3/kNgY1lnuHlROxehXLZwbX1mrLnyML/BjhwezV9 +7ZNVPd6laVPpNj6DyxtWHRZ5yARlm1Al39E7CpQTrF0QsiWcpGnqIa62xjDRTpnq +askV/Q5qggyvqmE9FnFCQpEiAjlhvp7F0kVHVJm9s3MK3zSyR0UTZ3cpYus2Jr2z +OotHgAMHq5Hgb3dvxOeE2xRMeYAVDujbkNzXm2SddAtiRdLhWDh7JIr3zXhp0HyN +4rLOyhlgz00oIGeDt/C0q3fRmghr3iZOG+7m2sUx0FD1Ru1dI9v2A+jYmIVNW6+x +YJk5PzxJAoGBANDj7AGdcHSci/LDBPoTTUiz3uucAd27/IJma/iy8mdbVfOAb0Fy +PRSPvoozlpZyOxg2J4eH/o4QxQR4lVKtnLKZLNHK2tg3LarwyBX1LiI3vVlB+DT1 +AmV8i5bJAckDhqFeEH5qdWZFi03oZsSXWEqX5iMYCrdK5lTZggcrFZeHAoGBANBL +fkk3knAdcVfTYpmHx18GBi2AsCWTd20KD49YBdbVy0Y2Jaa1EJAmGWpTUKdYx40R +H5CuGgcAviXQz3bugdTU1I3tAclBtpJNU7JkhuE+Epz0CM/6WERJrE0YxcGQA5ui +6fOguFyiXD1/85jrDBOKy74aoS7lYz9r/a6eqmjdAoGBAJpm/nmrIAZx+Ff2ouUe +A1Ar9Ch/Zjm5zEmu3zwzOU4AiyWz14iuoktifNq2iyalRNz+mnVpplToPFizsNwu +C9dPtXtU0DJlhtIFrD/evLz6KnGhe4/ZUm4lgyBvb2xfuNHqL5Lhqelwmil6EQxb +Oh3Y7XkfOjyFln89TwlxZUJdAoGAJRMa4kta7EvBTeGZLjyltvsqhFTghX+vBSCC +ToBbYbbiHJgssXSPAylU4sD7nR3HPwuqM6VZip+OOMrm8oNXZpuPTce+xqTEq1vK +JvmPrG3RAFDLdMFZjqYSXhKnuGE60yv3Ol8EEbDwfB3XLQPBPYU56Jdy0xcPSE2f +dMJXEJ0CgYEAisZw0nXw6lFeYecu642EGuU0wv1O9i21p7eho9QwOcsoTl4Q9l+M +M8iBv+qTHO+D19l4JbkGvy2H2diKoYduUFACcuiFYs8fjrT+4Z6DyOQAQGAf6Ylw +BFbU15k6KbA9v4mZDfd1tY9x62L/XO55ZxYG+J+q0e26tEThgD8cEog= +-----END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/delgkey4.crt b/integration-cli/fixtures/notary/delgkey4.crt new file mode 100644 index 0000000..c8cbe46 --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey4.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJANae++ZkUEWMMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqULAjgba +Y2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4ltkQj1iO4zBTs0Ft9EzXFc5ZBh +pTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3HZpVOlEMI3npRfBGNIBllUaRN +PWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ImhSo3aipJUHHcp9Z9NgvpNC +3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw+YTrWZq3qVnnqUouHO//c9PG +Ry3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih58i/OBKe81eD9NuZDP2KrjTxI +5xkXKhj6DV2NnQIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUDt95hiqbQvi0KcvZGAUu +VisnztQwDQYJKoZIhvcNAQELBQADggEBAGi7qHai7MWbfeu6SlXhzIP3AIMa8TMi +lp/+mvPUFPswIVqYJ71MAN8uA7CTH3z50a2vYupGeOEtZqVJeRf+xgOEpwycncxp +Qz6wc6TWPVIoT5q1Hqxw1RD2MyKL+Y+QBDYwFxFkthpDMlX48I9frcqoJUWFxBF2 +lnRr/cE7BbPE3sMbXV3wGPlH7+eUf+CgzXJo2HB6THzagyEgNrDiz/0rCQa1ipFd +mNU3D/U6BFGmJNxhvSOtXX9escg8yjr05YwwzokHS2K4jE0ZuJPBd50C/Rvo3Mf4 +0h7/2Q95e7d42zPe9WYPu2F8KTWsf4r+6ddhKrKhYzXIcTAfHIOiO+U= +-----END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/delgkey4.key 
b/integration-cli/fixtures/notary/delgkey4.key new file mode 100644 index 0000000..f473cc4 --- /dev/null +++ b/integration-cli/fixtures/notary/delgkey4.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqULAjgbaY2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4lt +kQj1iO4zBTs0Ft9EzXFc5ZBhpTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3H +ZpVOlEMI3npRfBGNIBllUaRNPWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ +ImhSo3aipJUHHcp9Z9NgvpNC3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw ++YTrWZq3qVnnqUouHO//c9PGRy3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih5 +8i/OBKe81eD9NuZDP2KrjTxI5xkXKhj6DV2NnQIDAQABAoIBAGK0ZKnuYSiXux60 +5MvK4pOCsa/nY3mOcgVHhW4IzpRgJdIrcFOlz9ncXrBsSAIWjX7o3u2Ydvjs4DOW +t8d6frB3QiDInYcRVDjLCD6otWV97Bk9Ua0G4N4hAWkMF7ysV4oihS1JDSoAdo39 +qOdki6s9yeyHZGKwk2oHLlowU5TxQMBA8DHmxqBII1HTm+8xRz45bcEqRXydYSUn +P1JuSU9jFqdylxU+Nrq6ehslMQ3y7qNWQyiLGxu6EmR+vgrzSU0s3iAOqCHthaOS +VBBXPL3DNEYUS+0QGnGrACuJhanOMBfdiO6Orelx6ZzWZm38PNGv0yBt0WCM+8/A +TtQNGkECgYEA1LqR6AH9XikUQ0+rM4526BgVuYqtjw21h4Lj9alaA+YTQntBBJOv +iAcUpnJiV4T8jzAMLeqpK8R/rbxRnK5S9jOV2gr+puk4L6tH46cgahBUESDigDp8 +6vK8ur6ubBcXNPh3AT6rsPj+Ph2EU3raqiYdouvCdga/OCYZb+jr6UkCgYEAy7Cr +l8WssI/8/ORcQ4MFJFNyfz/Y2beNXyLd1PX0H+wRSiGcKzeUuTHNtzFFpMbrK/nx +ZOPCT2ROdHsBHzp1L+WquCb0fyMVSiYiXBU+VCFDbUU5tBr3ycTc7VwuFPENOiha +IdlWgew/aW110FQHIaqe9g+htRe+mXe++faZtbUCgYB/MSJmNzJX53XvHSZ/CBJ+ +iVAMBSfq3caJRLCqRNzGcf1YBbwFUYxlZ95n+wJj0+byckcF+UW3HqE8rtmZNf3y +qTtTCLnj8JQgpGeybU4LPMIXD7N9+fqQvBwuCC7gABpnGJyHCQK9KNNTLnDdPRqb +G3ki3ZYC3dvdZaJV8E2FyQKBgQCMa5Mf4kqWvezueo+QizZ0QILibqWUEhIH0AWV +1qkhiKCytlDvCjYhJdBnxjP40Jk3i+t6XfmKud/MNTAk0ywOhQoYQeKz8v+uSnPN +f2ekn/nXzq1lGGJSWsDjcXTjQvqXaVIZm7cjgjaE+80IfaUc9H75qvUT3vaq3f5u +XC7DMQKBgQDMAzCCpWlEPbZoFMl6F49+7jG0/TiqM/WRUSQnNtufPMbrR9Je4QM1 +L1UCANCPaHFOncKYer15NfIV1ctt5MZKImevDsUaQO8CUlO+dzd5H8KvHw9E29gA +B22v8k3jIjsYeRL+UJ/sBnWHgxdAe/NEM+TdlP2oP9D1gTifutPqAg== +-----END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/gen.sh b/integration-cli/fixtures/notary/gen.sh new file mode 100755 index 0000000..8d6381c --- /dev/null +++ b/integration-cli/fixtures/notary/gen.sh @@ -0,0 +1,18 @@ +for selfsigned in delgkey1 delgkey2 delgkey3 delgkey4; do + subj='/C=US/ST=CA/L=SanFrancisco/O=Docker/CN=delegation' + + openssl genrsa -out "${selfsigned}.key" 2048 + openssl req -new -key "${selfsigned}.key" -out "${selfsigned}.csr" -sha256 -subj "${subj}" + cat > "${selfsigned}.cnf" < 1 && buf[0] == 'Y' +} + +func NotaryHosting() bool { + // for now notary binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. + _, err := exec.LookPath(notaryServerBinary) + return err == nil +} + +func NotaryServerHosting() bool { + // for now notary-server binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. 
+ _, err := exec.LookPath(notaryServerBinary) + return err == nil +} + +func NotOverlay() bool { + return StorageDriverIsNot("overlay") +} + +func Devicemapper() bool { + return StorageDriverIs("devicemapper") +} + +func IPv6() bool { + cmd := exec.Command("test", "-f", "/proc/net/if_inet6") + return cmd.Run() != nil +} + +func UserNamespaceROMount() bool { + // quick case--userns not enabled in this test run + if os.Getenv("DOCKER_REMAP_ROOT") == "" { + return true + } + if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { + return false + } + return true +} + +func NotUserNamespace() bool { + root := os.Getenv("DOCKER_REMAP_ROOT") + return root == "" +} + +func UserNamespaceInKernel() bool { + if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { + /* + * This kernel-provided file only exists if user namespaces are + * supported + */ + return false + } + + // We need extra check on redhat based distributions + if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { + defer f.Close() + b := make([]byte, 1) + _, _ = f.Read(b) + return string(b) != "N" + } + + return true +} + +func IsPausable() bool { + if testEnv.DaemonPlatform() == "windows" { + return testEnv.Isolation() == "hyperv" + } + return true +} + +func NotPausable() bool { + if testEnv.DaemonPlatform() == "windows" { + return testEnv.Isolation() == "process" + } + return false +} + +func IsolationIs(expectedIsolation string) bool { + return testEnv.DaemonPlatform() == "windows" && string(testEnv.Isolation()) == expectedIsolation +} + +func IsolationIsHyperv() bool { + return IsolationIs("hyperv") +} + +func IsolationIsProcess() bool { + return IsolationIs("process") +} + +// testRequires checks if the environment satisfies the requirements +// for the test to run or skips the tests. +func testRequires(c *check.C, requirements ...requirement.Test) { + requirement.Is(c, requirements...) +} diff --git a/integration-cli/requirements_unix_test.go b/integration-cli/requirements_unix_test.go new file mode 100644 index 0000000..2ed04f6 --- /dev/null +++ b/integration-cli/requirements_unix_test.go @@ -0,0 +1,115 @@ +// +build !windows + +package main + +import ( + "bytes" + "io/ioutil" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" +) + +var ( + // SysInfo stores information about which features a kernel supports. 
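+	// SysInfo is populated once by this file's init (see the bottom of the
+	// file) and then consulted by the requirement helpers below; a test
+	// declares its needs with, e.g. (hypothetical usage):
+	//
+	//	testRequires(c, cpuCfsQuota, memoryLimitSupport)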
+ SysInfo *sysinfo.SysInfo +) + +func cpuCfsPeriod() bool { + return SysInfo.CPUCfsPeriod +} + +func cpuCfsQuota() bool { + return SysInfo.CPUCfsQuota +} + +func cpuShare() bool { + return SysInfo.CPUShares +} + +func oomControl() bool { + return SysInfo.OomKillDisable +} + +func pidsLimit() bool { + return SysInfo.PidsLimit +} + +func kernelMemorySupport() bool { + return SysInfo.KernelMemory +} + +func memoryLimitSupport() bool { + return SysInfo.MemoryLimit +} + +func memoryReservationSupport() bool { + return SysInfo.MemoryReservation +} + +func swapMemorySupport() bool { + return SysInfo.SwapLimit +} + +func memorySwappinessSupport() bool { + return SysInfo.MemorySwappiness +} + +func blkioWeight() bool { + return SysInfo.BlkioWeight +} + +func cgroupCpuset() bool { + return SysInfo.Cpuset +} + +func seccompEnabled() bool { + return supportsSeccomp && SysInfo.Seccomp +} + +func bridgeNfIptables() bool { + return !SysInfo.BridgeNFCallIPTablesDisabled +} + +func bridgeNfIP6tables() bool { + return !SysInfo.BridgeNFCallIP6TablesDisabled +} + +func unprivilegedUsernsClone() bool { + content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") + return err != nil || !strings.Contains(string(content), "0") +} + +func ambientCapabilities() bool { + content, err := ioutil.ReadFile("/proc/self/status") + return err != nil || strings.Contains(string(content), "CapAmb:") +} + +func overlayFSSupported() bool { + cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") + out, err := cmd.CombinedOutput() + if err != nil { + return false + } + return bytes.Contains(out, []byte("overlay\n")) +} + +func overlay2Supported() bool { + if !overlayFSSupported() { + return false + } + + daemonV, err := kernel.ParseRelease(testEnv.DaemonKernelVersion()) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + +} + +func init() { + SysInfo = sysinfo.New(true) +} diff --git a/integration-cli/test_vars_exec_test.go b/integration-cli/test_vars_exec_test.go new file mode 100644 index 0000000..7633b34 --- /dev/null +++ b/integration-cli/test_vars_exec_test.go @@ -0,0 +1,8 @@ +// +build !test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = true +) diff --git a/integration-cli/test_vars_noexec_test.go b/integration-cli/test_vars_noexec_test.go new file mode 100644 index 0000000..0845090 --- /dev/null +++ b/integration-cli/test_vars_noexec_test.go @@ -0,0 +1,8 @@ +// +build test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = false +) diff --git a/integration-cli/test_vars_noseccomp_test.go b/integration-cli/test_vars_noseccomp_test.go new file mode 100644 index 0000000..2f47ab0 --- /dev/null +++ b/integration-cli/test_vars_noseccomp_test.go @@ -0,0 +1,8 @@ +// +build !seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = false +) diff --git a/integration-cli/test_vars_seccomp_test.go b/integration-cli/test_vars_seccomp_test.go new file mode 100644 index 0000000..00cf697 --- /dev/null +++ b/integration-cli/test_vars_seccomp_test.go @@ -0,0 +1,8 @@ +// +build seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = true +) diff --git a/integration-cli/test_vars_test.go b/integration-cli/test_vars_test.go new file mode 100644 index 
0000000..139279c --- /dev/null +++ b/integration-cli/test_vars_test.go @@ -0,0 +1,11 @@ +package main + +// sleepCommandForDaemonPlatform is a helper function that determines what +// the command is for a sleeping container based on the daemon platform. +// The Windows busybox image does not have a `top` command. +func sleepCommandForDaemonPlatform() []string { + if testEnv.DaemonPlatform() == "windows" { + return []string{"sleep", "240"} + } + return []string{"top"} +} diff --git a/integration-cli/test_vars_unix_test.go b/integration-cli/test_vars_unix_test.go new file mode 100644 index 0000000..f9ecc01 --- /dev/null +++ b/integration-cli/test_vars_unix_test.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = true + + expectedFileChmod = "-rw-r--r--" + + // On Unix variants, the busybox image comes with the `top` command which + // runs indefinitely while still being interruptible by a signal. + defaultSleepImage = "busybox" +) diff --git a/integration-cli/test_vars_windows_test.go b/integration-cli/test_vars_windows_test.go new file mode 100644 index 0000000..bfc9a5a --- /dev/null +++ b/integration-cli/test_vars_windows_test.go @@ -0,0 +1,15 @@ +// +build windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = false + + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" + + // On Windows, the busybox image doesn't have the `top` command, so we rely + // on `sleep` with a high duration. + defaultSleepImage = "busybox" +) diff --git a/integration-cli/trust_server_test.go b/integration-cli/trust_server_test.go new file mode 100644 index 0000000..e3f0674 --- /dev/null +++ b/integration-cli/trust_server_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + icmd "github.com/docker/docker/pkg/testutil/cmd" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var notaryBinary = "notary" +var notaryServerBinary = "notary-server" + +type keyPair struct { + Public string + Private string +} + +type testNotary struct { + cmd *exec.Cmd + dir string + keys []keyPair +} + +const notaryHost = "localhost:4443" +const notaryURL = "https://" + notaryHost + +var SuccessTagging = icmd.Expected{ + Out: "Tagging", +} + +var SuccessSigningAndPushing = icmd.Expected{ + Out: "Signing and pushing trust metadata", +} + +var SuccessDownloaded = icmd.Expected{ + Out: "Status: Downloaded", +} + +var SuccessDownloadedOnStderr = icmd.Expected{ + Err: "Status: Downloaded", +} + +func newTestNotary(c *check.C) (*testNotary, error) { + // generate server config + template := `{ + "server": { + "http_addr": "%s", + "tls_key_file": "%s", + "tls_cert_file": "%s" + }, + "trust_service": { + "type": "local", + "hostname": "", + "port": "", + "key_algorithm": "ed25519" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "memory" + } +}` + tmp, err := ioutil.TempDir("", "notary-test-") + if err != nil { + return nil, err + } + confPath := filepath.Join(tmp, "config.json") + config, err := os.Create(confPath) + if err != nil { + return nil, err + } + defer config.Close() + + workingDir, err := os.Getwd() + if err != nil { + return nil, err + } 
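+	// render the server config template with the fixture TLS material; the
+	// paths below assume the suite runs from integration-cli, where
+	// fixtures/notary/localhost.key and localhost.cert live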
+ if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // generate client config + clientConfPath := filepath.Join(tmp, "client-config.json") + clientConfig, err := os.Create(clientConfPath) + if err != nil { + return nil, err + } + defer clientConfig.Close() + + template = `{ + "trust_dir" : "%s", + "remote_server": { + "url": "%s", + "skipTLSVerify": true + } +}` + if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.Dir(), "trust"), notaryURL); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // load key fixture filenames + var keys []keyPair + for i := 1; i < 5; i++ { + keys = append(keys, keyPair{ + Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), + Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), + }) + } + + // run notary-server + cmd := exec.Command(notaryServerBinary, "-config", confPath) + if err := cmd.Start(); err != nil { + os.RemoveAll(tmp) + if os.IsNotExist(err) { + c.Skip(err.Error()) + } + return nil, err + } + + testNotary := &testNotary{ + cmd: cmd, + dir: tmp, + keys: keys, + } + + // Wait for notary to be ready to serve requests. + for i := 1; i <= 20; i++ { + if err = testNotary.Ping(); err == nil { + break + } + time.Sleep(10 * time.Millisecond * time.Duration(i*i)) + } + + if err != nil { + c.Fatalf("Timeout waiting for test notary to become available: %s", err) + } + + return testNotary, nil +} + +func (t *testNotary) Ping() error { + tlsConfig := tlsconfig.ClientDefault() + tlsConfig.InsecureSkipVerify = true + client := http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (t *testNotary) Close() { + t.cmd.Process.Kill() + t.cmd.Process.Wait() + os.RemoveAll(t.dir) +} + +func trustedCmd(cmd *icmd.Cmd) func() { + pwd := "12345678" + cmd.Env = append(cmd.Env, trustEnv(notaryURL, pwd, pwd)...) + return nil +} + +func trustedCmdWithServer(server string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + pwd := "12345678" + cmd.Env = append(cmd.Env, trustEnv(server, pwd, pwd)...) + return nil + } +} + +func trustedCmdWithPassphrases(rootPwd, repositoryPwd string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Env = append(cmd.Env, trustEnv(notaryURL, rootPwd, repositoryPwd)...) + return nil + } +} + +func trustEnv(server, rootPwd, repositoryPwd string) []string { + env := append(os.Environ(), []string{ + "DOCKER_CONTENT_TRUST=1", + fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), + fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), + fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), + }...) 
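+	// Illustrative sketch, not part of the original patch (dockerBinary is the
+	// CLI binary path used elsewhere in this suite; runTrusted is a
+	// hypothetical name): any command executed with the environment built
+	// above signs on push and verifies on pull without passphrase prompts.
+	runTrusted := func(args ...string) *icmd.Result {
+		return icmd.RunCmd(icmd.Cmd{
+			Command: append([]string{dockerBinary}, args...),
+			Env:     env,
+		})
+	}
+	_ = runTrusted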
+ return env +} + +func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + cli.DockerCmd(c, "tag", "busybox", repoName) + cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "rmi", repoName) + return repoName +} + +func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // install the plugin under the trusted alias and push it to the private registry + cli.DockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) + cli.Docker(cli.Args("plugin", "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) + cli.DockerCmd(c, "plugin", "rm", "-f", repoName) + return repoName +} + +func (s *DockerTrustSuite) notaryCmd(c *check.C, args ...string) string { + pwd := "12345678" + env := []string{ + fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), + } + result := icmd.RunCmd(icmd.Cmd{ + Command: append([]string{notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json")}, args...), + Env: append(os.Environ(), env...), + }) + result.Assert(c, icmd.Success) + return result.Combined() +} + +func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { + s.notaryCmd(c, "init", repoName) +} + +func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { + pathsArg := "--all-paths" + if len(paths) > 0 { + pathsArg = "--paths=" + strings.Join(paths, ",") + } + + s.notaryCmd(c, "delegation", "add", repoName, role, pubKey, pathsArg) +} + +func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { + s.notaryCmd(c, "publish", repoName) +} + +func (s *DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { + s.notaryCmd(c, "key", "import", privKey, "-g", repoName, "-r", role) +} + +func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { + out := s.notaryCmd(c, "list", repoName, "-r", role) + + // should look something like: + // NAME DIGEST SIZE (BYTES) ROLE + // ------------------------------------------------------------------------------------------------------ + // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets + + targets := make(map[string]string) + + // no target + lines := strings.Split(strings.TrimSpace(out), "\n") + if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { + return targets + } + + // otherwise, there is at least one target + c.Assert(len(lines), checker.GreaterOrEqualThan, 3) + + for _, line := range lines[2:] { + tokens := strings.Fields(line) + c.Assert(tokens, checker.HasLen, 4) + targets[tokens[0]] = tokens[3] + } + + return targets +} + +func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { + // check all the roles + for _, role := range roles { + targets := s.notaryListTargetsInRole(c, repoName, role) + roleName, ok := targets[target] + c.Assert(ok, checker.True) + c.Assert(roleName, checker.Equals, role) + } +} + +func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles
...string) { + targets := s.notaryListTargetsInRole(c, repoName, "targets") + + roleName, ok := targets[target] + if ok { + for _, role := range roles { + c.Assert(roleName, checker.Not(checker.Equals), role) + } + } +} diff --git a/integration-cli/utils_test.go b/integration-cli/utils_test.go new file mode 100644 index 0000000..2725ddf --- /dev/null +++ b/integration-cli/utils_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "os/exec" + + "github.com/docker/docker/pkg/testutil/cmd" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if testEnv.DaemonPlatform() == "windows" { + return "c:", `\` + } + return "", "/" +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +// Deprecated: use pkg/testutil/cmd instead +func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Combined(), result.ExitCode, result.Error +} + +// Temporary shim for migrating commands to the new function +func transformCmd(execCmd *exec.Cmd) cmd.Cmd { + return cmd.Cmd{ + Command: execCmd.Args, + Env: execCmd.Env, + Dir: execCmd.Dir, + Stdin: execCmd.Stdin, + Stdout: execCmd.Stdout, + } +} diff --git a/landr.conf.js b/landr.conf.js new file mode 100644 index 0000000..e8248d2 --- /dev/null +++ b/landr.conf.js @@ -0,0 +1,88 @@ +const fs = require('fs') + +const getArch = (str) => { + const [ _, arch ] = str.match(/-([^-]+)\.tar\.gz$/) + return arch +} + +const packagePrettyName = (str) => `Docker for ${getArch(str)}` + +const prepAssets = (release) => { + release.assets = release.assets.map((asset) => { + return Object.assign({}, asset, { + prettyName: packagePrettyName(asset.name), + arch: getArch(asset.name), + os: 'Linux' + }) + }) + + return release +} + +module.exports = { + theme: 'landr-theme-basic', + hooks: { + 'post-build': ({ config }) => { + const data = fs.readFileSync(`${__dirname}/contrib/install.sh`, 'utf-8') + return fs.writeFileSync(`${config.distDir}/install.sh`, data) + } + }, + middleware: (store, action, next) => { + if (action.type === 'ADD_RELEASE') { + // intercept all releases and add pretty labels to assets + action.payload = prepAssets(action.payload) + } + + return next(action) + }, + settings: { + lead: 'A Moby-based container engine for IoT', + analytics: { + mixpanelToken: '81dd4ca517f8bd50d46aa8fe057882ac', // mixpanelToken + gosquaredId: 'GSN-122115-A', // gosquared Id + gaSite: 'balena.io', // google Analytics site eg balena.io + gaId: 'UA-45671959-5' // google Analytics ID + }, + theme: { + colors: { + primary: '#00A375' + } + }, + callToActionCommand: 'curl -sfL https://balena.io/install.sh | sh', + features: [ + { + 'title': 'Small footprint', + 'image': 'footprint', + 'description': '3.5x smaller than Docker CE, packaged as a single binary' + }, + { + 'title': 'Multi-arch support', + 'image': 'multiple', + 'description': 'Available for a wide variety of chipset architectures, supporting everything from tiny IoT devices to large industrial gateways' + }, + { + 'title': 'True container deltas', + 'image': 'bandwidth', + 'description': 'Bandwidth-efficient updates with binary diffs, 10-70x smaller than pulling layers in common scenarios' + }, + { + 'title': 'Minimal wear-and-tear', + 'image': 'storage', + 'description': 'Extract layers as they arrive to prevent excessive writing to disk, protecting your storage from eventual corruption' + }, + { + 'title': 'Failure-resistant pulls', + 'image': 'failure-resistant', + 'description': 'Atomic and durable image 
pulls defend against partial container pulls in the event of power failure' + }, + { + 'title': 'Conservative memory use', + 'image': 'undisturbed', + 'description': 'Prevents page cache thrashing during image pull, so your application runs undisturbed in low-memory situations' + } + ], + motivation: [ + 'Balena is a new container engine purpose-built for embedded and IoT use cases and compatible with Docker containers.
Based on Docker’s Moby Project, balena supports container deltas for 10-70x more efficient bandwidth usage, has 3.5x smaller binaries, uses RAM and storage more conservatively, and focuses on atomicity and durability of container pulling.
Read more in our blog post.' + ], + } +} diff --git a/layer/empty.go b/layer/empty.go new file mode 100644 index 0000000..f70f844 --- /dev/null +++ b/layer/empty.go @@ -0,0 +1,74 @@ +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + + "github.com/docker/docker/pkg/ioutils" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. +var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarSeekStream() (ioutils.ReadSeekCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutils.NewReadSeekCloserWrapper(bytes.NewReader(buf.Bytes()), func() error {return nil}), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +func (el *emptyLayer) Platform() Platform { + return "" +} + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/layer/empty_test.go b/layer/empty_test.go new file mode 100644 index 0000000..5555dbd --- /dev/null +++ b/layer/empty_test.go @@ -0,0 +1,46 @@ +package layer + +import ( + "io" + "testing" + + "github.com/opencontainers/go-digest" +) + +func TestEmptyLayer(t *testing.T) { + if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { + t.Fatal("wrong ChainID for empty layer") + } + + if EmptyLayer.DiffID() != DigestSHA256EmptyTar { + t.Fatal("wrong DiffID for empty layer") + } + + if EmptyLayer.Parent() != nil { + t.Fatal("expected no parent for empty layer") + } + + if size, err := EmptyLayer.Size(); err != nil || size != 0 { + t.Fatal("expected zero size for empty layer") + } + + if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { + t.Fatal("expected zero diffsize for empty layer") + } + + tarStream, err := EmptyLayer.TarStream() + if err != nil { + t.Fatalf("error streaming tar for empty layer: %v", err) + } + + digester := digest.Canonical.Digester() + _, err = io.Copy(digester.Hash(), tarStream) + + if err != nil { + t.Fatalf("error hashing empty tar layer: %v", err) + } + + if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { + t.Fatal("empty layer tar stream hashes to wrong value") + } +} diff --git a/layer/filestore.go b/layer/filestore.go new file mode 100644 index 0000000..7ea418c --- /dev/null +++ b/layer/filestore.go @@ -0,0 +1,354 @@ +package layer + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + 
"github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. +func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer 
ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := 
ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/layer/filestore_test.go b/layer/filestore_test.go new file mode 100644 index 0000000..2126a20 --- /dev/null +++ b/layer/filestore_test.go @@ -0,0 +1,104 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/opencontainers/go-digest" +) + +func randomLayerID(seed int64) ChainID { + r := rand.New(rand.NewSource(seed)) + + return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) +} + +func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { + td, err := ioutil.TempDir("", "layers-") + if err != nil { + t.Fatal(err) + } + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + + return fms.(*fileMetadataStore), td, func() { + if err := os.RemoveAll(td); err != nil { + t.Logf("Failed to cleanup %q: %s", td, err) + } + } +} + +func assertNotDirectoryError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.ENOTDIR { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) + } +} + +func TestCommitFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if err := tx.SetSize(0); err != nil { + t.Fatal(err) + } + + err = tx.Commit(randomLayerID(5)) + if err == nil { + t.Fatalf("Expected error committing with invalid layer parent 
directory") + } + assertNotDirectoryError(t, err) +} + +func TestStartTransactionFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + _, err := fms.StartTransaction() + if err == nil { + t.Fatalf("Expected error starting transaction with invalid layer parent directory") + } + assertNotDirectoryError(t, err) + + if err := os.Remove(filepath.Join(td, "tmp")); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { + t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) + } + + if err := tx.Cancel(); err != nil { + t.Fatal(err) + } +} diff --git a/layer/filestore_unix.go b/layer/filestore_unix.go new file mode 100644 index 0000000..fe8a4f8 --- /dev/null +++ b/layer/filestore_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package layer + +// SetPlatform writes the "platform" file to the layer filestore +func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error { + return nil +} + +// GetPlatform reads the "platform" file from the layer filestore +func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) { + return "", nil +} diff --git a/layer/filestore_windows.go b/layer/filestore_windows.go new file mode 100644 index 0000000..066456d --- /dev/null +++ b/layer/filestore_windows.go @@ -0,0 +1,35 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +// SetPlatform writes the "platform" file to the layer filestore +func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error { + if platform == "" { + return nil + } + return fm.ws.WriteFile("platform", []byte(platform), 0644) +} + +// GetPlatform reads the "platform" file from the layer filestore +func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "platform")) + if err != nil { + // For backwards compatibility, the platform file may not exist. Default to "windows" if missing. + if os.IsNotExist(err) { + return "windows", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content != "windows" && content != "linux" { + return "", fmt.Errorf("invalid platform value: %s", content) + } + + return Platform(content), nil +} diff --git a/layer/layer.go b/layer/layer.go new file mode 100644 index 0000000..007ee11 --- /dev/null +++ b/layer/layer.go @@ -0,0 +1,300 @@ +// Package layer is package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. 
+ ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when a mount is attempted + // to be created but there is already a mount with the name + // used for creation. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + + // ErrNotSupported is used when the action is not supported + // on the current platform + ErrNotSupported = errors.New("not supported on this platform") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// Platform is the platform of a layer +type Platform string + +// String returns a string rendition of a layer's target platform +func (id Platform) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. + TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // TarSeekStream returns a seekable tar archive stream for the contents of + // a layer + TarSeekStream() (ioutils.ReadSeekCloser, error) + + // TarStreamFrom returns a tar archive stream for all the layer chain with + // arbitrary depth. + TarStreamFrom(ChainID) (io.ReadCloser, error) + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of the DiffID of the top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Platform returns the platform of the layer + Platform() Platform + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from the parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with the layer.
+ Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// readable and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // to the writable layer. + Mount(mountLabel string) (string, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer +type CreateRWLayerOpts struct { + MountLabel string + InitFunc MountInit + StorageOpt map[string]string +} + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID, Platform) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, Platform, distribution.Descriptor) (Layer, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + SetDescriptor(distribution.Descriptor) error + SetPlatform(Platform) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit.
+ StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + GetDescriptor(ChainID) (distribution.Descriptor, error) + GetPlatform(ChainID) (Platform, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) +} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/layer/layer_store.go b/layer/layer_store.go new file mode 100644 index 0000000..850a5e6 --- /dev/null +++ b/layer/layer_store.go @@ -0,0 +1,789 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsplitutils" + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. 
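+// Illustrative sketch, not part of the original patch: a worked example of
+// the chain derivation implemented by createChainIDFromParent above. For two
+// diff IDs d1 and d2, the chain of the first layer is ChainID(d1), and the
+// second layer's chain is digest.FromBytes of "chain(d1) SPACE d2".
+// exampleChainID is a hypothetical helper name.
+func exampleChainID(d1, d2 DiffID) ChainID {
+	// Equivalent to createChainIDFromParent("", d1, d2).
+	return CreateChainID([]DiffID{d1, d2})
+}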
+const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex + + useTarSplit bool + + platform string +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + IDMappings *idtools.IDMappings + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool + Platform string +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.StorePath, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.IDMappings.UIDs(), + GIDMaps: options.IDMappings.GIDs(), + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver, options.Platform) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. +func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, platform string) (Store, error) { + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + useTarSplit: !caps.ReproducesExactDiffs, + platform: platform, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + platform, err := ls.store.GetPlatform(layer) + if err != nil { + return nil, fmt.Errorf("failed to get platform for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + 
descriptor: descriptor, + platform: platform, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.Digester() + tr := io.TeeReader(ts, digester.Hash()) + + rdr := tr + if ls.useTarSplit { + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID, platform Platform) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, platform, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). 
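+	// The deferred cleanup blocks registered below key off this shared err
+	// variable: any path that returns with a non-nil err releases the
+	// reference taken on the parent chain, removes the half-created driver
+	// layer, and cancels the metadata transaction.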
+ var err error + var pid string + var p *roLayer + + // Integrity check - ensure we are creating something for the correct platform + if system.LCOWSupported() { + if strings.ToLower(ls.platform) != strings.ToLower(string(platform)) { + return nil, fmt.Errorf("cannot create entry for platform %q in layer store for platform %q", platform, ls.platform) + } + } + + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + platform: platform, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not 
retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + 
delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. + initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { + if !ls.useTarSplit { + var parentCacheID string + if rl.parent != nil { + parentCacheID = rl.parent.cacheID + } + + return ls.driver.Diff(rl.cacheID, parentCacheID) + } + + r, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := ls.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + + return pr, nil +} + +func (ls *layerStore) getTarSeekStream(rl *roLayer) (ioutils.ReadSeekCloser, error) { + if !ls.useTarSplit { + return nil, fmt.Errorf("unsupported backend driver for seek streams") + } + + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + metadata, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(rl.cacheID) + if err != nil { + return nil, err + } + + metaUnpacker := storage.NewJSONUnpacker(metadata) + + stream, err := tarsplitutils.NewRandomAccessTarStream(fileGetCloser, metaUnpacker) + if err != nil { + return nil, err + } + + return ioutils.NewReadSeekCloserWrapper(stream, func() error { + return fileGetCloser.Close() + }), nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return 
asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil +} diff --git a/layer/layer_store_windows.go b/layer/layer_store_windows.go new file mode 100644 index 0000000..ccbf6dd --- /dev/null +++ b/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, platform, descriptor) +} diff --git a/layer/layer_test.go b/layer/layer_test.go new file mode 100644 index 0000000..8ec5b4d --- /dev/null +++ b/layer/layer_test.go @@ -0,0 +1,772 @@ +package layer + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" +) + +func init() { + graphdriver.ApplyUncompressedLayer = archive.UnpackLayer + defaultArchiver := archive.NewDefaultArchiver() + vfs.CopyWithTar = defaultArchiver.CopyWithTar +} + +func newVFSGraphDriver(td string) (graphdriver.Driver, error) { + uidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + options := graphdriver.Options{Root: td, UIDMaps: uidMap, GIDMaps: gidMap} + return graphdriver.GetDriver("vfs", nil, options) +} + +func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { + td, err := ioutil.TempDir("", "graph-") + if err != nil { + t.Fatal(err) + } + + driver, err := newVFSGraphDriver(td) + if err != nil { + t.Fatal(err) + } + + return driver, func() { + os.RemoveAll(td) + } +} + +func newTestStore(t *testing.T) (Store, string, func()) { + td, err := ioutil.TempDir("", "layerstore-") + if err != nil { + t.Fatal(err) + } + + graph, graphcleanup := newTestGraphDriver(t) + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + return ls, td, func() { + graphcleanup() + os.RemoveAll(td) + } +} + +type layerInit func(root string) error + +func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { + containerID := stringid.GenerateRandomID() + mount, err := ls.CreateRWLayer(containerID, parent, nil) + if err != nil { + return nil, err + } + + path, err := mount.Mount("") + if err != nil { + return nil, err + } + + if err := layerFunc(path); err != nil { + return nil, err + } + + ts, err := mount.TarStream() + if err != nil { + return 
nil, err + } + defer ts.Close() + + layer, err := ls.Register(ts, parent, Platform(runtime.GOOS)) + if err != nil { + return nil, err + } + + if err := mount.Unmount(); err != nil { + return nil, err + } + + if _, err := ls.ReleaseRWLayer(mount); err != nil { + return nil, err + } + + return layer, nil +} + +type FileApplier interface { + ApplyFile(root string) error +} + +type testFile struct { + name string + content []byte + permission os.FileMode +} + +func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { + return &testFile{ + name: name, + content: content, + permission: perm, + } +} + +func (tf *testFile) ApplyFile(root string) error { + fullPath := filepath.Join(root, tf.name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + // Check if already exists + if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := os.Chmod(fullPath, tf.permission); err != nil { + return err + } + } + if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + return err + } + return nil +} + +func initWithFiles(files ...FileApplier) layerInit { + return func(root string) error { + for _, f := range files { + if err := f.ApplyFile(root); err != nil { + return err + } + } + return nil + } +} + +func getCachedLayer(l Layer) *roLayer { + if rl, ok := l.(*referencedCacheLayer); ok { + return rl.roLayer + } + return l.(*roLayer) +} + +func getMountLayer(l RWLayer) *mountedLayer { + return l.(*referencedRWLayer).mountedLayer +} + +func createMetadata(layers ...Layer) []Metadata { + metadata := make([]Metadata, len(layers)) + for i := range layers { + size, err := layers[i].Size() + if err != nil { + panic(err) + } + + metadata[i].ChainID = layers[i].ChainID() + metadata[i].DiffID = layers[i].DiffID() + metadata[i].Size = size + metadata[i].DiffSize = getCachedLayer(layers[i]).size + } + + return metadata +} + +func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { + if len(metadata) != len(expectedMetadata) { + t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) + } + + for i := range metadata { + if metadata[i] != expectedMetadata[i] { + t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) + } + } + if t.Failed() { + t.FailNow() + } +} + +func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { + layerCount := len(ls.(*layerStore).layerMap) + expectedMetadata := createMetadata(removed...) 
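+ // Releasing this reference should delete the layer plus any parents in its chain that are no longer referenced; check both the returned Metadata and the resulting size of the layer map.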
+ metadata, err := ls.Release(layer) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, expectedMetadata) + + if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } +} + +func cacheID(l Layer) string { + return getCachedLayer(l).cacheID +} + +func assertLayerEqual(t *testing.T, l1, l2 Layer) { + if l1.ChainID() != l2.ChainID() { + t.Fatalf("Mismatched ChainID: %s vs %s", l1.ChainID(), l2.ChainID()) + } + if l1.DiffID() != l2.DiffID() { + t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) + } + + size1, err := l1.Size() + if err != nil { + t.Fatal(err) + } + + size2, err := l2.Size() + if err != nil { + t.Fatal(err) + } + + if size1 != size2 { + t.Fatalf("Mismatched size: %d vs %d", size1, size2) + } + + if cacheID(l1) != cacheID(l2) { + t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) + } + + p1 := l1.Parent() + p2 := l2.Parent() + if p1 != nil && p2 != nil { + assertLayerEqual(t, p1, p2) + } else if p1 != nil || p2 != nil { + t.Fatalf("Mismatched parents: %v vs %v", p1, p2) + } +} + +func TestMountAndRegister(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + size, _ := layer.Size() + t.Logf("Layer size: %d", size) + + mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), nil) + if err != nil { + t.Fatal(err) + } + + path2, err := mount2.Mount("") + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + if expected := "some test data"; string(b) != expected { + t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) + } + + if err := mount2.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(mount2); err != nil { + t.Fatal(err) + } +} + +func TestLayerRelease(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + t.Logf("Layer1: %s", layer1.ChainID()) + t.Logf("Layer2: %s", layer2.ChainID()) + t.Logf("Layer3a: %s", layer3a.ChainID()) + t.Logf("Layer3b: %s", layer3b.ChainID()) + + if expected := 4; len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } + + releaseAndCheckDeleted(t, ls, layer3b, layer3b) + releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) +} + +func TestStoreRestore(t 
*testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + t.Fatal(err) + } + + if err := m.Unmount(); err != nil { + t.Fatal(err) + } + + ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + layer3b, err := ls2.Get(layer3.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertLayerEqual(t, layer3b, layer3) + + // Create again with same name, should return error + if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + m2, err := ls2.GetRWLayer("some-mount_name") + if err != nil { + t.Fatal(err) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + if expected := "nothing here"; string(b) != expected { + t.Fatalf("Unexpected content %q, expected %q", string(b), expected) + } + + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) +} + +func TestTarStreamStability(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + } + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + files2 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + } + 
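+ // Register two independent tars as a two-layer chain, tamper with layer1's on-disk cache directory behind the store's back, and verify that the tar streams rebuilt from the tar-split metadata still match the originally registered bytes.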
+ tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + // hack layer to add file + p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") + if err != nil { + t.Fatal(err) + } + + if err := addedFile.ApplyFile(p); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + id1 := layer1.ChainID() + t.Logf("Layer 1: %s", layer1.ChainID()) + t.Logf("Layer 2: %s", layer2.ChainID()) + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar2, layer2) + + layer1b, err := ls.Get(id1) + if err != nil { + t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar1, layer1b) + + if _, err := ls.Release(layer1b); err != nil { + t.Fatal(err) + } +} + +func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { + expectedDigest := digest.FromBytes(expected) + + if digest.Digest(layer.DiffID()) != expectedDigest { + t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expectedDigest) + } + + ts, err := layer.TarStream() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + actual, err := ioutil.ReadAll(ts) + if err != nil { + t.Fatal(err) + } + + if len(actual) != len(expected) { + logByteDiff(t, actual, expected) + t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) + } + + actualDigest := digest.FromBytes(actual) + + if actualDigest != expectedDigest { + logByteDiff(t, actual, expected) + t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) + } +} + +const maxByteLog = 4 * 1024 + +func logByteDiff(t *testing.T, actual, expected []byte) { + d1, d2 := byteDiff(actual, expected) + if len(d1) == 0 && len(d2) == 0 { + return + } + + prefix := len(actual) - len(d1) + if len(d1) > maxByteLog || len(d2) > maxByteLog { + t.Logf("Byte diff after %d matching bytes", prefix) + } else { + t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) + } +} + +// byteDiff returns the differing bytes after the matching prefix +func byteDiff(b1, b2 []byte) ([]byte, []byte) { + i := 0 + for i < len(b1) && i < len(b2) { + if b1[i] != b2[i] { + break + } + i++ + } + + return b1[i:], b2[i:] +} + +func tarFromFiles(files ...FileApplier) ([]byte, error) { + td, err := ioutil.TempDir("", "tar-") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + + for _, f := range files { + if err := f.ApplyFile(td); err != nil { + return nil, err + } + } + + r, err := archive.Tar(td, archive.Uncompressed) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, r); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// assertReferences asserts that all the references are to the same +// image and represent the full set of references to that image.
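+// For example (sketch), registering identical content twice against the same parent should hand back two references to a single underlying roLayer: +// +//	la, _ := ls.Register(bytes.NewReader(tar), parent, Platform(runtime.GOOS)) +//	lb, _ := ls.Register(bytes.NewReader(tar), parent, Platform(runtime.GOOS)) +//	assertReferences(t, la, lb)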
+func assertReferences(t *testing.T, references ...Layer) { + if len(references) == 0 { + return + } + base := references[0].(*referencedCacheLayer).roLayer + seenReferences := map[Layer]struct{}{ + references[0]: {}, + } + for i := 1; i < len(references); i++ { + other := references[i].(*referencedCacheLayer).roLayer + if base != other { + t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) + } + if _, ok := base.references[references[i]]; !ok { + t.Fatalf("Reference not part of reference list: %v", references[i]) + } + if _, ok := seenReferences[references[i]]; ok { + t.Fatalf("Duplicated reference %v", references[i]) + } + } + if rc := len(base.references); rc != len(references) { + t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) + } +} + +func TestRegisterExistingLayer(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layerFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + } + + li := initWithFiles(baseFiles...) + layer1, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + tar1, err := tarFromFiles(layerFiles...) + if err != nil { + t.Fatal(err) + } + + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) +} + +func TestTarStreamVerification(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, tmpdir, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0644), + } + files2 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0600), // different perm + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) 
+ if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + id1 := digest.Digest(layer1.ChainID()) + id2 := digest.Digest(layer2.ChainID()) + + // Replace tar data files + src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer src.Close() + + dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + t.Fatal(err) + } + + src.Sync() + dst.Sync() + + ts, err := layer2.TarStream() + if err != nil { + t.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ts) + if err == nil { + t.Fatal("expected data verification to fail") + } + if !strings.Contains(err.Error(), "could not verify layer data") { + t.Fatalf("wrong error returned from tarstream: %q", err) + } +} diff --git a/layer/layer_unix.go b/layer/layer_unix.go new file mode 100644 index 0000000..776b78a --- /dev/null +++ b/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd solaris + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/layer/layer_unix_test.go b/layer/layer_unix_test.go new file mode 100644 index 0000000..9aa1afd --- /dev/null +++ b/layer/layer_unix_test.go @@ -0,0 +1,71 @@ +// +build !windows + +package layer + +import "testing" + +func graphDiffSize(ls Store, l Layer) (int64, error) { + cl := getCachedLayer(l) + var parent string + if cl.parent != nil { + parent = cl.parent.cacheID + } + return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) +} + +// Unix as Windows graph driver does not support Changes which is indirectly +// invoked by calling DiffSize on the driver +func TestLayerSize(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Added contents") + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) + if err != nil { + t.Fatal(err) + } + + layer1DiffSize, err := graphDiffSize(ls, layer1) + if err != nil { + t.Fatal(err) + } + + if int(layer1DiffSize) != len(content1) { + t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) + } + + layer1Size, err := layer1.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1); int(layer1Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) + } + + layer2DiffSize, err := graphDiffSize(ls, layer2) + if err != nil { + t.Fatal(err) + } + + if int(layer2DiffSize) != len(content2) { + t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) + } + + layer2Size, err := layer2.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1) + len(content2); int(layer2Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) + } + +} diff --git a/layer/layer_windows.go b/layer/layer_windows.go new file mode 100644 index 0000000..a1c1953 --- /dev/null +++ b/layer/layer_windows.go @@ -0,0 +1,34 
@@ +package layer + +import "errors" + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} diff --git a/layer/migration.go b/layer/migration.go new file mode 100644 index 0000000..4803a1a --- /dev/null +++ b/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. +func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.Digester() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer 
rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of created sources, but may not be the error returned + // to the caller (e.g. when the layer already exists). + var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/layer/migration_test.go b/layer/migration_test.go new file mode 100644 index 0000000..7364e6c --- /dev/null +++ b/layer/migration_test.go @@ -0,0 +1,435 @@ +package layer + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func writeTarSplitFile(name string, tarContent []byte) error { + f, err := os.OpenFile(name, 
os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fz := gzip.NewWriter(f) + + metaPacker := storage.NewJSONPacker(fz) + defer fz.Close() + + rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) + if err != nil { + return err + } + + if _, err := io.Copy(ioutil.Discard, rdr); err != nil { + return err + } + + return nil +} + +func TestLayerMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + tar1, err := tarFromFiles(layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(layer2Files...) + if err != nil { + t.Fatal(err) + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + + graphID1 := stringid.GenerateRandomID() + if err := graph.Create(graphID1, "", nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID1, "", bytes.NewReader(tar1)); err != nil { + t.Fatal(err) + } + + tf1 := filepath.Join(td, "tar1.json.gz") + if err := writeTarSplitFile(tf1, tar1); err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + graphID2 := stringid.GenerateRandomID() + if err := graph.Create(graphID2, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID2, graphID1, bytes.NewReader(tar2)); err != nil { + t.Fatal(err) + } + + tf2 := filepath.Join(td, "tar2.json.gz") + if err := writeTarSplitFile(tf2, tar2); err != nil { + t.Fatal(err) + } + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { + 
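+ // Round-trip the files through the graph driver: build a tar, apply it as a new layer on top of parentID, then read the driver's diff back so the returned bytes are exactly what the driver itself reproduces.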
t, err := tarFromFiles(files...) + if err != nil { + return nil, err + } + + if err := graph.Create(graphID, parentID, nil); err != nil { + return nil, err + } + if _, err := graph.ApplyDiff(graphID, parentID, bytes.NewReader(t)); err != nil { + return nil, err + } + + ar, err := graph.Diff(graphID, parentID) + if err != nil { + return nil, err + } + defer ar.Close() + + return ioutil.ReadAll(ar) +} + +func TestLayerMigrationNoTarsplit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + graphID1 := stringid.GenerateRandomID() + graphID2 := stringid.GenerateRandomID() + + tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) + if err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS)) + if err != nil { + t.Fatal(err) + } + + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func TestMountMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing (obvious - paths... 
needs porting) + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + initFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte{}, 0644), + newTestFile("/etc/resolv.conf", []byte{}, 0644), + } + mountFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), + } + + initTar, err := tarFromFiles(initFiles...) + if err != nil { + t.Fatal(err) + } + + mountTar, err := tarFromFiles(mountFiles...) + if err != nil { + t.Fatal(err) + } + + graph := ls.(*layerStore).driver + + layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) + if err != nil { + t.Fatal(err) + } + + graphID1 := layer1.(*referencedCacheLayer).cacheID + + containerID := stringid.GenerateRandomID() + containerInit := fmt.Sprintf("%s-init", containerID) + + if err := graph.Create(containerInit, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil { + t.Fatal(err) + } + + if err := graph.Create(containerID, containerInit, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { + t.Fatal(err) + } + + rwLayer1, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if _, err := rwLayer1.Mount(""); err != nil { + t.Fatal(err) + } + + changes, err := rwLayer1.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 5; len(changes) != expected { + t.Logf("Changes %#v", changes) + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/etc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/etc/hosts", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/root", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/root/.bashrc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[4], archive.Change{ + Path: "/root/testfile1.txt", + Kind: archive.ChangeAdd, + }) + + if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + rwLayer2, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { + t.Fatal("Expected same layer from get with same name as from migrate") + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if metadata, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) + } + + if err := rwLayer1.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { + t.Fatal(err) + } + + 
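+ // rwLayer2 was mounted twice above, so it takes two Unmount calls below before the final ReleaseRWLayer can actually remove the mount and its layers.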
if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + metadata, err := ls.ReleaseRWLayer(rwLayer2) + if err != nil { + t.Fatal(err) + } + if len(metadata) == 0 { + t.Fatal("Expected base layer to be deleted when deleting mount") + } + + assertMetadata(t, metadata, createMetadata(layer1)) +} diff --git a/layer/mount_test.go b/layer/mount_test.go new file mode 100644 index 0000000..f5799e7 --- /dev/null +++ b/layer/mount_test.go @@ -0,0 +1,239 @@ +package layer + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestMountInit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefile) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), rwLayerOpts) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + if expected := "init data!"; string(b) != expected { + t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) + } + + if fi.Mode().Perm() != 0777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + } +} + +func TestMountSize(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Mutable contents") + contentInit := []byte("why am I excluded from the size ☹") + + li := initWithFiles(newTestFile("file1", content1, 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + } + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + + m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), rwLayerOpts) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + t.Fatal(err) + } + + mountSize, err := m.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content2); int(mountSize) != expected { + t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) + } +} + +func TestMountChanges(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefiles := []FileApplier{ + newTestFile("testfile1.txt", []byte("base data!"), 0644), + newTestFile("testfile2.txt", []byte("base data!"), 0644), + newTestFile("testfile3.txt", []byte("base data!"), 0644), 
+ } + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefiles...) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + + m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), rwLayerOpts) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + t.Fatal(err) + } + + if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + t.Fatal(err) + } + + changes, err := m.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 4; len(changes) != expected { + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/testfile1.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/testfile2.txt", + Kind: archive.ChangeDelete, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/testfile3.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/testfile4.txt", + Kind: archive.ChangeAdd, + }) +} + +func assertChange(t *testing.T, actual, expected archive.Change) { + if actual.Path != expected.Path { + t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) + } + if actual.Kind != expected.Kind { + t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) + } +} + +func sortChanges(changes []archive.Change) { + cs := &changeSorter{ + changes: changes, + } + sort.Sort(cs) +} + +type changeSorter struct { + changes []archive.Change +} + +func (cs *changeSorter) Len() int { + return len(cs.changes) +} + +func (cs *changeSorter) Swap(i, j int) { + cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] +} + +func (cs *changeSorter) Less(i, j int) bool { + return cs.changes[i].Path < cs.changes[j].Path +} diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go new file mode 100644 index 0000000..a5cfcfa --- /dev/null +++ b/layer/mounted_layer.go @@ -0,0 +1,99 @@ +package layer + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
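+ // (A non-nil interface wrapping a (*roLayer)(nil) would make a caller's `parent == nil` check fail.)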
+ return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/layer/ro_layer.go b/layer/ro_layer.go new file mode 100644 index 0000000..687b924 --- /dev/null +++ b/layer/ro_layer.go @@ -0,0 +1,190 @@ +package layer + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + platform Platform + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarantees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + rc, err := rl.layerStore.getTarStream(rl) + if err != nil { + return nil, err + } + + vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return vrc, nil +} + +// TarSeekStream for roLayer guarantees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarSeekStream() (ioutils.ReadSeekCloser, error) { + return rl.layerStore.getTarSeekStream(rl) +} + +// TarStreamFrom does not make any guarantees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. 
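+// A sketch of intended use, assuming `child` was registered on top of `base`: +// +//	rc, err := child.TarStreamFrom(base.ChainID()) +//	if err != nil { +//		// handle error +//	} +//	defer rc.Close() // rc carries only the diff between base and child, unverified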
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + if err := tx.SetPlatform(layer.platform); err != nil { + return err + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: dgst.Verifier(), + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/layer/ro_layer_unix.go b/layer/ro_layer_unix.go new file mode 100644 index 0000000..1b36856 --- /dev/null +++ b/layer/ro_layer_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package layer + +func (rl *roLayer) Platform() Platform { + return "" +} diff --git a/layer/ro_layer_windows.go b/layer/ro_layer_windows.go new file mode 100644 index 0000000..6679bdf --- /dev/null +++ b/layer/ro_layer_windows.go @@ -0,0 +1,16 @@ +package layer + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} + +func (rl *roLayer) Platform() Platform { + if rl.platform == "" { + return "windows" + } + return rl.platform +} diff --git a/libcontainerd/client.go b/libcontainerd/client.go new file mode 100644 index 0000000..c9004b8 --- /dev/null +++ b/libcontainerd/client.go @@ -0,0 +1,46 @@ +package libcontainerd + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" +) + +// clientCommon contains the platform agnostic fields used in the client structure +type clientCommon struct { + backend Backend + containers map[string]*container + locker *locker.Locker + mapMutex sync.RWMutex // protects read/write operations from containers map +} + +func (clnt *client) lock(containerID string) { + clnt.locker.Lock(containerID) +} + +func (clnt *client) unlock(containerID string) { + clnt.locker.Unlock(containerID) +} + +// must hold a lock for cont.containerID +func (clnt *client) appendContainer(cont *container) { + clnt.mapMutex.Lock() + clnt.containers[cont.containerID] = cont + clnt.mapMutex.Unlock() +} +func (clnt *client) deleteContainer(containerID string) { + clnt.mapMutex.Lock() + delete(clnt.containers, containerID) + clnt.mapMutex.Unlock() +} + +func (clnt *client) getContainer(containerID string) (*container, error) { + clnt.mapMutex.RLock() + container, ok := clnt.containers[containerID] + defer clnt.mapMutex.RUnlock() + if !ok { + return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error + } + return container, nil +} diff --git a/libcontainerd/client_linux.go b/libcontainerd/client_linux.go new file mode 100644 index 0000000..a6986b5 --- /dev/null +++ b/libcontainerd/client_linux.go @@ -0,0 +1,619 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + containerd_runtime_types "github.com/containerd/containerd/runtime" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +type client struct { + clientCommon + + // Platform specific properties below here. 
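+ // remote holds the connection to the containerd daemon; q serializes event handling and exitNotifiers fans exit events out to per-container waiters.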
+ remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + + spec, err := container.spec() + if err != nil { + return -1, err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if len(specp.Env) > 0 { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities.Bounding = specp.Capabilities + sp.Capabilities.Effective = specp.Capabilities + sp.Capabilities.Inheritable = specp.Capabilities + sp.Capabilities.Permitted = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(syscall.Stdin), + Stdout: p.fifo(syscall.Stdout), + Stderr: p.fifo(syscall.Stderr), + Capabilities: sp.Capabilities.Effective, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + Rlimits: convertRlimits(sp.Rlimits), + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := p.openFifos(fifoCtx, sp.Terminal) + if err != nil { + return -1, err + } + + resp, err := clnt.remote.apiClient.AddProcess(ctx, r) + if err != nil { + p.closeFifos(iopipe) + return -1, err + } + + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + if err2 := p.sendCloseStdin(); err == nil { + err = err2 + } + }) + return err + }) + + container.processes[processFriendlyName] = p + + if err := attachStdio(*iopipe); err != nil { + p.closeFifos(iopipe) + return -1, err + } + + return int(resp.SystemPid), nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: pid, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := 
clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happened without a clean daemon shutdown +func (clnt *client) cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string, exitCode uint32) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + err := clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: exitCode, + }}) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. 
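+// (The Windows implementation reports per-process details here; Linux callers use GetPidsForContainer instead.)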
+func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) 
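The exit-notifier plumbing used by getExitNotifier and getOrCreateExitNotifier above (together with the exitNotifier type defined later in client_unix.go) is easier to follow in isolation. Below is a minimal, self-contained sketch of the same pattern with illustrative names rather than the package's real API: a mutex-guarded get-or-create map of per-container channels, each closed exactly once when the exit event arrives.

package main

import (
	"fmt"
	"sync"
	"time"
)

type notifier struct {
	id     string
	c      chan struct{}
	once   sync.Once
	parent *registry
}

// wait returns the channel a caller can block on until close() runs.
func (n *notifier) wait() <-chan struct{} { return n.c }

// close unblocks all waiters exactly once and drops the registry entry.
func (n *notifier) close() {
	n.once.Do(func() {
		close(n.c)
		n.parent.mu.Lock()
		if n.parent.m[n.id] == n {
			delete(n.parent.m, n.id)
		}
		n.parent.mu.Unlock()
	})
}

type registry struct {
	mu sync.RWMutex
	m  map[string]*notifier
}

// getOrCreate holds the write lock across lookup and insert so that
// concurrent callers always share a single notifier per container ID.
func (r *registry) getOrCreate(id string) *notifier {
	r.mu.Lock()
	defer r.mu.Unlock()
	n, ok := r.m[id]
	if !ok {
		n = &notifier{id: id, c: make(chan struct{}), parent: r}
		r.m[id] = n
	}
	return n
}

func main() {
	r := &registry{m: make(map[string]*notifier)}
	w := r.getOrCreate("abc123")
	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for the containerd exit event
		r.getOrCreate("abc123").close()
	}()
	<-w.wait()
	fmt.Println("container exited")
}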
+	container.systemPid = systemPid(cont)
+
+	var terminal bool
+	for _, p := range cont.Processes {
+		if p.Pid == InitFriendlyName {
+			terminal = p.Terminal
+		}
+	}
+
+	fifoCtx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	iopipe, err := container.openFifos(fifoCtx, terminal)
+	if err != nil {
+		return err
+	}
+	var stdinOnce sync.Once
+	stdin := iopipe.Stdin
+	iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error {
+		var err error
+		stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed
+			err = stdin.Close()
+		})
+		return err
+	})
+
+	if err := attachStdio(*iopipe); err != nil {
+		container.closeFifos(iopipe)
+		return err
+	}
+
+	clnt.appendContainer(container)
+
+	err = clnt.backend.StateChanged(containerID, StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State: StateRestore,
+			Pid:   container.systemPid,
+		}})
+
+	if err != nil {
+		container.closeFifos(iopipe)
+		return err
+	}
+
+	if lastEvent != nil {
+		// This should only be a pause or resume event
+		if lastEvent.Type == StatePause || lastEvent.Type == StateResume {
+			return clnt.backend.StateChanged(containerID, StateInfo{
+				CommonStateInfo: CommonStateInfo{
+					State: lastEvent.Type,
+					Pid:   container.systemPid,
+				}})
+		}
+
+		logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent)
+	}
+
+	return nil
+}
+
+func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) {
+	er := &containerd.EventsRequest{
+		Timestamp:  tsp,
+		StoredOnly: true,
+		Id:         id,
+	}
+	events, err := clnt.remote.apiClient.Events(context.Background(), er)
+	if err != nil {
+		logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err)
+		return nil, err
+	}
+
+	var ev *containerd.Event
+	for {
+		e, err := events.Recv()
+		if err != nil {
+			if err.Error() == "EOF" {
+				break
+			}
+			logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err)
+			return nil, err
+		}
+		ev = e
+		logrus.Debugf("libcontainerd: received past event %#v", ev)
+	}
+
+	return ev, nil
+}
+
+func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) {
+	ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp)
+	if err == nil && ev == nil {
+		// If ev is nil and the container is running in containerd,
+		// we have already consumed all the events of the
+		// container, including the "exit" one.
+		// Thus, we request all events containerd has in memory for
+		// this container in order to get the last one (which should
+		// be an exit event).
+		logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id)
+		// Request all events since beginning of time
+		t := time.Unix(0, 0)
+		tsp, err := ptypes.TimestampProto(t)
+		if err != nil {
+			logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err)
+			return nil, err
+		}
+
+		return clnt.getContainerLastEventSinceTime(id, tsp)
+	}
+
+	return ev, err
+}
+
+func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error {
+	// Synchronize with live events
+	clnt.remote.Lock()
+	defer clnt.remote.Unlock()
+	// Check that containerd still knows this container.
+	//
+	// In the unlikely event that Restore for this container
+	// processes its past events before the main loop, those events
+	// will be processed twice. However, this is not an issue, as all
+	// they do is set the container state to the value it already has.
+	cont, err := clnt.getContainerdContainer(containerID)
+	// Get its last event
+	ev, eerr := clnt.getContainerLastEvent(containerID)
+	if err != nil || containerd_runtime_types.State(cont.Status) == containerd_runtime_types.Stopped {
+		if err != nil {
+			logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err)
+		}
+		if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) {
+			// Wait a while for the exit event
+			timeout := time.NewTimer(10 * time.Second)
+			tick := time.NewTicker(100 * time.Millisecond)
+		stop:
+			for {
+				select {
+				case <-timeout.C:
+					break stop
+				case <-tick.C:
+					ev, eerr = clnt.getContainerLastEvent(containerID)
+					if eerr != nil {
+						break stop
+					}
+					if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit {
+						break stop
+					}
+				}
+			}
+			timeout.Stop()
+			tick.Stop()
+		}
+
+		// Get the exit status for this container; if we don't have
+		// one, report an error.
+		ec := uint32(255)
+		if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit {
+			ec = ev.Status
+		}
+		clnt.setExited(containerID, ec)
+
+		return nil
+	}
+
+	// container is still alive
+	if clnt.liveRestore {
+		if err := clnt.restore(cont, ev, attachStdio, options...); err != nil {
+			logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err)
+		}
+		return nil
+	}
+
+	// Kill the container if liveRestore == false
+	w := clnt.getOrCreateExitNotifier(containerID)
+	clnt.lock(cont.Id)
+	container := clnt.newContainer(cont.BundlePath)
+	container.systemPid = systemPid(cont)
+	clnt.appendContainer(container)
+	clnt.unlock(cont.Id)
+
+	container.discardFifos()
+
+	if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil {
+		logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err)
+	}
+
+	// Let the main loop handle the exit event
+	clnt.remote.Unlock()
+
+	if ev != nil && ev.Type == StatePause {
+		// Resuming the container depends on the main loop, so we do it after Unlock()
+		logrus.Debugf("libcontainerd: %s was paused, resuming it so it can die", containerID)
+		if err := clnt.Resume(containerID); err != nil {
+			return fmt.Errorf("failed to resume container: %v", err)
+		}
+	}
+
+	select {
+	case <-time.After(10 * time.Second):
+		if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil {
+			logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err)
+		}
+		select {
+		case <-time.After(2 * time.Second):
+		case <-w.wait():
+			// relock because of the defer
+			clnt.remote.Lock()
+			return nil
+		}
+	case <-w.wait():
+		// relock because of the defer
+		clnt.remote.Lock()
+		return nil
+	}
+	// relock because of the defer
+	clnt.remote.Lock()
+
+	clnt.deleteContainer(containerID)
+
+	return clnt.setExited(containerID, uint32(255))
+}
+
+func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	if _, err := clnt.getContainer(containerID); err != nil {
+		return err
+	}
+
+	_, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{
+		Id: containerID,
+		Checkpoint: &containerd.Checkpoint{
+			Name:        checkpointID,
+			Exit:        exit,
+			Tcp:         true,
+			UnixSockets: true,
+			Shell:       false,
+			EmptyNS:     []string{"network"},
+		},
+		CheckpointDir: checkpointDir,
+	})
+	return err
+}
+
+func (clnt *client)
DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{ + Id: containerID, + Name: checkpointID, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return nil, err + } + + resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{ + Id: containerID, + CheckpointDir: checkpointDir, + }) + if err != nil { + return nil, err + } + return (*Checkpoints)(resp), nil +} diff --git a/libcontainerd/client_solaris.go b/libcontainerd/client_solaris.go new file mode 100644 index 0000000..cb93997 --- /dev/null +++ b/libcontainerd/client_solaris.go @@ -0,0 +1,101 @@ +package libcontainerd + +import "golang.org/x/net/context" + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) { + return -1, nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + return nil +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + return nil +} + +func (clnt *client) Pause(containerID string) error { + return nil +} + +func (clnt *client) Resume(containerID string) error { + return nil +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + return nil, nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + defer clnt.mapMutex.Unlock() + w, ok := clnt.exitNotifiers[containerID] + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + return nil +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + return nil, nil +} + +// Summary returns a summary of the processes running in a container. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +// UpdateResources updates resources for a running container. 
+func (clnt *client) UpdateResources(containerID string, resources Resources) error {
+	// Updating resources isn't supported on Solaris,
+	// but we return nil so that updating a container is still allowed.
+	return nil
+}
+
+func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
+	return nil
+}
+
+func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
+	return nil
+}
+
+func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
+	return nil, nil
+}
diff --git a/libcontainerd/client_unix.go b/libcontainerd/client_unix.go
new file mode 100644
index 0000000..6dbf3af
--- /dev/null
+++ b/libcontainerd/client_unix.go
@@ -0,0 +1,141 @@
+// +build linux solaris
+
+package libcontainerd
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/docker/docker/pkg/idtools"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/net/context"
+)
+
+func (clnt *client) prepareBundleDir(uid, gid int) (string, error) {
+	root, err := filepath.Abs(clnt.remote.stateDir)
+	if err != nil {
+		return "", err
+	}
+	if uid == 0 && gid == 0 {
+		return root, nil
+	}
+	p := string(filepath.Separator)
+	for _, d := range strings.Split(root, string(filepath.Separator))[1:] {
+		p = filepath.Join(p, d)
+		fi, err := os.Stat(p)
+		if err != nil && !os.IsNotExist(err) {
+			return "", err
+		}
+		if os.IsNotExist(err) || fi.Mode()&1 == 0 {
+			p = fmt.Sprintf("%s.%d.%d", p, uid, gid)
+			if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) {
+				return "", err
+			}
+		}
+	}
+	return p, nil
+}
+
+func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+
+	if _, err := clnt.getContainer(containerID); err == nil {
+		return fmt.Errorf("Container %s is already active", containerID)
+	}
+
+	uid, gid, err := getRootIDs(specs.Spec(spec))
+	if err != nil {
+		return err
+	}
+	dir, err := clnt.prepareBundleDir(uid, gid)
+	if err != nil {
+		return err
+	}
+
+	container := clnt.newContainer(filepath.Join(dir, containerID), options...)
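A worked example may help here: prepareBundleDir walks the state directory component by component, and any component that is missing or lacks the world-execute bit is replaced by a private "<component>.<uid>.<gid>" directory owned by the remapped root user. The sketch below reproduces that walk under the assumption that plain os.MkdirAll and os.Chown stand in for idtools.MkdirAndChown; the state directory and IDs are illustrative, and chowning generally requires privileges.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// prepareBundleDir mirrors the walk above: for non-root remapped users,
// each path component that is absent or not world-traversable (no o+x bit)
// is swapped for a "<component>.<uid>.<gid>" directory owned by that user.
func prepareBundleDir(root string, uid, gid int) (string, error) {
	if uid == 0 && gid == 0 {
		return root, nil
	}
	p := string(filepath.Separator)
	for _, d := range strings.Split(root, string(filepath.Separator))[1:] {
		p = filepath.Join(p, d)
		fi, err := os.Stat(p)
		if err != nil && !os.IsNotExist(err) {
			return "", err
		}
		if os.IsNotExist(err) || fi.Mode()&1 == 0 {
			p = fmt.Sprintf("%s.%d.%d", p, uid, gid)
			if err := os.MkdirAll(p, 0700); err != nil {
				return "", err
			}
			if err := os.Chown(p, uid, gid); err != nil {
				return "", err // needs privileges; fine for a root daemon
			}
		}
	}
	return p, nil
}

func main() {
	// With an illustrative state dir and uid/gid 1000, a component that is
	// not world-traversable comes back as, e.g., /tmp/ctrd-state.1000.1000.
	dir, err := prepareBundleDir("/tmp/ctrd-state", 1000, 1000)
	fmt.Println(dir, err)
}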
+	if err := container.clean(); err != nil {
+		return err
+	}
+
+	defer func() {
+		if err != nil {
+			container.clean()
+			clnt.deleteContainer(containerID)
+		}
+	}()
+
+	if err := idtools.MkdirAllAndChown(container.dir, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	f, err := os.Create(filepath.Join(container.dir, configFilename))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := json.NewEncoder(f).Encode(spec); err != nil {
+		return err
+	}
+	return container.start(&spec, checkpoint, checkpointDir, attachStdio)
+}
+
+func (clnt *client) Signal(containerID string, sig int) error {
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	_, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{
+		Id:     containerID,
+		Pid:    InitFriendlyName,
+		Signal: uint32(sig),
+	})
+	return err
+}
+
+func (clnt *client) newContainer(dir string, options ...CreateOption) *container {
+	container := &container{
+		containerCommon: containerCommon{
+			process: process{
+				dir: dir,
+				processCommon: processCommon{
+					containerID:  filepath.Base(dir),
+					client:       clnt,
+					friendlyName: InitFriendlyName,
+				},
+			},
+			processes: make(map[string]*process),
+		},
+	}
+	for _, option := range options {
+		if err := option.Apply(container); err != nil {
+			logrus.Errorf("libcontainerd: newContainer(): %v", err)
+		}
+	}
+	return container
+}
+
+type exitNotifier struct {
+	id     string
+	client *client
+	c      chan struct{}
+	once   sync.Once
+}
+
+func (en *exitNotifier) close() {
+	en.once.Do(func() {
+		close(en.c)
+		en.client.mapMutex.Lock()
+		if en == en.client.exitNotifiers[en.id] {
+			delete(en.client.exitNotifiers, en.id)
+		}
+		en.client.mapMutex.Unlock()
+	})
+}
+func (en *exitNotifier) wait() <-chan struct{} {
+	return en.c
+}
diff --git a/libcontainerd/client_windows.go b/libcontainerd/client_windows.go
new file mode 100644
index 0000000..455e8e5
--- /dev/null
+++ b/libcontainerd/client_windows.go
@@ -0,0 +1,754 @@
+package libcontainerd
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/sysinfo"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+type client struct {
+	clientCommon
+
+	// Platform specific properties below here (none presently on Windows)
+}
+
+// Win32 error codes that are used for various workarounds.
+// These really should be ALL_CAPS to match Go's syscall library and standard
+// Win32 error conventions, but golint insists on CamelCase.
+const (
+	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
+	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
+	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
+	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
+)
+
+// defaultOwner is a tag passed to HCS to allow it to differentiate between
+// container creator management stacks. We hard code "docker" in the case
+// of docker.
+const defaultOwner = "docker"
+
+// Create is the entrypoint to create a container from a spec, and if successfully
+// created, start it too. The table below shows the fields required for the HCS
+// JSON calling parameters; fields that are not populated are omitted.
+// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | | Isolation=Process | Isolation=Hyper-V | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | VolumePath | \\?\\Volume{GUIDa} | | +// | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | +// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | +// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// +// Isolation=Process example: +// +// { +// "SystemType": "Container", +// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Owner": "docker", +// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "5e0055c814a6", +// "MappedDirectories": [], +// "HvPartition": false, +// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], +// "Servicing": false +//} +// +// Isolation=Hyper-V example: +// +//{ +// "SystemType": "Container", +// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", +// "Owner": "docker", +// "IgnoreFlushesDuringBoot": true, +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "475c2c58933b", +// "MappedDirectories": [], +// "HvPartition": true, +// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], +// "DNSSearchList": "a.com,b.com,c.com", +// "HvRuntime": { +// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" +// }, +// "Servicing": false +//} +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if b, err := json.Marshal(spec); err == nil { + logrus.Debugln("libcontainerd: client.Create() with spec", string(b)) + } + osName := spec.Platform.OS + if osName == "windows" { + return clnt.createWindows(containerID, checkpoint, checkpointDir, spec, attachStdio, options...) + } + return clnt.createLinux(containerID, checkpoint, checkpointDir, spec, attachStdio, options...) 
+} + +func (clnt *client) createWindows(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + configuration := &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: containerID, + Owner: defaultOwner, + IgnoreFlushesDuringBoot: false, + HostName: spec.Hostname, + HvPartition: false, + } + + if spec.Windows.Resources != nil { + if spec.Windows.Resources.CPU != nil { + if spec.Windows.Resources.CPU.Count != nil { + // This check is being done here rather than in adaptContainerSettings + // because we don't want to update the HostConfig in case this container + // is moved to a host with more CPUs than this one. + cpuCount := *spec.Windows.Resources.CPU.Count + hostCPUCount := uint64(sysinfo.NumCPU()) + if cpuCount > hostCPUCount { + logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) + cpuCount = hostCPUCount + } + configuration.ProcessorCount = uint32(cpuCount) + } + if spec.Windows.Resources.CPU.Shares != nil { + configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) + } + if spec.Windows.Resources.CPU.Maximum != nil { + configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum) + } + } + if spec.Windows.Resources.Memory != nil { + if spec.Windows.Resources.Memory.Limit != nil { + configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 + } + } + if spec.Windows.Resources.Storage != nil { + if spec.Windows.Resources.Storage.Bps != nil { + configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps + } + if spec.Windows.Resources.Storage.Iops != nil { + configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops + } + } + } + + var layerOpt *LayerOption + for _, option := range options { + if s, ok := option.(*ServicingOption); ok { + configuration.Servicing = s.IsServicing + continue + } + if f, ok := option.(*FlushOption); ok { + configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot + continue + } + if h, ok := option.(*HyperVIsolationOption); ok { + configuration.HvPartition = h.IsHyperV + continue + } + if l, ok := option.(*LayerOption); ok { + layerOpt = l + } + if n, ok := option.(*NetworkEndpointsOption); ok { + configuration.EndpointList = n.Endpoints + configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery + if n.DNSSearchList != nil { + configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",") + } + configuration.NetworkSharedContainerName = n.NetworkSharedContainerID + continue + } + if c, ok := option.(*CredentialsOption); ok { + configuration.Credentials = c.Credentials + continue + } + } + + // We must have a layer option with at least one path + if layerOpt == nil || layerOpt.LayerPaths == nil { + return fmt.Errorf("no layer option or paths were supplied to the runtime") + } + + if configuration.HvPartition { + // Find the upper-most utility VM image, since the utility VM does not + // use layering in RS1. + // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. 
+ var uvmImagePath string + for _, path := range layerOpt.LayerPaths { + fullPath := filepath.Join(path, "UtilityVM") + _, err := os.Stat(fullPath) + if err == nil { + uvmImagePath = fullPath + break + } + if !os.IsNotExist(err) { + return err + } + } + if uvmImagePath == "" { + return errors.New("utility VM image could not be found") + } + configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} + } else { + configuration.VolumePath = spec.Root.Path + } + + configuration.LayerFolderPath = layerOpt.LayerFolderPath + + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: layerPath, + }) + } + + // Add the mounts (volumes, bind mounts etc) to the structure + mds := make([]hcsshim.MappedDir, len(spec.Mounts)) + for i, mount := range spec.Mounts { + mds[i] = hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: mount.Destination, + ReadOnly: false, + } + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + mds[i].ReadOnly = true + } + } + } + configuration.MappedDirectories = mds + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: createWindows() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: createWindows() id=%s completed successfully", containerID) + return nil + +} + +func (clnt *client) createLinux(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + logrus.Debugf("libcontainerd: createLinux(): containerId %s ", containerID) + + // TODO @jhowardmsft LCOW Support: This needs to be configurable, not hard-coded. + // However, good-enough for the LCOW bring-up. 
+ configuration := &hcsshim.ContainerConfig{ + HvPartition: true, + Name: containerID, + SystemType: "container", + ContainerType: "linux", + Owner: defaultOwner, + TerminateOnLastHandleClosed: true, + HvRuntime: &hcsshim.HvRuntime{ + ImagePath: `c:\Program Files\Linux Containers`, + LinuxKernelFile: `bootx64.efi`, + LinuxInitrdFile: `initrd.img`, + }, + } + + var layerOpt *LayerOption + for _, option := range options { + if l, ok := option.(*LayerOption); ok { + layerOpt = l + } + } + + // We must have a layer option with at least one path + if layerOpt == nil || layerOpt.LayerPaths == nil { + return fmt.Errorf("no layer option or paths were supplied to the runtime") + } + + // LayerFolderPath (writeable layer) + Layers (Guid + path) + configuration.LayerFolderPath = layerOpt.LayerFolderPath + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: filepath.Join(layerPath, "layer.vhd"), + }) + } + + for _, option := range options { + if n, ok := option.(*NetworkEndpointsOption); ok { + configuration.EndpointList = n.Endpoints + configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery + if n.DNSSearchList != nil { + configuration.DNSSearchList = strings.Join(n.DNSSearchList, ",") + } + configuration.NetworkSharedContainerName = n.NetworkSharedContainerID + break + } + } + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: createLinux() %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: createLinux() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: createLinux() id=%s completed successfully", containerID) + return nil +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. 
+ createProcessParms := hcsshim.ProcessConfig{ + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: !procToAdd.Terminal, + } + if procToAdd.Terminal { + createProcessParms.EmulateConsole = true + createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + } + + // Take working directory from the process to add if it is defined, + // otherwise take from the first process. + if procToAdd.Cwd != "" { + createProcessParms.WorkingDirectory = procToAdd.Cwd + } else { + createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) + if container.ociSpec.Platform.OS == "windows" { + createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + } else { + createProcessParms.CommandArgs = procToAdd.Args + } + createProcessParms.User = procToAdd.User.Username + + logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) + return -1, err + } + + pid := newProcess.Pid() + + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) + return -1, err + } + + iopipe := &IOPipe{Terminal: procToAdd.Terminal} + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + proc := &process{ + processCommon: processCommon{ + containerID: containerID, + friendlyName: processFriendlyName, + client: clnt, + systemPid: uint32(pid), + }, + hcsProcess: newProcess, + } + + // Add the process to the container's list of processes + container.processes[processFriendlyName] = proc + + // Tell the engine to attach streams back to the client + if err := attachStdio(*iopipe); err != nil { + return -1, err + } + + // Spin up a go routine waiting for exit to handle cleanup + go container.waitExit(proc, false) + + return pid, nil +} + +// Signal handles `docker stop` on Windows. While Linux has support for +// the full range of signals, signals aren't really implemented on Windows. +// We fake supporting regular stop and -9 to force kill. +func (clnt *client) Signal(containerID string, sig int) error { + var ( + cont *container + err error + ) + + // Get the container as we need it to get the container handle. 
+ clnt.lock(containerID) + defer clnt.unlock(containerID) + if cont, err = clnt.getContainer(containerID); err != nil { + return err + } + + cont.manualStopRequested = true + + logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) + + if syscall.Signal(sig) == syscall.SIGKILL { + // Terminate the compute system + if err := cont.hcsContainer.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) + } + } + } else { + // Shut down the container + if err := cont.hcsContainer.Shutdown(); err != nil { + if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) { + // ignore errors + logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err) + } + } + } + + return nil +} + +// While Linux has support for the full range of signals, signals aren't really implemented on Windows. +// We try to terminate the specified process whatever signal is requested. +func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + return p.hcsProcess.Kill() + } + } + + return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) +} + +// Resize handles a CLI event to resize an interactive docker run or docker exec +// window. +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + h, w := uint16(height), uint16(width) + + if processFriendlyName == InitFriendlyName { + logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) + return cont.process.hcsProcess.ResizeConsole(w, h) + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) + return p.hcsProcess.ResizeConsole(w, h) + } + } + + return fmt.Errorf("Resize could not find containerID %s to resize", containerID) + +} + +// Pause handles pause requests for containers +func (clnt *client) Pause(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot pause Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Pause() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StatePause, + }}) +} + +// Resume handles resume requests for containers +func (clnt *client) Resume(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + 
clnt.unlock(containerID)
+		}
+	}()
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return err
+	}
+
+	// This should never happen, since Windows Server Containers cannot be paused
+	for _, option := range container.options {
+		if h, ok := option.(*HyperVIsolationOption); ok {
+			if !h.IsHyperV {
+				return errors.New("cannot resume Windows Server Containers")
+			}
+			break
+		}
+	}
+
+	err = container.hcsContainer.Resume()
+	if err != nil {
+		return err
+	}
+
+	// Unlock container before calling back into the daemon
+	unlockContainer = false
+	clnt.unlock(containerID)
+
+	return clnt.backend.StateChanged(containerID, StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State: StateResume,
+		}})
+}
+
+// Stats handles stats requests for containers
+func (clnt *client) Stats(containerID string) (*Stats, error) {
+	// Get the libcontainerd container object
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return nil, err
+	}
+	s, err := container.hcsContainer.Statistics()
+	if err != nil {
+		return nil, err
+	}
+	st := Stats(s)
+	return &st, nil
+}
+
+// Restore is the handler for restoring a container
+func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
+	logrus.Debugf("libcontainerd: Restore(%s)", containerID)
+
+	// TODO Windows: On RS1, a re-attach isn't possible.
+	// However, there is a scenario in which there is an issue.
+	// Consider a background container. The daemon dies unexpectedly.
+	// HCS will still have the compute service alive and running.
+	// For consistency, we call in to terminate it regardless of whether
+	// HCS knows about it, and we explicitly just log a warning if the
+	// terminate fails. Then we tell the backend the container exited.
+	if hc, err := hcsshim.OpenContainer(containerID); err == nil {
+		const terminateTimeout = time.Minute * 2
+		err := hc.Terminate()
+
+		if hcsshim.IsPending(err) {
+			err = hc.WaitTimeout(terminateTimeout)
+		} else if hcsshim.IsAlreadyStopped(err) {
+			err = nil
+		}
+
+		if err != nil {
+			logrus.Warnf("libcontainerd: failed to terminate %s on restore - %q", containerID, err)
+			return err
+		}
+	}
+	return clnt.backend.StateChanged(containerID, StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State:    StateExit,
+			ExitCode: 1 << 31,
+		}})
+}
+
+// GetPidsForContainer returns a list of process IDs running in a container.
+// Not used on Windows.
+func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
+	return nil, errors.New("not implemented on Windows")
+}
+
+// Summary returns a summary of the processes running in a container.
+// This is present in Windows to support docker top. On Linux, the
+// engine shells out to ps to get process information. On Windows, as
+// the containers could be Hyper-V containers, they would not be
+// visible on the container host. However, libcontainerd does have
+// that information.
+func (clnt *client) Summary(containerID string) ([]Summary, error) {
+
+	// Get the libcontainerd container object
+	clnt.lock(containerID)
+	defer clnt.unlock(containerID)
+	container, err := clnt.getContainer(containerID)
+	if err != nil {
+		return nil, err
+	}
+	p, err := container.hcsContainer.ProcessList()
+	if err != nil {
+		return nil, err
+	}
+	pl := make([]Summary, len(p))
+	for i := range p {
+		pl[i] = Summary(p[i])
+	}
+	return pl, nil
+}
+
+// UpdateResources updates resources for a running container.
+func (clnt *client) UpdateResources(containerID string, resources Resources) error {
+	// Updating resources isn't supported on Windows,
+	// but we return nil so that updating a container is still allowed.
+	return nil
+}
+
+func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
+	return errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
+	return errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
+	return nil, errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
+	return &ServerVersion{}, nil
+}
diff --git a/libcontainerd/container.go b/libcontainerd/container.go
new file mode 100644
index 0000000..b403213
--- /dev/null
+++ b/libcontainerd/container.go
@@ -0,0 +1,13 @@
+package libcontainerd
+
+const (
+	// InitFriendlyName is the name given in the lookup map of processes
+	// for the first process started in a container.
+	InitFriendlyName = "init"
+	configFilename   = "config.json"
+)
+
+type containerCommon struct {
+	process
+	processes map[string]*process
+}
diff --git a/libcontainerd/container_unix.go b/libcontainerd/container_unix.go
new file mode 100644
index 0000000..be16999
--- /dev/null
+++ b/libcontainerd/container_unix.go
@@ -0,0 +1,246 @@
+// +build linux solaris
+
+package libcontainerd
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/docker/docker/pkg/ioutils"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/tonistiigi/fifo"
+	"golang.org/x/net/context"
+)
+
+type container struct {
+	containerCommon
+
+	// Platform specific fields are below here.
+	pauseMonitor
+	oom         bool
+	runtime     string
+	runtimeArgs []string
+}
+
+type runtime struct {
+	path string
+	args []string
+}
+
+// WithRuntime sets the runtime to be used for the created container
func WithRuntime(path string, args []string) CreateOption {
+	return runtime{path, args}
+}
+
+func (rt runtime) Apply(p interface{}) error {
+	if pr, ok := p.(*container); ok {
+		pr.runtime = rt.path
+		pr.runtimeArgs = rt.args
+	}
+	return nil
+}
+
+func (ctr *container) clean() error {
+	if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" {
+		return nil
+	}
+	if _, err := os.Lstat(ctr.dir); err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	if err := os.RemoveAll(ctr.dir); err != nil {
+		return err
+	}
+	return nil
+}
+
+// cleanProcess removes the fifos used by an additional process.
+// Caller needs to lock container ID before calling this method.
+func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) { + logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start(spec *specs.Spec, checkpoint, checkpointDir string, attachStdio StdioCallback) (err error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ready := make(chan struct{}) + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal) + if err != nil { + return err + } + + var stdinOnce sync.Once + + // we need to delay stdin closure after container start or else "stdin close" + // event will be rejected by containerd. + // stdin closure happens in attachStdio + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + go func() { + select { + case <-ready: + case <-ctx.Done(): + } + select { + case <-ready: + if err := ctr.sendCloseStdin(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + default: + } + }() + }) + return err + }) + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + Checkpoint: checkpoint, + CheckpointDir: checkpointDir, + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + Runtime: ctr.runtime, + RuntimeArgs: ctr.runtimeArgs, + } + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + ctr.closeFifos(iopipe) + return err + } + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.systemPid = systemPid(resp.Container) + close(ready) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }}) + +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: e.Type, + ExitCode: e.Status, + }, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + + // Remove process from list if we have exited + switch st.State { + case StateExit: + ctr.clean() + 
ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("libcontainerd: event unhandled: %+v", e) + } + return nil +} + +// discardFifos attempts to fully read the container fifos to unblock processes +// that may be blocked on the writer side. +func (ctr *container) discardFifos() { + ctx, _ := context.WithTimeout(context.Background(), 3*time.Second) + for _, i := range []int{syscall.Stdout, syscall.Stderr} { + f, err := fifo.OpenFifo(ctx, ctr.fifo(i), syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + logrus.Warnf("error opening fifo %v for discarding: %+v", f, err) + continue + } + go func() { + io.Copy(ioutil.Discard, f) + }() + } +} diff --git a/libcontainerd/container_windows.go b/libcontainerd/container_windows.go new file mode 100644 index 0000000..af3e0ef --- /dev/null +++ b/libcontainerd/container_windows.go @@ -0,0 +1,330 @@ +package libcontainerd + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "syscall" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. There are none presently on Windows. + options []CreateOption + + // The ociSpec is required, as client.Create() needs a spec, + // but can be called from the RestartManager context which does not + // otherwise have access to the Spec + ociSpec specs.Spec + + manualStopRequested bool + hcsContainer hcsshim.Container +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +// start starts a created container. +// Caller needs to lock container ID before calling this method. +func (ctr *container) start(attachStdio StdioCallback) error { + var err error + isServicing := false + + for _, option := range ctr.options { + if s, ok := option.(*ServicingOption); ok && s.IsServicing { + isServicing = true + } + } + + // Start the container. If this is a servicing container, this call will block + // until the container is done with the servicing execution. + logrus.Debugln("libcontainerd: starting container ", ctr.containerID) + if err = ctr.hcsContainer.Start(); err != nil { + logrus.Errorf("libcontainerd: failed to start container: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate") + } + return err + } + + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. 
+ createProcessParms := &hcsshim.ProcessConfig{ + EmulateConsole: ctr.ociSpec.Process.Terminal, + WorkingDirectory: ctr.ociSpec.Process.Cwd, + CreateStdInPipe: !isServicing, + CreateStdOutPipe: !isServicing, + CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, + } + createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) + if ctr.ociSpec.Platform.OS == "windows" { + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + } else { + createProcessParms.CommandArgs = ctr.ociSpec.Process.Args + } + createProcessParms.User = ctr.ociSpec.Process.User.Username + + // LCOW requires the raw OCI spec passed through HCS and onwards to GCS for the utility VM. + if system.LCOWSupported() && ctr.ociSpec.Platform.OS == "linux" { + ociBuf, err := json.Marshal(ctr.ociSpec) + if err != nil { + return err + } + ociRaw := json.RawMessage(ociBuf) + createProcessParms.OCISpecification = &ociRaw + } + + // Start the command running in the container. + newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") + } + return err + } + + pid := newProcess.Pid() + + // Save the hcs Process and PID + ctr.process.friendlyName = InitFriendlyName + ctr.process.hcsProcess = newProcess + + // If this is a servicing container, wait on the process synchronously here and + // if it succeeds, wait for it cleanly shutdown and merge into the parent container. + if isServicing { + exitCode := ctr.waitProcessExitCode(&ctr.process) + + if exitCode != 0 { + if err := ctr.terminate(); err != nil { + logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err) + } + return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) + } + + return ctr.hcsContainer.WaitTimeout(time.Minute * 5) + } + + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) + } + return err + } + + iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} + + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + // Save the PID + logrus.Debugf("libcontainerd: process started - PID %d", pid) + ctr.systemPid = uint32(pid) + + // Spin up a go routine waiting for exit to handle cleanup + go ctr.waitExit(&ctr.process, true) + + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + // OK to return the error here, as waitExit will handle tear-down in HCS + return err + } + + // Tell the docker engine that the container has started. 
+	si := StateInfo{
+		CommonStateInfo: CommonStateInfo{
+			State: StateStart,
+			Pid:   ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft
+		}}
+	logrus.Debugf("libcontainerd: start() completed OK, %+v", si)
+	return ctr.client.backend.StateChanged(ctr.containerID, si)
+
+}
+
+// waitProcessExitCode will wait for the given process to exit and return its exit code.
+func (ctr *container) waitProcessExitCode(process *process) int {
+	// Block indefinitely for the process to exit.
+	err := process.hcsProcess.Wait()
+	if err != nil {
+		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
+			logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err)
+		}
+		// Fall through here, do not return. This ensures we attempt to continue the
+		// shutdown in HCS and tell the docker engine that the process/container
+		// has exited to avoid a container being dropped on the floor.
+	}
+
+	exitCode, err := process.hcsProcess.ExitCode()
+	if err != nil {
+		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
+			logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID)
+		}
+		// Since we got an error retrieving the exit code, make sure that the code we return
+		// doesn't incorrectly indicate success.
+		exitCode = -1
+
+		// Fall through here, do not return. This ensures we attempt to continue the
+		// shutdown in HCS and tell the docker engine that the process/container
+		// has exited to avoid a container being dropped on the floor.
+	}
+
+	return exitCode
+}
+
+// waitExit runs as a goroutine waiting for the process to exit. It is the
+// Windows equivalent of the Linux containerd world, where events arrive as
+// state-change notifications from containerd.
+func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { + logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) + + exitCode := ctr.waitProcessExitCode(process) + // Lock the container while removing the process/container from the list + ctr.client.lock(ctr.containerID) + + if !isFirstProcessToStart { + ctr.cleanProcess(process.friendlyName) + } else { + ctr.client.deleteContainer(ctr.containerID) + } + + // Unlock here so other threads are unblocked + ctr.client.unlock(ctr.containerID) + + // Assume the container has exited + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: uint32(exitCode), + Pid: process.systemPid, + ProcessID: process.friendlyName, + }, + UpdatePending: false, + } + + // But it could have been an exec'd process which exited + if !isFirstProcessToStart { + si.State = StateExitProcess + } else { + // Pending updates is only applicable for WCOW + if ctr.ociSpec.Platform.OS == "windows" { + updatePending, err := ctr.hcsContainer.HasPendingUpdates() + if err != nil { + logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) + } else { + si.UpdatePending = updatePending + } + } + + logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) + if err := ctr.shutdown(); err != nil { + logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) + } else { + logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) + } + if err := ctr.hcsContainer.Close(); err != nil { + logrus.Error(err) + } + } + + if err := process.hcsProcess.Close(); err != nil { + logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) + } + + // Call into the backend to notify it of the state change. + logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) + if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { + logrus.Error(err) + } + + logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) + + return nil +} + +// cleanProcess removes process from the map. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + delete(ctr.processes, id) +} + +// shutdown shuts down the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) shutdown() error { + const shutdownTimeout = time.Minute * 5 + err := ctr.hcsContainer.Shutdown() + if hcsshim.IsPending(err) { + // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. + err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) + if err := ctr.terminate(); err != nil { + return err + } + return err + } + + return nil +} + +// terminate terminates the container in HCS +// Caller needs to lock container ID before calling this method. 
+func (ctr *container) terminate() error { + const terminateTimeout = time.Minute * 5 + err := ctr.hcsContainer.Terminate() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(terminateTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) + return err + } + + return nil +} diff --git a/libcontainerd/oom_linux.go b/libcontainerd/oom_linux.go new file mode 100644 index 0000000..e126b7a --- /dev/null +++ b/libcontainerd/oom_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/system" +) + +func setOOMScore(pid, score int) error { + oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid) + f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0) + if err != nil { + return err + } + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + f.Close() + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !system.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath) + } + return nil + } + return err +} diff --git a/libcontainerd/oom_solaris.go b/libcontainerd/oom_solaris.go new file mode 100644 index 0000000..2ebe5e8 --- /dev/null +++ b/libcontainerd/oom_solaris.go @@ -0,0 +1,5 @@ +package libcontainerd + +func setOOMScore(pid, score int) error { + return nil +} diff --git a/libcontainerd/pausemonitor_unix.go b/libcontainerd/pausemonitor_unix.go new file mode 100644 index 0000000..4f3766d --- /dev/null +++ b/libcontainerd/pausemonitor_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package libcontainerd + +import ( + "sync" +) + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + sync.Mutex + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/libcontainerd/process.go b/libcontainerd/process.go new file mode 100644 index 0000000..57562c8 --- /dev/null +++ b/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. 
+type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/libcontainerd/process_unix.go b/libcontainerd/process_unix.go new file mode 100644 index 0000000..3b54e32 --- /dev/null +++ b/libcontainerd/process_unix.go @@ -0,0 +1,107 @@ +// +build linux solaris + +package libcontainerd + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + goruntime "runtime" + "strings" + + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +var fdNames = map[int]string{ + unix.Stdin: "stdin", + unix.Stdout: "stdout", + unix.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. + dir string +} + +func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) { + if err := os.MkdirAll(p.dir, 0700); err != nil { + return nil, err + } + + io := &IOPipe{} + + io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdin.Close() + } + }() + + io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdout.Close() + } + }() + + if goruntime.GOOS == "solaris" || !terminal { + // For Solaris terminal handling is done exclusively by the runtime therefore we make no distinction + // in the processing for terminal and !terminal cases. + io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + io.Stderr.Close() + } + }() + } else { + io.Stderr = ioutil.NopCloser(emptyReader{}) + } + + return io, nil +} + +func (p *process) sendCloseStdin() error { + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) { + return nil + } + return err +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + io.Stdout.Close() + io.Stderr.Close() +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/libcontainerd/process_windows.go b/libcontainerd/process_windows.go new file mode 100644 index 0000000..854c4dd --- /dev/null +++ b/libcontainerd/process_windows.go @@ -0,0 +1,48 @@ +package libcontainerd + +import ( + "io" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/ioutils" +) + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. 
+	hcsProcess hcsshim.Process
+}
+
+type autoClosingReader struct {
+	io.ReadCloser
+	sync.Once
+}
+
+func (r *autoClosingReader) Read(b []byte) (n int, err error) {
+	n, err = r.ReadCloser.Read(b)
+	if err == io.EOF {
+		r.Once.Do(func() { r.ReadCloser.Close() })
+	}
+	return
+}
+
+func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(pipe, func() error {
+		if err := pipe.Close(); err != nil {
+			return err
+		}
+
+		err := process.CloseStdin()
+		if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) {
+			// This error will occur if the compute system is currently shutting down
+			if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
diff --git a/libcontainerd/queue_unix.go b/libcontainerd/queue_unix.go
new file mode 100644
index 0000000..66765f7
--- /dev/null
+++ b/libcontainerd/queue_unix.go
@@ -0,0 +1,37 @@
+// +build linux solaris
+
+package libcontainerd
+
+import "sync"
+
+// queue serializes the functions appended under the same id, while functions
+// appended under different ids run concurrently.
+type queue struct {
+	sync.Mutex
+	fns map[string]chan struct{}
+}
+
+func (q *queue) append(id string, f func()) {
+	q.Lock()
+	defer q.Unlock()
+
+	if q.fns == nil {
+		q.fns = make(map[string]chan struct{})
+	}
+
+	done := make(chan struct{})
+
+	fn, ok := q.fns[id]
+	q.fns[id] = done
+	go func() {
+		if ok {
+			// Wait for the previously appended function to finish.
+			<-fn
+		}
+		f()
+		close(done)
+
+		q.Lock()
+		if q.fns[id] == done {
+			delete(q.fns, id)
+		}
+		q.Unlock()
+	}()
+}
diff --git a/libcontainerd/queue_unix_test.go b/libcontainerd/queue_unix_test.go
new file mode 100644
index 0000000..bb49a5d
--- /dev/null
+++ b/libcontainerd/queue_unix_test.go
@@ -0,0 +1,33 @@
+// +build linux solaris
+
+package libcontainerd
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSerialization(t *testing.T) {
+	var (
+		q             queue
+		serialization = 1
+	)
+
+	q.append("aaa", func() {
+		// simulate a long-running task
+		time.Sleep(10 * time.Millisecond)
+		require.EqualValues(t, serialization, 1)
+		serialization = 2
+	})
+	q.append("aaa", func() {
+		require.EqualValues(t, serialization, 2)
+		serialization = 3
+	})
+	q.append("aaa", func() {
+		require.EqualValues(t, serialization, 3)
+		serialization = 4
+	})
+	time.Sleep(20 * time.Millisecond)
+}
diff --git a/libcontainerd/remote.go b/libcontainerd/remote.go
new file mode 100644
index 0000000..9031e3a
--- /dev/null
+++ b/libcontainerd/remote.go
@@ -0,0 +1,20 @@
+package libcontainerd
+
+// Remote on Linux defines the access point to the containerd grpc API.
+// Remote on Windows is largely an unimplemented interface as there is
+// no remote containerd.
+type Remote interface {
+	// Client returns a new Client instance connected with given Backend.
+	Client(Backend) (Client, error)
+	// Cleanup stops containerd if it was started by libcontainerd.
+	// Note this is not used on Windows as there is no remote containerd.
+	Cleanup()
+	// UpdateOptions allows various remote options to be updated at runtime.
+	UpdateOptions(...RemoteOption) error
+}
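+
+// connectExample is an illustrative sketch of the intended wiring: create a
+// Remote for a state directory, then derive a Client bound to a Backend that
+// will receive StateChanged callbacks. The function is a hypothetical example
+// and nothing in this patch calls it; on Linux, options such as
+// WithStartDaemon(true) may additionally be passed to New.
+func connectExample(stateDir string, b Backend) (Client, error) {
+	r, err := New(stateDir)
+	if err != nil {
+		return nil, err
+	}
+	return r.Client(b)
+}
+
+// RemoteOption allows configuring parameters of remotes.
+// This is unused on Windows.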
+type RemoteOption interface { + Apply(Remote) error +} diff --git a/libcontainerd/remote_unix.go b/libcontainerd/remote_unix.go new file mode 100644 index 0000000..a81a93c --- /dev/null +++ b/libcontainerd/remote_unix.go @@ -0,0 +1,565 @@ +// +build linux solaris + +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/system" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + containerdHealthCheckTimeout = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + containerdStateDir = "containerd" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closedManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + runtime string + runtimeArgs []string + daemonWaitCh chan struct{} + liveRestore bool + oomScore int + restoreFromTimestamp *timestamp.Timestamp +} + +// New creates a fresh instance of libcontainerd remote. +func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := system.MkdirAll(stateDir, 0700, ""); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + // don't output the grpc reconnect logging + grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) + dialOpts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.WithBackoffMaxDelay(2 * time.Second), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + } + conn, err := grpc.Dial(r.rpcAddr, dialOpts...) 
+ if err != nil { + return nil, fmt.Errorf("error connecting to containerd: %v", err) + } + + r.rpcConn = conn + r.apiClient = containerd.NewAPIClient(conn) + + // Get the timestamp to restore from + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + r.restoreFromTimestamp = tsp + + go r.handleConnectionChange() + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) UpdateOptions(options ...RemoteOption) error { + for _, option := range options { + if err := option.Apply(r); err != nil { + return err + } + } + return nil +} + +func (r *remote) handleConnectionChange() { + var transientFailureCount = 0 + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(r.rpcConn) + + for { + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err == nil { + continue + } + + logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) + + if r.daemonPid != -1 { + if r.closedManually { + // Well, we asked for it to stop, just return + return + } + // all other errors are transient + // Reset state to be notified of next failure + transientFailureCount++ + if transientFailureCount >= maxConnectionRetryCount { + transientFailureCount = 0 + if system.IsProcessAlive(r.daemonPid) { + system.KillProcess(r.daemonPid) + } + <-r.daemonWaitCh + if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error + logrus.Errorf("libcontainerd: error restarting containerd: %v", err) + } + continue + } + } + } +} + +func (r *remote) Cleanup() { + if r.daemonPid == -1 { + return + } + r.closedManually = true + r.rpcConn.Close() + // Ask the daemon to quit + syscall.Kill(r.daemonPid, syscall.SIGTERM) + + // Wait up to 15secs for it to stop + for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { + if !system.IsProcessAlive(r.daemonPid) { + break + } + time.Sleep(time.Second) + } + + if system.IsProcessAlive(r.daemonPid) { + logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) + syscall.Kill(r.daemonPid, syscall.SIGKILL) + } + + // cleanup some files + os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) + os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + liveRestore: r.liveRestore, + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + defer f.Close() + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } +} + +func (r *remote) getLastEventTimestamp() time.Time { + t 
:= time.Now()
+
+	fi, err := os.Stat(r.eventTsPath)
+	if err != nil || fi.Size() == 0 {
+		// Covers both a missing timestamp file and any other stat failure;
+		// calling fi.Size() after a failed stat would dereference a nil FileInfo.
+		return t
+	}
+
+	f, err := os.Open(r.eventTsPath)
+	if err != nil {
+		logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
+		return t
+	}
+	defer f.Close()
+
+	b := make([]byte, fi.Size())
+	n, err := f.Read(b)
+	if err != nil || n != len(b) {
+		logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err)
+		return t
+	}
+
+	t.UnmarshalText(b)
+
+	return t
+}
+
+func (r *remote) startEventsMonitor() error {
+	// First, get past events
+	t := r.getLastEventTimestamp()
+	tsp, err := ptypes.TimestampProto(t)
+	if err != nil {
+		logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err)
+	}
+	er := &containerd.EventsRequest{
+		Timestamp: tsp,
+	}
+
+	var events containerd.API_EventsClient
+	for {
+		events, err = r.apiClient.Events(context.Background(), er, grpc.FailFast(false))
+		if err == nil {
+			break
+		}
+		logrus.Warnf("libcontainerd: failed to get events from containerd: %q", err)
+
+		if r.closedManually {
+			// ignore error if grpc remote connection is closed manually
+			return nil
+		}
+
+		<-time.After(100 * time.Millisecond)
+	}
+
+	go r.handleEventStream(events)
+	return nil
+}
+
+func (r *remote) handleEventStream(events containerd.API_EventsClient) {
+	for {
+		e, err := events.Recv()
+		if err != nil {
+			if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc &&
+				r.closedManually {
+				// ignore error if grpc remote connection is closed manually
+				return
+			}
+			logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err)
+			go r.startEventsMonitor()
+			return
+		}
+
+		logrus.Debugf("libcontainerd: received containerd event: %#v", e)
+
+		var container *container
+		var c *client
+		r.RLock()
+		for _, c = range r.clients {
+			container, err = c.getContainer(e.Id)
+			if err == nil {
+				break
+			}
+		}
+		r.RUnlock()
+		if container == nil {
+			logrus.Warnf("libcontainerd: unknown container %s", e.Id)
+			continue
+		}
+
+		if err := container.handleEvent(e); err != nil {
+			logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err)
+		}
+
+		tsp, err := ptypes.Timestamp(e.Timestamp)
+		if err != nil {
+			logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err)
+			continue
+		}
+
+		r.updateEventTimestamp(tsp)
+	}
+}
+
+func (r *remote) runContainerdDaemon() error {
+	pidFilename := filepath.Join(r.stateDir, containerdPidFilename)
+	f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// File exists; check if the daemon is alive
+	b := make([]byte, 8)
+	n, err := f.Read(b)
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	if n > 0 {
+		pid, err := strconv.ParseUint(string(b[:n]), 10, 64)
+		if err != nil {
+			return err
+		}
+		if system.IsProcessAlive(int(pid)) {
+			logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid)
+			r.daemonPid = int(pid)
+			return nil
+		}
+	}
+
+	// rewind the file
+	_, err = f.Seek(0, os.SEEK_SET)
+	if err != nil {
+		return err
+	}
+
+	// Truncate it
+	err = f.Truncate(0)
+	if err != nil {
+		return err
+	}
+
+	// Start a new instance
+	args := []string{
+		"-l", fmt.Sprintf("unix://%s", r.rpcAddr),
+		"--metrics-interval=0",
+		"--start-timeout", "2m",
+		"--state-dir", filepath.Join(r.stateDir, containerdStateDir),
+	}
+	if goruntime.GOOS == "solaris" {
+		args = append(args, "--shim", "containerd-shim", "--runtime", "runc")
+	} else {
+		args = append(args, "--shim", "docker-containerd-shim")
+		if r.runtime != "" {
"" { + args = append(args, "--runtime") + args = append(args, r.runtime) + } + } + if r.debugLog { + args = append(args, "--debug") + } + if len(r.runtimeArgs) > 0 { + for _, v := range r.runtimeArgs { + args = append(args, "--runtime-args") + args = append(args, v) + } + logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) + } + + cmd := exec.Command(containerdBinary, args...) + // redirect containerd logs to docker logs + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = setSysProcAttr(true) + cmd.Env = nil + // clear the NOTIFY_SOCKET from the env when starting containerd + for _, e := range os.Environ() { + if !strings.HasPrefix(e, "NOTIFY_SOCKET") { + cmd.Env = append(cmd.Env, e) + } + } + if err := cmd.Start(); err != nil { + return err + } + + // unless strictly necessary, do not add anything in between here + // as the reaper goroutine below needs to kick in as soon as possible + // and any "return" from code paths added here will defeat the reaper + // process. + + r.daemonWaitCh = make(chan struct{}) + go func() { + cmd.Wait() + close(r.daemonWaitCh) + }() // Reap our child when needed + + logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) + if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { + system.KillProcess(cmd.Process.Pid) + return err + } + if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { + system.KillProcess(cmd.Process.Pid) + return err + } + + r.daemonPid = cmd.Process.Pid + return nil +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimePath sets the path of the runtime to be used as the +// default by containerd +func WithRuntimePath(rt string) RemoteOption { + return runtimePath(rt) +} + +type runtimePath string + +func (rt runtimePath) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtime = string(rt) + return nil + } + return fmt.Errorf("WithRuntime option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } + return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines if containerd debug logs will be enabled for daemon. 
+func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} + +// WithLiveRestore defines if containers are stopped on shutdown or restored. +func WithLiveRestore(v bool) RemoteOption { + return liveRestore(v) +} + +type liveRestore bool + +func (l liveRestore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.liveRestore = bool(l) + for _, c := range remote.clients { + c.liveRestore = bool(l) + } + return nil + } + return fmt.Errorf("WithLiveRestore option not supported for this remote") +} + +// WithOOMScore defines the oom_score_adj to set for the containerd process. +func WithOOMScore(score int) RemoteOption { + return oomScore(score) +} + +type oomScore int + +func (o oomScore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.oomScore = int(o) + return nil + } + return fmt.Errorf("WithOOMScore option not supported for this remote") +} diff --git a/libcontainerd/remote_windows.go b/libcontainerd/remote_windows.go new file mode 100644 index 0000000..74c1044 --- /dev/null +++ b/libcontainerd/remote_windows.go @@ -0,0 +1,36 @@ +package libcontainerd + +import "github.com/docker/docker/pkg/locker" + +type remote struct { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + } + return c, nil +} + +// Cleanup is a no-op on Windows. It is here to implement the interface. +func (r *remote) Cleanup() { +} + +func (r *remote) UpdateOptions(opts ...RemoteOption) error { + return nil +} + +// New creates a fresh instance of libcontainerd remote. On Windows, +// this is not used as there is no remote containerd process. +func New(_ string, _ ...RemoteOption) (Remote, error) { + return &remote{}, nil +} + +// WithLiveRestore is a noop on windows. +func WithLiveRestore(v bool) RemoteOption { + return nil +} diff --git a/libcontainerd/types.go b/libcontainerd/types.go new file mode 100644 index 0000000..c7ade6b --- /dev/null +++ b/libcontainerd/types.go @@ -0,0 +1,75 @@ +package libcontainerd + +import ( + "io" + + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestore = "restore" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state +) + +// CommonStateInfo contains the state info common to all platforms. +type CommonStateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error +} + +// Client provides access to containerd features. 
+type Client interface {
+	GetServerVersion(ctx context.Context) (*ServerVersion, error)
+	Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error
+	Signal(containerID string, sig int) error
+	SignalProcess(containerID string, processFriendlyName string, sig int) error
+	AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error)
+	Resize(containerID, processFriendlyName string, width, height int) error
+	Pause(containerID string) error
+	Resume(containerID string) error
+	Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error
+	Stats(containerID string) (*Stats, error)
+	GetPidsForContainer(containerID string) ([]int, error)
+	Summary(containerID string) ([]Summary, error)
+	UpdateResources(containerID string, resources Resources) error
+	CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error
+	DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error
+	ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error)
+}
+
+// CreateOption allows configuring parameters of container creation.
+type CreateOption interface {
+	Apply(interface{}) error
+}
+
+// StdioCallback is called to connect a container or process stdio.
+type StdioCallback func(IOPipe) error
+
+// IOPipe contains the stdio streams.
+type IOPipe struct {
+	Stdin    io.WriteCloser
+	Stdout   io.ReadCloser
+	Stderr   io.ReadCloser
+	Terminal bool // Whether stderr is connected on Windows
+}
+
+// ServerVersion contains version information as retrieved from the
+// server.
+type ServerVersion struct {
+	containerd.GetServerVersionResponse
+}
diff --git a/libcontainerd/types_linux.go b/libcontainerd/types_linux.go
new file mode 100644
index 0000000..4f06358
--- /dev/null
+++ b/libcontainerd/types_linux.go
@@ -0,0 +1,49 @@
+package libcontainerd
+
+import (
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+	// Terminal creates an interactive terminal for the container.
+	Terminal bool `json:"terminal"`
+	// User specifies user information for the process.
+	User *specs.User `json:"user"`
+	// Args specifies the binary and arguments for the application to execute.
+	Args []string `json:"args"`
+	// Env populates the process environment for the process.
+	Env []string `json:"env,omitempty"`
+	// Cwd is the current working directory for the process and must be
+	// relative to the container's root.
+	Cwd *string `json:"cwd"`
+	// Capabilities are linux capabilities that are kept for the container.
+	Capabilities []string `json:"capabilities,omitempty"`
+	// Rlimits specifies rlimit options to apply to the process.
+	Rlimits []specs.LinuxRlimit `json:"rlimits,omitempty"`
+	// ApparmorProfile specifies the apparmor profile for the container.
+	ApparmorProfile *string `json:"apparmorProfile,omitempty"`
+	// SelinuxLabel specifies the selinux context that the container process is run as.
+	SelinuxLabel *string `json:"selinuxLabel,omitempty"`
+}
+
+// StateInfo describes the new state a container has entered.
+type StateInfo struct {
+	CommonStateInfo
+
+	// Platform specific StateInfo
+	OOMKilled bool
+}
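+
+// discardStdio is an illustrative StdioCallback sketch: a real callback
+// typically wires IOPipe.Stdout and IOPipe.Stderr into the daemon's log
+// copier, while this hypothetical example simply closes all three streams.
+// Nothing in this patch calls it.
+func discardStdio(pipe IOPipe) error {
+	if pipe.Stdin != nil {
+		pipe.Stdin.Close()
+	}
+	if pipe.Stdout != nil {
+		pipe.Stdout.Close()
+	}
+	if pipe.Stderr != nil {
+		pipe.Stderr.Close()
+	}
+	return nil
+}
+
+// Stats contains the stats properties from containerd.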
+type Stats containerd.StatsResponse
+
+// Summary contains a container summary from containerd
+type Summary struct{}
+
+// Resources defines updatable container resource values.
+type Resources containerd.UpdateResource
+
+// Checkpoints contains the details of a checkpoint
+type Checkpoints containerd.ListCheckpointResponse
diff --git a/libcontainerd/types_solaris.go b/libcontainerd/types_solaris.go
new file mode 100644
index 0000000..2ab18eb
--- /dev/null
+++ b/libcontainerd/types_solaris.go
@@ -0,0 +1,43 @@
+package libcontainerd
+
+import (
+	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+	// Terminal creates an interactive terminal for the container.
+	Terminal bool `json:"terminal"`
+	// User specifies user information for the process.
+	User *specs.User `json:"user"`
+	// Args specifies the binary and arguments for the application to execute.
+	Args []string `json:"args"`
+	// Env populates the process environment for the process.
+	Env []string `json:"env,omitempty"`
+	// Cwd is the current working directory for the process and must be
+	// relative to the container's root.
+	Cwd *string `json:"cwd"`
+	// Capabilities are linux capabilities that are kept for the container.
+	Capabilities []string `json:"capabilities,omitempty"`
+}
+
+// Stats contains the stats properties from containerd.
+type Stats struct{}
+
+// Summary contains a container summary from containerd
+type Summary struct{}
+
+// StateInfo describes the new state a container has entered.
+type StateInfo struct {
+	CommonStateInfo
+
+	// Platform specific StateInfo
+	OOMKilled bool
+}
+
+// Resources defines updatable container resource values.
+type Resources struct{}
+
+// Checkpoints contains the details of a checkpoint
+type Checkpoints containerd.ListCheckpointResponse
diff --git a/libcontainerd/types_windows.go b/libcontainerd/types_windows.go
new file mode 100644
index 0000000..317bfb0
--- /dev/null
+++ b/libcontainerd/types_windows.go
@@ -0,0 +1,79 @@
+package libcontainerd
+
+import (
+	"github.com/Microsoft/hcsshim"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Process contains information to start a specific application inside the container.
+type Process specs.Process
+
+// Summary contains a ProcessList item from HCS to support `top`
+type Summary hcsshim.ProcessListItem
+
+// StateInfo describes the new state a container has entered.
+type StateInfo struct {
+	CommonStateInfo
+
+	// Platform specific StateInfo
+	UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container.
+}
+
+// Stats contains statistics from HCS
+type Stats hcsshim.Statistics
+
+// Resources defines updatable container resource values.
+type Resources struct{}
+
+// ServicingOption is a CreateOption with a no-op application that signifies
+// the container needs to be used for a Windows servicing operation.
+type ServicingOption struct {
+	IsServicing bool
+}
+
+// FlushOption is a CreateOption that signifies if the container should be
+// started with flushes ignored until boot has completed. This is an optimisation
+// for first boot of a container.
+type FlushOption struct {
+	IgnoreFlushesDuringBoot bool
+}
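+
+// exampleWindowsCreateOptions is an illustrative, hypothetical sketch of how
+// the CreateOptions declared in this file compose, e.g. for the first boot of
+// a Hyper-V isolated container; such a slice would be passed to Client.Create.
+// Nothing in this patch calls it.
+func exampleWindowsCreateOptions() []CreateOption {
+	return []CreateOption{
+		&FlushOption{IgnoreFlushesDuringBoot: true},
+		&HyperVIsolationOption{IsHyperV: true},
+	}
+}
+
+// HyperVIsolationOption is a CreateOption that indicates whether the runtime
+// should start the container as a Hyper-V container.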
+type HyperVIsolationOption struct { + IsHyperV bool +} + +// LayerOption is a CreateOption that indicates to the runtime the layer folder +// and layer paths for a container. +type LayerOption struct { + // LayerFolderPath is the path to the current layer folder. Empty for Hyper-V containers. + LayerFolderPath string `json:",omitempty"` + // Layer paths of the parent layers + LayerPaths []string +} + +// NetworkEndpointsOption is a CreateOption that provides the runtime list +// of network endpoints to which a container should be attached during its creation. +type NetworkEndpointsOption struct { + Endpoints []string + AllowUnqualifiedDNSQuery bool + DNSSearchList []string + NetworkSharedContainerID string +} + +// CredentialsOption is a CreateOption that indicates the credentials from +// a credential spec to be used to the runtime +type CredentialsOption struct { + Credentials string +} + +// Checkpoint holds the details of a checkpoint (not supported in windows) +type Checkpoint struct { + Name string +} + +// Checkpoints contains the details of a checkpoint +type Checkpoints struct { + Checkpoints []*Checkpoint +} diff --git a/libcontainerd/utils_linux.go b/libcontainerd/utils_linux.go new file mode 100644 index 0000000..5fd5bf6 --- /dev/null +++ b/libcontainerd/utils_linux.go @@ -0,0 +1,62 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.LinuxRlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setsid: sid, + Pdeathsig: syscall.SIGKILL, + } +} diff --git a/libcontainerd/utils_solaris.go b/libcontainerd/utils_solaris.go new file mode 100644 index 0000000..10ae599 --- /dev/null +++ b/libcontainerd/utils_solaris.go @@ -0,0 +1,27 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/containerd/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + return 0, 0, nil +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return nil +} diff --git a/libcontainerd/utils_windows.go b/libcontainerd/utils_windows.go new file mode 100644 index 0000000..41ac40d --- /dev/null +++ 
b/libcontainerd/utils_windows.go
@@ -0,0 +1,46 @@
+package libcontainerd
+
+import "strings"
+
+// setupEnvironmentVariables converts a string array of environment variables
+// into a map as required by the HCS. Source array is in the format [k1=v1] [k2=v2] etc.
+func setupEnvironmentVariables(a []string) map[string]string {
+	r := make(map[string]string)
+	for _, s := range a {
+		arr := strings.SplitN(s, "=", 2)
+		if len(arr) == 2 {
+			r[arr[0]] = arr[1]
+		}
+	}
+	return r
+}
+
+// Apply for a servicing option is a no-op.
+func (s *ServicingOption) Apply(interface{}) error {
+	return nil
+}
+
+// Apply for the flush option is a no-op.
+func (f *FlushOption) Apply(interface{}) error {
+	return nil
+}
+
+// Apply for the Hyper-V isolation option is a no-op.
+func (h *HyperVIsolationOption) Apply(interface{}) error {
+	return nil
+}
+
+// Apply for the layer option is a no-op.
+func (h *LayerOption) Apply(interface{}) error {
+	return nil
+}
+
+// Apply for the network endpoints option is a no-op.
+func (s *NetworkEndpointsOption) Apply(interface{}) error {
+	return nil
+}
+
+// Apply for the credentials option is a no-op.
+func (s *CredentialsOption) Apply(interface{}) error {
+	return nil
+}
diff --git a/libcontainerd/utils_windows_test.go b/libcontainerd/utils_windows_test.go
new file mode 100644
index 0000000..f3679bf
--- /dev/null
+++ b/libcontainerd/utils_windows_test.go
@@ -0,0 +1,13 @@
+package libcontainerd
+
+import (
+	"testing"
+)
+
+func TestEnvironmentParsing(t *testing.T) {
+	env := []string{"foo=bar", "car=hat", "a=b=c"}
+	result := setupEnvironmentVariables(env)
+	if len(result) != 3 || result["foo"] != "bar" || result["car"] != "hat" || result["a"] != "b=c" {
+		t.Fatalf("Expected map[foo:bar car:hat a:b=c], got %v", result)
+	}
+}
diff --git a/migrate/v1/migratev1.go b/migrate/v1/migratev1.go
new file mode 100644
index 0000000..3cb8828
--- /dev/null
+++ b/migrate/v1/migratev1.go
@@ -0,0 +1,506 @@
+package v1
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"sync"
+	"time"
+
+	"encoding/json"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/image"
+	imagev1 "github.com/docker/docker/image/v1"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/ioutils"
+	refstore "github.com/docker/docker/reference"
+	"github.com/opencontainers/go-digest"
+)
+
+type graphIDRegistrar interface {
+	RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error)
+	Release(layer.Layer) ([]layer.Metadata, error)
+}
+
+type graphIDMounter interface {
+	CreateRWLayerByGraphID(string, string, layer.ChainID) error
+}
+
+type checksumCalculator interface {
+	ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error)
+}
+
+const (
+	graphDirName                 = "graph"
+	tarDataFileName              = "tar-data.json.gz"
+	migrationFileName            = ".migration-v1-images.json"
+	migrationTagsFileName        = ".migration-v1-tags"
+	migrationDiffIDFileName      = ".migration-diffid"
+	migrationSizeFileName        = ".migration-size"
+	migrationTarDataFileName     = ".migration-tardata"
+	containersDirName            = "containers"
+	configFileNameLegacy         = "config.json"
+	configFileName               = "config.v2.json"
+	repositoriesFilePrefixLegacy = "repositories-"
+)
+
+var (
+	errUnsupported = errors.New("migration is not supported")
+)
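+
+// migrateExample is an illustrative, hypothetical sketch of how a daemon is
+// expected to invoke the migration at startup; root is typically the docker
+// root directory (e.g. /var/lib/docker). Migrate returns early when root
+// contains no legacy graph directory, so calling it unconditionally is cheap.
+// Nothing in this patch calls this helper.
+func migrateExample(root, driver string, ls layer.Store, is image.Store, rs refstore.Store, ms metadata.Store) error {
+	return Migrate(root, driver, ls, is, rs, ms)
+}
+
+// Migrate takes an old graph directory and transforms the metadata into the
+// new format.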
+func Migrate(root, driverName string, ls layer.Store, is image.Store, rs refstore.Store, ms metadata.Store) error { + graphDir := filepath.Join(root, graphDirName) + if _, err := os.Lstat(graphDir); os.IsNotExist(err) { + return nil + } + + mappings, err := restoreMappings(root) + if err != nil { + return err + } + + if cc, ok := ls.(checksumCalculator); ok { + CalculateLayerChecksums(root, cc, mappings) + } + + if registrar, ok := ls.(graphIDRegistrar); !ok { + return errUnsupported + } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { + return err + } + + err = saveMappings(root, mappings) + if err != nil { + return err + } + + if mounter, ok := ls.(graphIDMounter); !ok { + return errUnsupported + } else if err := migrateContainers(root, mounter, is, mappings); err != nil { + return err + } + + if err := migrateRefs(root, driverName, rs, mappings); err != nil { + return err + } + + return nil +} + +// CalculateLayerChecksums walks an old graph directory and calculates checksums +// for each layer. These checksums are later used for migration. +func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { + graphDir := filepath.Join(root, graphDirName) + // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io + workers := runtime.NumCPU() * 3 + workQueue := make(chan string, workers) + + wg := sync.WaitGroup{} + + for i := 0; i < workers; i++ { + wg.Add(1) + go func() { + for id := range workQueue { + start := time.Now() + if err := calculateLayerChecksum(graphDir, id, ls); err != nil { + logrus.Errorf("could not calculate checksum for %q, %q", id, err) + } + elapsed := time.Since(start) + logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) + } + wg.Done() + }() + } + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + logrus.Errorf("could not read directory %q", graphDir) + return + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, ok := mappings[v1ID]; ok { // support old migrations without helper files + continue + } + workQueue <- v1ID + } + close(workQueue) + wg.Wait() +} + +func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { + diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) + if _, err := os.Lstat(diffIDFile); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + + parent, err := getParent(filepath.Join(graphDir, id)) + if err != nil { + return err + } + + diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { + return err + } + + if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { + return err + } + + logrus.Infof("calculated checksum for layer %s: %s", id, diffID) + return nil +} + +func restoreMappings(root string) (map[string]image.ID, error) { + mappings := make(map[string]image.ID) + + mfile := filepath.Join(root, migrationFileName) + f, err := os.Open(mfile) + if err != nil && !os.IsNotExist(err) { + return nil, err + } else if err == nil { + err := json.NewDecoder(f).Decode(&mappings) + if err != nil { + f.Close() + return nil, err + } + f.Close() + } + + return 
mappings, nil +} + +func saveMappings(root string, mappings map[string]image.ID) error { + mfile := filepath.Join(root, migrationFileName) + f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + return json.NewEncoder(f).Encode(mappings) +} + +func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { + graphDir := filepath.Join(root, graphDirName) + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + return err + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, exists := mappings[v1ID]; exists { + continue + } + if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { + continue + } + } + + return nil +} + +func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { + containersDir := filepath.Join(root, containersDirName) + dir, err := ioutil.ReadDir(containersDir) + if err != nil { + return err + } + for _, v := range dir { + id := v.Name() + + if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { + continue + } + + containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) + if err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(containerJSON, &c); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageStrJSON, ok := c["Image"] + if !ok { + return fmt.Errorf("invalid container configuration for %v", id) + } + + var image string + if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageID, ok := imageMappings[image] + if !ok { + logrus.Errorf("image not migrated %v", imageID) // non-fatal error + continue + } + + c["Image"] = rawJSON(imageID) + + containerJSON, err = json.Marshal(c) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { + return err + } + + img, err := is.Get(imageID) + if err != nil { + return err + } + + if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + logrus.Infof("migrated container %s to point to %s", id, imageID) + + } + return nil +} + +type refAdder interface { + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error +} + +func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { + migrationFile := filepath.Join(root, migrationTagsFileName) + if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { + return err + } + + type repositories struct { + Repositories map[string]map[string]string + } + + var repos repositories + + f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&repos); err != nil { + return err + } + + for name, repo := range repos.Repositories { + for tag, id := range repo { + if strongID, exists := mappings[id]; exists { + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + logrus.Errorf("migrate 
tags: invalid name %q, %q", name, err) + continue + } + if !reference.IsNameOnly(ref) { + logrus.Errorf("migrate tags: invalid name %q, unexpected tag or digest", name) + continue + } + if dgst, err := digest.Parse(tag); err == nil { + canonical, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) + continue + } + if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate digest %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) + } + } else { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) + continue + } + if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate tag %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) + } + } + logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) + } + } + } + + mf, err := os.Create(migrationFile) + if err != nil { + return err + } + mf.Close() + + return nil +} + +func getParent(confDir string) (string, error) { + jsonFile := filepath.Join(confDir, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return "", err + } + var parent struct { + Parent string + ParentID digest.Digest `json:"parent_id"` + } + if err := json.Unmarshal(imageJSON, &parent); err != nil { + return "", err + } + if parent.Parent == "" && parent.ParentID != "" { // v1.9 + parent.Parent = parent.ParentID.Hex() + } + // compatibilityID for parent + parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) + if err == nil && len(parentCompatibilityID) > 0 { + parent.Parent = string(parentCompatibilityID) + } + return parent.Parent, nil +} + +func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { + defer func() { + if err != nil { + logrus.Errorf("migration failed for %v, err: %v", id, err) + } + }() + + parent, err := getParent(filepath.Join(root, graphDirName, id)) + if err != nil { + return err + } + + var parentID image.ID + if parent != "" { + var exists bool + if parentID, exists = mappings[parent]; !exists { + if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { + // todo: fail or allow broken chains? 
+ return err + } + parentID = mappings[parent] + } + } + + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) + if err != nil { + return err + } + diffID, err := digest.Parse(string(diffIDData)) + if err != nil { + return err + } + + sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) + if err != nil { + return err + } + size, err := strconv.ParseInt(string(sizeStr), 10, 64) + if err != nil { + return err + } + + layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) + if err != nil { + return err + } + logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) + + jsonFile := filepath.Join(root, graphDirName, id, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return err + } + + h, err := imagev1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + rootFS.Append(layer.DiffID()) + + config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + strongID, err := is.Create(config) + if err != nil { + return err + } + logrus.Infof("migrated image %s to %s", id, strongID) + + if parentID != "" { + if err := is.SetParent(strongID, parentID); err != nil { + return err + } + } + + checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) + if err == nil { // best effort + dgst, err := digest.Parse(string(checksum)) + if err == nil { + V2MetadataService := metadata.NewV2MetadataService(ms) + V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) + } + } + _, err = ls.Release(layer) + if err != nil { + return err + } + + mappings[id] = strongID + return +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go new file mode 100644 index 0000000..ef4ac27 --- /dev/null +++ b/migrate/v1/migratev1_test.go @@ -0,0 +1,448 @@ +package v1 + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +func TestMigrateRefs(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-tags") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) + + ta := &mockTagAdder{} + err = migrateRefs(tmpdir, "generic", ta, 
map[string]image.ID{ + "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + }) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "docker.io/library/busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "docker.io/library/busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "docker.io/library/registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + } + + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } + + // second migration is no-op + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } +} + +func TestMigrateContainers(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-containers") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + // container with invalid image + err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + ls := &mockMounter{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, runtime.GOOS, ls) + if err != nil { + t.Fatal(err) + } + + imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) + if err != nil { + 
t.Fatal(err) + } + + err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ + "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, + }) + if err != nil { + t.Fatal(err) + } + + expected := []mountInfo{{ + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", + }} + if !reflect.DeepEqual(expected, ls.mounts) { + t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) + } + + if actual, expected := ls.count, 0; actual != expected { + t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) + } + + config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) + if err != nil { + t.Fatal(err) + } + var config struct{ Image string } + err = json.Unmarshal(config2, &config) + if err != nil { + t.Fatal(err) + } + + if actual, expected := config.Image, string(imgID); actual != expected { + t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) + } + +} + +func TestMigrateImages(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-images") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // busybox from 1.9 + id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") + if err != nil { + t.Fatal(err) + } + + id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") + if err != nil { + t.Fatal(err) + } + + ls := &mockRegistrar{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, runtime.GOOS, ls) + if err != nil { + t.Fatal(err) + } + + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"), runtime.GOOS) + if err != nil { + t.Fatal(err) + } + mappings := make(map[string]image.ID) + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected := map[string]image.ID{ + id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), + id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), + } + + if !reflect.DeepEqual(mappings, expected) { + t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) + } + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + ls.count = 0 + + // next images are busybox from 1.8.2 + _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + if err != nil { + t.Fatal(err) + } + + _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") + if err != nil { + t.Fatal(err) + } + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") + expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + + v2MetadataService := metadata.NewV2MetadataService(ms) + receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) + if err != nil { + t.Fatal(err) + } + + expectedMetadata := []metadata.V2Metadata{ + {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { + t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) + } + +} + +func TestMigrateUnsupported(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) + if err != nil { + t.Fatal(err) + } + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != errUnsupported { + t.Fatalf("expected unsupported error, got %q", err) + } +} + +func TestMigrateEmptyDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func addImage(dest, jsonConfig, parent, checksum string) (string, error) { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return "", err + } + if config.ID == "" { + b := make([]byte, 32) + rand.Read(b) + config.ID = hex.EncodeToString(b) + } + contDir := filepath.Join(dest, "graph", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { + return "", err + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { + return "", err + } + if parent != 
"" { + if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { + return "", err + } + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + return config.ID, nil +} + +func addContainer(dest, jsonConfig string) error { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return err + } + contDir := filepath.Join(dest, "containers", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { + return err + } + return nil +} + +type mockTagAdder struct { + refs map[string]string +} + +func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if t.refs == nil { + t.refs = make(map[string]string) + } + t.refs[ref.String()] = id.String() + return nil +} +func (t *mockTagAdder) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return t.AddTag(ref, id, force) +} + +type mockRegistrar struct { + layers map[layer.ChainID]*mockLayer + count int +} + +func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { + r.count++ + l := &mockLayer{} + if parent != "" { + p, exists := r.layers[parent] + if !exists { + return nil, fmt.Errorf("invalid parent %q", parent) + } + l.parent = p + l.diffIDs = append(l.diffIDs, p.diffIDs...) + } + l.diffIDs = append(l.diffIDs, diffID) + if r.layers == nil { + r.layers = make(map[layer.ChainID]*mockLayer) + } + r.layers[l.ChainID()] = l + return l, nil +} +func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} +func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +type mountInfo struct { + name, graphID, parent string +} +type mockMounter struct { + mounts []mountInfo + count int +} + +func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { + r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) + return nil +} +func (r *mockMounter) Unmount(string) error { + r.count-- + return nil +} +func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} + +type mockLayer struct { + diffIDs []layer.DiffID + parent *mockLayer +} + +func (l *mockLayer) TarStream() (io.ReadCloser, error) { + return nil, nil +} + +func (ml *mockLayer) TarSeekStream() (ioutils.ReadSeekCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, nil +} + +func (l *mockLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *mockLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *mockLayer) Parent() layer.Layer { + if l.parent == nil { + return nil + } + return l.parent +} + +func (l *mockLayer) Size() (int64, error) { + return 0, nil +} + +func (l *mockLayer) DiffSize() (int64, error) { + return 0, nil +} + +func (l *mockLayer) Platform() layer.Platform { + return "" +} + +func (l *mockLayer) Metadata() (map[string]string, error) { + return nil, nil +} diff --git a/oci/defaults.go b/oci/defaults.go new file mode 100644 index 
0000000..4376faf --- /dev/null +++ b/oci/defaults.go @@ -0,0 +1,221 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func defaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns the default spec used by docker for the current platform +func DefaultSpec() specs.Spec { + return DefaultOSSpec(runtime.GOOS) +} + +// DefaultOSSpec returns the spec for a given OS +func DefaultOSSpec(osName string) specs.Spec { + if osName == "windows" { + return DefaultWindowsSpec() + } else if osName == "solaris" { + return DefaultSolarisSpec() + } else { + return DefaultLinuxSpec() + } +} + +// DefaultWindowsSpec creates a default spec for running Windows containers +func DefaultWindowsSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + Windows: &specs.Windows{}, + } +} + +// DefaultSolarisSpec creates a default spec for running Solaris containers +func DefaultSolarisSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} + +// DefaultLinuxSpec creates a default spec for running Linux containers +func DefaultLinuxSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: "linux", + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + s.Process.Capabilities = &specs.LinuxCapabilities{ + Bounding: defaultCapabilities(), + Permitted: defaultCapabilities(), + Inheritable: defaultCapabilities(), + Effective: defaultCapabilities(), + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.LinuxNamespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind-mount or symlink of the container's ptmx.
+ // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(5), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(3), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(9), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(8), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(0), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(1), + Access: "rwm", + }, + { + Allow: false, + Type: "c", + Major: iPtr(10), + Minor: iPtr(229), + Access: "rwm", + }, + }, + }, + } + + // For LCOW support, don't mask /sys/firmware + if runtime.GOOS != "windows" { + s.Linux.MaskedPaths = append(s.Linux.MaskedPaths, "/sys/firmware") + } + + return s +} diff --git a/oci/devices_linux.go b/oci/devices_linux.go new file mode 100644 index 0000000..fa9c726 --- /dev/null +++ b/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { + t := string(d.Type) + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: t, + Major: &d.Major, + Minor: &d.Minor, + Access: d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
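+// For example (a sketch, with hypothetical host paths): DevicesFromPath("/dev/fuse", "/dev/fuse", "rwm") +// should yield a single device entry plus a matching cgroup rule, while a directory argument such as +// "/dev/snd" is walked and yields one entry per device node found beneath it.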
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // walk the directory and add the device nodes it contains + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/oci/devices_unsupported.go b/oci/devices_unsupported.go new file mode 100644 index 0000000..b5d3fab --- /dev/null +++ b/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +// Not implemented +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/oci/namespaces.go b/oci/namespaces.go new file mode 100644 index 0000000..cb222dc --- /dev/null +++ b/oci/namespaces.go @@ -0,0 +1,13 @@ +package oci + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) + return + } + } +} diff --git a/opts/env.go b/opts/env.go new file mode 100644 index 0000000..e6ddd73 --- /dev/null +++ b/opts/env.go @@ -0,0 +1,46 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "strings" +) + +// ValidateEnv validates an environment variable and returns it. +// If no value is specified, it returns the current value using os.Getenv.
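+// For example (cases mirrored from opts/env_test.go; FOO is an illustrative name): ValidateEnv("FOO=bar") +// returns "FOO=bar" unchanged, ValidateEnv("PATH") expands to "PATH=" plus the current os.Getenv("PATH"), +// and a name that is not set in the environment is returned as-is.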
+// +// As on ParseEnvFile and related to #16585, environment variable names +// are not validated whatsoever; it's up to the application inside docker +// to validate them or not. +// +// The only validation here is to check if name is empty, per #25099 +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if arr[0] == "" { + return "", fmt.Errorf("invalid environment variable: %s", val) + } + if len(arr) > 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if runtime.GOOS == "windows" { + // Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent. + if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} diff --git a/opts/env_test.go b/opts/env_test.go new file mode 100644 index 0000000..6f6c7a7 --- /dev/null +++ b/opts/env_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "testing" +) + +func TestValidateEnv(t *testing.T) { + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + " some space before": " some space before", + "some space after ": "some space after ", + } + // Environment variables are case-insensitive on Windows + if runtime.GOOS == "windows" { + valids["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH")) + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} diff --git a/opts/hosts.go b/opts/hosts.go new file mode 100644 index 0000000..594cccf --- /dev/null +++ b/opts/hosts.go @@ -0,0 +1,165 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS is enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker for TCP sockets + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it.
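+// For example (cases taken from opts/hosts_test.go): ValidateHost("tcp://0.0.0.0:2375") returns the +// input unchanged, while ParseHost(false, "") falls back to DefaultHost and ParseHost(false, "tcp://") +// expands to DefaultTCPHost.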
+func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDockerDaemonHost + if host != "" { + _, err := parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here; + // we need to know what the user entered later (using ParseHost) to adjust for TLS + return val, nil +} + +// ParseHost parses the specified address and sets defaults for a daemon host string +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDockerDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. +func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530.
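+ // For example, with defaultAddr "tcp://127.0.0.1:2376", an input of "[::1]:" is padded to + // "[::1]:2376" by the check below, matching the cases exercised in TestParseTCP.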
+ if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} diff --git a/opts/hosts_test.go b/opts/hosts_test.go new file mode 100644 index 0000000..8aada6a --- /dev/null +++ b/opts/hosts_test.go @@ -0,0 +1,181 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestParseHost(t *testing.T) { + invalid := []string{ + "something with spaces", + "://", + "unknown://", + "tcp://:port", + "tcp://invalid:port", + } + + valid := map[string]string{ + "": DefaultHost, + " ": DefaultHost, + " ": DefaultHost, + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), + "tcp://": DefaultTCPHost, + "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), + "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix://" + DefaultUnixSocket, + "unix://path/to/socket": "unix://path/to/socket", + "npipe://": "npipe://" + DefaultNamedPipe, + "npipe:////./pipe/foo": "npipe:////./pipe/foo", + } + + for _, value := range invalid { + if _, err := ParseHost(false, value); err == nil { + t.Errorf("Expected an error for %v, got [nil]", value) + } + } + + for value, expected := range valid { + if actual, err := ParseHost(false, value); err != nil || actual != expected { + t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func TestParseDockerDaemonHost(t *testing.T) { + invalids := map[string]string{ + + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", + " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", + "": "Invalid bind address format: ", + } + 
valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), + ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), + "tcp://": DefaultTCPHost, + "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), + "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix://" + DefaultUnixSocket, + "fd://": "fd://", + "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := parseSimpleProtoAddr("unix", 
"unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} diff --git a/opts/hosts_unix.go b/opts/hosts_unix.go new file mode 100644 index 0000000..611407a --- /dev/null +++ b/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/opts/hosts_windows.go b/opts/hosts_windows.go new file mode 100644 index 0000000..7c239e0 --- /dev/null +++ b/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/opts/ip.go b/opts/ip.go new file mode 100644 index 0000000..fb03b50 --- /dev/null +++ b/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. 
+func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/opts/ip_test.go b/opts/ip_test.go new file mode 100644 index 0000000..1027d84 --- /dev/null +++ b/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIPOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IPOpt{IP: &ip} + + invalidIP := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIP) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/opts/opts.go b/opts/opts.go new file mode 100644 index 0000000..8d82f76 --- /dev/null +++ b/opts/opts.go @@ -0,0 +1,327 @@ +package opts + +import ( + "fmt" + "net" + "path" + "regexp" + "strings" + + units "github.com/docker/go-units" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + if len(*opts.values) == 0 { + return "" + } + return fmt.Sprintf("%v", *opts.values) +} + +// Set validates if needed the input value and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. 
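+// For example, a ListOpts holding ["bar", "bar"] yields map[string]struct{}{"bar": {}} here; +// TestListOptsWithoutValidator relies on this de-duplication.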
+func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of the slice. +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the number of elements in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// WithValidator returns the ListOpts with validator set. +func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { + opts.validator = validator + return opts +} + +// NamedOption is an interface that list and map options +// with names implement. +type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep a reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and adds it to the +// internal map by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep a reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
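+// For example (names are illustrative): NewNamedMapOpts("max-name", m, nil) ties parsed key=value +// pairs to the caller-owned map m, while Name() reports "max-name", as exercised by TestNamedMapOpts.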
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an IP address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateDNSSearch validates a domain for the resolvconf search configuration. +// A zero-length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." { + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form of key=value. +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// MemBytes is a type for human-readable memory bytes (like 128M, 2g, etc) +type MemBytes int64 + +// String returns the string format of the human-readable memory bytes +func (m *MemBytes) String() string { + // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. + // We return "0" in case value is 0 here so that the default value is hidden.
+ // (Sometimes "default 0 B" is actually misleading) + if m.Value() != 0 { + return units.BytesSize(float64(m.Value())) + } + return "0" +} + +// Set sets the value of the MemBytes from a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) + return err +} + +// Type returns the type +func (m *MemBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemBytes) Value() int64 { + return int64(*m) +} + +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalJSON(s []byte) error { + if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { + return fmt.Errorf("invalid size: %q", s) + } + val, err := units.RAMInBytes(string(s[1 : len(s)-1])) + *m = MemBytes(val) + return err +} diff --git a/opts/opts_test.go b/opts/opts_test.go new file mode 100644 index 0000000..1afe3c1 --- /dev/null +++ b/opts/opts_test.go @@ -0,0 +1,264 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Error("validator is not being called") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsValidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) + } + o.Set("foo=bar") + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 {
t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + } + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} + +func TestNamedListOpts(t *testing.T) { + var v []string + o := NewNamedListOptsRef("foo-name", &v, nil) + + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + if o.Name() != "foo-name" { + t.Errorf("%s != foo-name", o.Name()) + } + if len(v) != 1 { + t.Errorf("expected foo to be in the values, got %v", v) + } +} + +func TestNamedMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewNamedMapOpts("max-name", tmpMap, nil) + + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + if o.Name() != "max-name" { + t.Errorf("%s != max-name", o.Name()) + } + if _, exist := tmpMap["max-size"]; !exist { + t.Errorf("expected map-size to be in the values, got %v", tmpMap) + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link 
name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} diff --git a/opts/opts_unix.go b/opts/opts_unix.go new file mode 100644 index 0000000..2766a43 --- /dev/null +++ b/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/opts/opts_windows.go b/opts/opts_windows.go new file mode 100644 index 0000000..98b7251 --- /dev/null +++ b/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... 
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/opts/quotedstring.go b/opts/quotedstring.go new file mode 100644 index 0000000..fb1e537 --- /dev/null +++ b/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. +type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return string(*s.value) +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/opts/quotedstring_test.go b/opts/quotedstring_test.go new file mode 100644 index 0000000..54dcbc1 --- /dev/null +++ b/opts/quotedstring_test.go @@ -0,0 +1,29 @@ +package opts + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestQuotedStringSetWithQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NoError(t, qs.Set(`"something"`)) + assert.Equal(t, "something", qs.String()) + assert.Equal(t, "something", value) +} + +func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NoError(t, qs.Set(`"something'`)) + assert.Equal(t, `"something'`, qs.String()) +} + +func TestQuotedStringSetWithNoQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NoError(t, qs.Set("something")) + assert.Equal(t, "something", qs.String()) +} diff --git a/opts/runtime.go b/opts/runtime.go new file mode 100644 index 0000000..4361b3c --- /dev/null +++ b/opts/runtime.go @@ -0,0 +1,79 @@ +package opts + +import ( + "fmt" + "strings" + + 
"github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. +func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/opts/ulimit.go b/opts/ulimit.go new file mode 100644 index 0000000..a2a65fc --- /dev/null +++ b/opts/ulimit.go @@ -0,0 +1,81 @@ +package opts + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. 
+func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/opts/ulimit_test.go b/opts/ulimit_test.go new file mode 100644 index 0000000..0aa3fac --- /dev/null +++ b/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/go-units" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*units.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/packaging/docker-engine.service b/packaging/docker-engine.service new file mode 100644 index 0000000..8c95522 --- /dev/null +++ b/packaging/docker-engine.service @@ -0,0 +1,14 @@ +[Unit] +Description=Docker Application Container Engine +After=connman.service dbus.socket + +[Service] +SmackProcessLabel=System +Type=simple +ExecStart=/usr/bin/dockerd -s overlay2 --userland-proxy=false -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 +Sockets=docker-engine.socket +Restart=always +RestartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/packaging/docker-engine.socket b/packaging/docker-engine.socket new file mode 100644 index 0000000..0145ab3 --- /dev/null +++ b/packaging/docker-engine.socket @@ -0,0 +1,13 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker-engine.service + +[Socket] +ListenStream=/run/docker.sock +SocketMode=0777 +SocketUser=root +SmackLabelIPIn=* +SmackLabelIPOut=@ + +[Install] +WantedBy=sockets.target diff --git a/packaging/docker-engine.spec b/packaging/docker-engine.spec index 230c581..d42d8aa 100755 --- a/packaging/docker-engine.spec +++ b/packaging/docker-engine.spec @@ -1,29 +1,24 @@ -%define docker_path /src/github.com/docker -%define runc_path /src/github.com/opencontainers -%define container_path /src/github.com/containerd %define go_version 1.8.3 -%define orig_version %{version}-ce +%define orig_version 17.06.0-dev +%define git_commit 5937f93014e04816aa39a7ebf714fb1b39fddeca Name: docker-engine -Version: 17.06.1 +Version: 0.7.0 Release: 0 Summary: 
The open-source application container engine Group: Tools/Docker License: ASL 2.0 +ExclusiveArch: armv7l Source0: %{name}.tar.gz Source1: %{name}.manifest -Source11: go1.8.3.linux-amd64.tar.gz -Source12: go1.8.3.linux-armv7.tar.gz -Source13: docker-ce.tar.bz2 -Source14: containerd.tar.bz2 -Source15: runc.tar.bz2 +Source2: %{name}.service +Source3: %{name}.socket +Source11: go1.8.3.linux-armv7.tar.gz -BuildRequires: git +## BuildRequires: git Requires(post): /sbin/ldconfig Requires(postun): /sbin/ldconfig -Requires: docker-containerd -Requires: docker-runc %description @@ -38,97 +33,52 @@ for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. - %prep %setup -q %ifarch armv7l -cp %{SOURCE12} . -tar -zxf %{SOURCE12} -rm -f %{_builddir}/%{name}-%{version}/go%{go_version}.linux-armv7.tar.gz -%endif -%ifarch x86_64 cp %{SOURCE11} . tar -zxf %{SOURCE11} -rm -f %{_builddir}/%{name}-%{version}/go%{go_version}.linux-amd64.tar.gz +#rm -f %{_builddir}/%{name}-%{version}/go%{go_version}.linux-armv7.tar.gz %endif chmod g-w %_sourcedir/* cp %{SOURCE1} ./%{name}.manifest -# docker-ce tar file -cp %{SOURCE13} . -tar -xvf %{SOURCE13} - -# containerd tar file -cp %{SOURCE14} . -tar -xvf %{SOURCE14} - -# runc tar file -cp %{SOURCE15} . -tar -xvf %{SOURCE15} - -## set docker build folder -mkdir -p .%docker_path/ -mkdir -p .%docker_path/engine -mkdir -p .%docker_path/cli -mkdir -p .%container_path/containerd -mkdir -p .%runc_path/ -mkdir -p .%runc_path/runc - -cd ext/docker-ce/components/engine -mv `ls . | grep -v packaging | grep -v src` ../../../..%docker_path/engine -cd ../cli/ -mv `ls . | grep -v packaging | grep -v src` ../../../..%docker_path/cli -cd ../../../containerd/ -mv `ls . | grep -v packaging | grep -v src` ../..%container_path/containerd -cd ../runc/ -mv `ls . 
| grep -v packaging | grep -v src` ../..%runc_path/runc
-cd ../../
-
+## service
+cp %{SOURCE2} ./%{name}.service
+cp %{SOURCE3} ./%{name}.socket
 
 %build
-cd .%docker_path/engine
 export GOROOT=%{_builddir}/%{name}-%{version}/go
 export PATH=$PATH:%{_builddir}/%{name}-%{version}/go/bin
 export GOPATH=%{_builddir}/%{name}-%{version}
 export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
+export DOCKER_GITCOMMIT=%{git_commit}
+
 go env
-echo 'docker daemon'
 export CGO_ENABLED=1
-AUTO_GOPATH=1 ./hack/make.sh dynbinary
-
-echo 'docker cli'
-cd ../cli
-make binary
+AUTO_GOPATH=1 ./hack/make.sh dynbinary-docker
 
-export BUILDTAGS=no_btrfs
-echo 'containerd build'
-cd ../../containerd/containerd
-make
-
-echo 'runc build'
-cd ../../opencontainers/runc
-make all BUILDTAGS=""
 
 %install
-rm -rf %{buildroot}
 # install binary
 install -d $RPM_BUILD_ROOT/%{_bindir}
-install -p -m 755 .%docker_path/cli/build/docker-linux-arm $RPM_BUILD_ROOT/%{_bindir}/docker
-install -p -m 755 .%docker_path/engine/bundles/%{orig_version}/dynbinary-daemon/dockerd-%{orig_version} $RPM_BUILD_ROOT/%{_bindir}/dockerd
-install -p -m 755 .%runc_path/runc/runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc
-install -p -m 755 .%container_path/containerd/bin/containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd
-install -p -m 755 .%container_path/containerd/bin/containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim
-install -p -m 755 .%container_path/containerd/bin/ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr
+install -p -m 755 %{_builddir}/%{name}-%{version}/bundles/%{orig_version}/dynbinary-docker/docker $RPM_BUILD_ROOT/%{_bindir}/docker
+
+# scriptlets run on the target system, so the symlinks are created under the
+# real %{_bindir}, not under the build root
+%post
+ln -sf docker %{_bindir}/docker-containerd
+ln -sf docker %{_bindir}/docker-containerd-ctr
+ln -sf docker %{_bindir}/docker-containerd-shim
+ln -sf docker %{_bindir}/docker-proxy
+ln -sf docker %{_bindir}/docker-runc
+ln -sf docker %{_bindir}/dockerd
+
+%postun
+rm -f %{_bindir}/docker-containerd %{_bindir}/docker-containerd-ctr %{_bindir}/docker-containerd-shim
+rm -f %{_bindir}/docker-proxy %{_bindir}/docker-runc %{_bindir}/dockerd
 
 %files
 %manifest %{name}.manifest
 %defattr(-,root,root,-)
-/%{_bindir}/docker
-/%{_bindir}/dockerd
-/%{_bindir}/docker-containerd
-/%{_bindir}/docker-containerd-shim
-/%{_bindir}/docker-containerd-ctr
-/%{_bindir}/docker-runc
-%license .%docker_path/engine/LICENSE
+/%{_bindir}/docker*
+%license ./LICENSE
diff --git a/pkg/README.md b/pkg/README.md
new file mode 100644
index 0000000..c4b78a8
--- /dev/null
+++ b/pkg/README.md
@@ -0,0 +1,11 @@
+pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.
+
+Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible.
+If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
+Docker organization, to facilitate re-use by other projects. However that is not the priority.
+
+The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
+Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
+
+Because utility packages are small and neatly separated from the rest of the codebase, they are a good
+place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!
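+
+As a purely illustrative sketch (the import path is real, the program is not part of this tree),
+a consumer can reuse one of these packages on its own, e.g. `pkg/archive`:
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+func main() {
+	// Stream the current directory as an uncompressed tar archive to stdout.
+	rdr, err := archive.Tar(".", archive.Uncompressed)
+	if err != nil {
+		panic(err)
+	}
+	defer rdr.Close()
+	io.Copy(os.Stdout, rdr)
+}
+```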
diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go new file mode 100644 index 0000000..5de4a4d --- /dev/null +++ b/pkg/aaparser/aaparser.go @@ -0,0 +1,89 @@ +// Package aaparser is a convenience package interacting with `apparmor_parser`. +package aaparser + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the major and minor version of apparmor_parser. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -Kr` on a specified apparmor profile to +// replace the profile. The `-K` is necessary to make sure that apparmor_parser +// doesn't try to write to a read-only filesystem. +func LoadProfile(profilePath string) error { + _, err := cmd("", "-Kr", profilePath) + return err +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP {major, minor, patch}. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/pkg/aaparser/aaparser_test.go b/pkg/aaparser/aaparser_test.go new file mode 100644 index 0000000..69bc8d2 --- /dev/null +++ b/pkg/aaparser/aaparser_test.go @@ -0,0 +1,73 @@ +package aaparser + +import ( + "testing" +) + +type versionExpected struct { + output string + version int +} + +func TestParseVersion(t *testing.T) { + versions := []versionExpected{ + { + output: `AppArmor parser version 2.10 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 210000, + }, + { + output: `AppArmor parser version 2.8 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 208000, + }, + { + output: `AppArmor parser version 2.20 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 220000, + }, + { + output: `AppArmor parser version 2.05 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. 
+ +`, + version: 205000, + }, + { + output: `AppArmor parser version 2.9.95 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 209095, + }, + { + output: `AppArmor parser version 3.14.159 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 314159, + }, + } + + for _, v := range versions { + version, err := parseVersion(v.output) + if err != nil { + t.Fatalf("expected error to be nil for %#v, got: %v", v, err) + } + if version != v.version { + t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) + } + } +} diff --git a/pkg/archive/README.md b/pkg/archive/README.md new file mode 100644 index 0000000..7307d96 --- /dev/null +++ b/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go new file mode 100644 index 0000000..ee599f3 --- /dev/null +++ b/pkg/archive/archive.go @@ -0,0 +1,1195 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver allows the reuse of most utility functions of this package +// with a pluggable Untar function. Also, to facilitate the passing of +// specific id mappings for untar, an archiver can be created with maps +// which will then be passed to Untar operations +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. 
+	Xz
+)
+
+const (
+	// AUFSWhiteoutFormat is the default format for whiteouts
+	AUFSWhiteoutFormat WhiteoutFormat = iota
+	// OverlayWhiteoutFormat formats whiteout according to the overlay
+	// standard.
+	OverlayWhiteoutFormat
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+	file, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+	rdr, err := DecompressStream(file)
+	if err != nil {
+		return false
+	}
+	r := tar.NewReader(rdr)
+	_, err = r.Next()
+	return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Bzip2: {0x42, 0x5A, 0x68},
+		Gzip:  {0x1F, 0x8B, 0x08},
+		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+	} {
+		if len(source) < len(m) {
+			logrus.Debug("Len too short")
+			continue
+		}
+		if bytes.Equal(m, source[:len(m)]) {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	args := []string{"xz", "-d", "-c", "-q"}
+
+	return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+	p := pools.BufioReader32KPool
+	buf := p.Get(archive)
+	bs, err := buf.Peek(10)
+	if err != nil && err != io.EOF {
+		// Note: we'll ignore any io.EOF error because there are some odd
+		// cases where the layer.tar file will be empty (zero bytes) and
+		// that results in an io.EOF from the Peek() call. So, in those
+		// cases we'll just treat it as a non-compressed stream and
+		// that means just create an empty layer.
+		// See Issue 18170
+		return nil, err
+	}
+
+	compression := DetectCompression(bs)
+	switch compression {
+	case Uncompressed:
+		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+		return readBufWrapper, nil
+	case Gzip:
+		gzReader, err := gzip.NewReader(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+		return readBufWrapper, nil
+	case Bzip2:
+		bz2Reader := bzip2.NewReader(buf)
+		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+		return readBufWrapper, nil
+	case Xz:
+		xzReader, chdone, err := xzDecompress(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+			<-chdone
+			return readBufWrapper.Close()
+		}), nil
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
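+// A hypothetical round trip (sketch only):
+//
+//	var buf bytes.Buffer
+//	w, _ := CompressStream(&buf, Gzip)
+//	w.Write([]byte("payload"))
+//	w.Close()
+//	r, _ := DecompressStream(&buf) // DetectCompression recognizes the gzip magic bytes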
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. 
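+// For example (sketch): for c := Gzip, c.Extension() returns "tar.gz".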
+func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! 
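+			// (a tar.TypeLink entry carries no file data of its own; a non-zero
+			// Size would make tar.Writer expect payload bytes that are never
+			// written and fail on Close)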
+ } else { + ta.SeenFiles[inode] = name + } + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. 
+ // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + + if err := file.Sync(); err != nil { + file.Close() + return err + } + + if err := unix.Fadvise(int(file.Fd()), 0, 0, unix.FADV_DONTNEED); err != nil { + file.Close() + return err + } + + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. 
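+	// (e.g. headers with an unset AccessTime decode as the zero time, which
+	// lands before any real ModTime)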
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
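+// A hypothetical call sketch (names made up):
+//
+//	rdr, _ := DecompressStream(layerFile)
+//	err := Unpack(rdr, "/tmp/rootfs", &TarOptions{NoLchown: true})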
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMappings, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappings.UIDs(), + GIDMaps: archiver.IDMappings.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
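+// Sketch (hypothetical paths):
+//
+//	err := NewDefaultArchiver().CopyWithTar("/tmp/src", "/tmp/dst")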
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+
+	// if this archiver is set up with ID mapping we need to create
+	// the new destination directory with the remapped root UID/GID pair
+	// as owner
+	rootIDs := archiver.IDMappings.RootPair()
+	// Create dst, copy src's content into it
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("Can't copy a directory")
+	}
+
+	// Clean up the trailing slash. If dst ends in a path separator it
+	// names a directory, so copy into it under src's base name.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		if err := remapIDs(archiver.IDMappings, hdr); err != nil {
+			return err
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	err = archiver.Untar(r, filepath.Dir(dst), nil)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
+	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+	hdr.Uid, hdr.Gid = ids.UID, ids.GID
+	return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	chdone := make(chan struct{})
+	cmd.Stdin = input
+	pipeR, pipeW := io.Pipe()
+	cmd.Stdout = pipeW
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+
+	// Start the command and hand the read end of the pipe back to the caller
+	if err := cmd.Start(); err != nil {
+		return nil, nil, err
+	}
+
+	// Wait for the command in the background, closing the write end of the
+	// pipe when it exits (with the captured stderr as the error, if it failed)
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+		} else {
+			pipeW.Close()
+		}
+		close(chdone)
+	}()
+
+	return pipeR, chdone, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
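+//
+// Illustrative usage (a sketch, not part of this change; `archive` is any
+// io.Reader and `dst` any io.Writer):
+//
+//	tmp, err := NewTempArchive(archive, "") // "" selects the default temp dir
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = io.Copy(dst, tmp) // the backing file is removed once fully read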
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/pkg/archive/archive_linux.go b/pkg/archive/archive_linux.go new file mode 100644 index 0000000..f072534 --- /dev/null +++ b/pkg/archive/archive_linux.go @@ -0,0 +1,92 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + 
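// AUFS marks a deleted file with an empty ".wh.<name>" entry, whereas
+		// overlayfs expects a 0/0 character device at the original path. The
+		// translation below is essentially (sketch): mknod(dir/<name>, S_IFCHR, 0),
+		// then a chown to the uid/gid recorded in the tar header.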
+		originalBase := base[len(WhiteoutPrefix):]
+		originalPath := filepath.Join(dir, originalBase)
+
+		if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
+			return false, err
+		}
+		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+			return false, err
+		}
+
+		// don't write the file itself
+		return false, nil
+	}
+
+	return true, nil
+}
diff --git a/pkg/archive/archive_linux_test.go b/pkg/archive/archive_linux_test.go
new file mode 100644
index 0000000..d5f046e
--- /dev/null
+++ b/pkg/archive/archive_linux_test.go
@@ -0,0 +1,187 @@
+package archive
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// setupOverlayTestDir creates files in a directory with overlay whiteouts
+// Tree layout
+// .
+// ├── d1     # opaque, 0700
+// │   └── f1 # empty file, 0600
+// ├── d2     # opaque, 0750
+// │   └── f1 # empty file, 0660
+// └── d3     # 0700
+//     └── f1 # whiteout, 0644
+func setupOverlayTestDir(t *testing.T, src string) {
+	// Create opaque directory containing single file and permission 0700
+	if err := os.Mkdir(filepath.Join(src, "d1"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create another opaque directory containing single file but with permission 0750
+	if err := os.Mkdir(filepath.Join(src, "d2"), 0750); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create regular directory with deleted file
+	if err := os.Mkdir(filepath.Join(src, "d3"), 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := system.Mknod(filepath.Join(src, "d3", "f1"), syscall.S_IFCHR, 0); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func checkOpaqueness(t *testing.T, path string, opaque string) {
+	xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(xattrOpaque) != opaque {
+		t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque)
+	}
+}
+
+func checkOverlayWhiteout(t *testing.T, path string) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		t.Fatalf("Unexpected type: %T, expected *syscall.Stat_t", stat.Sys())
+	}
+	if statT.Rdev != 0 {
+		t.Fatalf("Non-zero device number for whiteout")
+	}
+}
+
+func checkFileMode(t *testing.T, path string, perm os.FileMode) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if stat.Mode() != perm {
+		t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm)
+	}
+}
+
+func TestOverlayTarUntar(t *testing.T) {
+	oldmask, err := system.Umask(0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer system.Umask(oldmask)
+
+	src, err := ioutil.TempDir("", "docker-test-overlay-tar-src")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(src)
+
+	setupOverlayTestDir(t, src)
+
+	dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dst)
+
+	options := &TarOptions{
+		Compression:    Uncompressed,
+		WhiteoutFormat: OverlayWhiteoutFormat,
+	}
+	archive, err :=
TarWithOptions(src, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, options); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) + + checkOpaqueness(t, filepath.Join(dst, "d1"), "y") + checkOpaqueness(t, filepath.Join(dst, "d2"), "y") + checkOpaqueness(t, filepath.Join(dst, "d3"), "") + checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) +} + +func TestOverlayTarAUFSUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + archive, err := TarWithOptions(src, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + }) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: AUFSWhiteoutFormat, + }); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) +} diff --git a/pkg/archive/archive_other.go b/pkg/archive/archive_other.go new file mode 100644 index 0000000..54acbf2 --- /dev/null +++ b/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go new file mode 100644 index 0000000..e85878f --- /dev/null +++ b/pkg/archive/archive_test.go @@ -0,0 +1,1260 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var tmp string + +func init() { + tmp = "/tmp/" + if runtime.GOOS == "windows" { + tmp = os.Getenv("TEMP") + `\` + } +} + +var defaultArchiver = NewDefaultArchiver() + +func defaultTarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func defaultUntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +func defaultCopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +func defaultCopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func TestIsArchivePathDir(t *testing.T) { + cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") + output, err := cmd.CombinedOutput() + if err != nil { + 
t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archivedir") { + t.Fatalf("Incorrectly recognised directory as an archive") + } +} + +func TestIsArchivePathInvalidFile(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archive") { + t.Fatalf("Incorrectly recognised invalid tar path as archive") + } + if IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") + } +} + +func TestIsArchivePathTar(t *testing.T) { + var whichTar string + if runtime.GOOS == "solaris" { + whichTar = "gtar" + } else { + whichTar = "tar" + } + cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) + cmd := exec.Command("sh", "-c", cmdStr) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if !IsArchivePath(tmp + "/archive") { + t.Fatalf("Did not recognise valid tar path as archive") + } + if !IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Did not recognise valid compressed tar path as archive") + } +} + +func testDecompressStream(t *testing.T, ext, compressCommand string) { + cmd := exec.Command("sh", "-c", + fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to create an archive file for test : %s.", output) + } + filename := "archive." + ext + archive, err := os.Open(tmp + filename) + if err != nil { + t.Fatalf("Failed to open file %s: %v", filename, err) + } + defer archive.Close() + + r, err := DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress %s: %v", filename, err) + } + if _, err = ioutil.ReadAll(r); err != nil { + t.Fatalf("Failed to read the decompressed stream: %v ", err) + } + if err = r.Close(); err != nil { + t.Fatalf("Failed to close the decompressed stream: %v ", err) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + testDecompressStream(t, "gz", "gzip -f") +} + +func TestDecompressStreamBzip2(t *testing.T) { + testDecompressStream(t, "bz2", "bzip2 -f") +} + +func TestDecompressStreamXz(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Xz not present in msys2") + } + testDecompressStream(t, "xz", "xz -f") +} + +func TestCompressStreamXzUnsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + 
+	}
+}
+
+func TestExtensionInvalid(t *testing.T) {
+	compression := Compression(-1)
+	output := compression.Extension()
+	if output != "" {
+		t.Fatalf("The extension of an invalid compression should be an empty string.")
+	}
+}
+
+func TestExtensionUncompressed(t *testing.T) {
+	compression := Uncompressed
+	output := compression.Extension()
+	if output != "tar" {
+		t.Fatalf("The extension of an uncompressed archive should be 'tar'.")
+	}
+}
+
+func TestExtensionBzip2(t *testing.T) {
+	compression := Bzip2
+	output := compression.Extension()
+	if output != "tar.bz2" {
+		t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
+	}
+}
+
+func TestExtensionGzip(t *testing.T) {
+	compression := Gzip
+	output := compression.Extension()
+	if output != "tar.gz" {
+		t.Fatalf("The extension of a gzip archive should be 'tar.gz'")
+	}
+}
+
+func TestExtensionXz(t *testing.T) {
+	compression := Xz
+	output := compression.Extension()
+	if output != "tar.xz" {
+		t.Fatalf("The extension of an xz archive should be 'tar.xz'")
+	}
+}
+
+func TestCmdStreamLargeStderr(t *testing.T) {
+	cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
+	out, _, err := cmdStream(cmd, nil)
+	if err != nil {
+		t.Fatalf("Failed to start command: %s", err)
+	}
+	errCh := make(chan error)
+	go func() {
+		_, err := io.Copy(ioutil.Discard, out)
+		errCh <- err
+	}()
+	select {
+	case err := <-errCh:
+		if err != nil {
+			t.Fatalf("Command should not have failed (err=%.100s...)", err)
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
+	}
+}
+
+func TestCmdStreamBad(t *testing.T) {
+	// TODO Windows: Figure out why this is failing in CI but not locally
+	if runtime.GOOS == "windows" {
+		t.Skip("Failing on Windows CI machines")
+	}
+	badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
+	out, _, err := cmdStream(badCmd, nil)
+	if err != nil {
+		t.Fatalf("Failed to start command: %s", err)
+	}
+	if output, err := ioutil.ReadAll(out); err == nil {
+		t.Fatalf("Command should have failed")
+	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
+		t.Fatalf("Wrong error value (%s)", err)
+	} else if s := string(output); s != "hello\n" {
+		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+	}
+}
+
+func TestCmdStreamGood(t *testing.T) {
+	cmd := exec.Command("sh", "-c", "echo hello; exit 0")
+	out, _, err := cmdStream(cmd, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if output, err := ioutil.ReadAll(out); err != nil {
+		t.Fatalf("Command should not have failed (err=%s)", err)
+	} else if s := string(output); s != "hello\n" {
+		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+	}
+}
+
+func TestUntarPathWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	invalidDestFolder := filepath.Join(tempFolder, "invalidDest")
+	// Create a src file
+	srcFile := filepath.Join(tempFolder, "src")
+	tarFile := filepath.Join(tempFolder, "src.tar")
+	os.Create(srcFile)
+	os.Create(invalidDestFolder) // being a file (not dir) should cause an error
+
+	// Translate back to Unix semantics as next exec.Command is run under sh
+	srcFileU := srcFile
+	tarFileU := tarFile
+	if runtime.GOOS == "windows" {
+		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+	}
+
+	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = defaultUntarPath(tarFile, invalidDestFolder)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid destination path should throw an error.")
+	}
+}
+
+func TestUntarPathWithInvalidSrc(t *testing.T) {
+	dest, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatalf("Failed to create the destination folder")
+	}
+	defer os.RemoveAll(dest)
+	err = defaultUntarPath("/invalid/path", dest)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid src path should throw an error.")
+	}
+}
+
+func TestUntarPath(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := filepath.Join(tmpFolder, "src")
+	tarFile := filepath.Join(tmpFolder, "src.tar")
+	os.Create(filepath.Join(tmpFolder, "src"))
+
+	destFolder := filepath.Join(tmpFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatalf("Failed to create the destination folder")
+	}
+
+	// Translate back to Unix semantics as next exec.Command is run under sh
+	srcFileU := srcFile
+	tarFileU := tarFile
+	if runtime.GOOS == "windows" {
+		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+	}
+	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = defaultUntarPath(tarFile, destFolder)
+	if err != nil {
+		t.Fatalf("UntarPath shouldn't throw an error, %s.", err)
+	}
+	expectedFile := filepath.Join(destFolder, srcFileU)
+	_, err = os.Stat(expectedFile)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+// Do the same test as above but with the destination as file, it should fail
+func TestUntarPathWithDestinationFile(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := filepath.Join(tmpFolder, "src")
+	tarFile := filepath.Join(tmpFolder, "src.tar")
+	os.Create(filepath.Join(tmpFolder, "src"))
+
+	// Translate back to Unix semantics as next exec.Command is run under sh
+	srcFileU := srcFile
+	tarFileU := tarFile
+	if runtime.GOOS == "windows" {
+		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+	}
+	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFile := filepath.Join(tmpFolder, "dest")
+	_, err = os.Create(destFile)
+	if err != nil {
+		t.Fatalf("Failed to create the destination file")
+	}
+	err = defaultUntarPath(tarFile, destFile)
+	if err == nil {
+		t.Fatalf("UntarPath should throw an error if the destination is a file")
+	}
+}
+
+// Do the same test as above, but with a destination folder that already exists
+// and a destination file that is a directory.
+// It works; see https://github.com/docker/docker/issues/10040
+func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := filepath.Join(tmpFolder, "src")
+	tarFile := filepath.Join(tmpFolder, "src.tar")
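+	// an empty source file is enough here; only its path matters to the test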
+	os.Create(srcFile)
+
+	// Translate back to Unix semantics as next exec.Command is run under sh
+	srcFileU := srcFile
+	tarFileU := tarFile
+	if runtime.GOOS == "windows" {
+		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+	}
+
+	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFolder := filepath.Join(tmpFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatalf("Failed to create the destination folder")
+	}
+	// Let's create a folder that has the same path as the extracted file (from tar)
+	destSrcFileAsFolder := filepath.Join(destFolder, srcFileU)
+	err = os.MkdirAll(destSrcFileAsFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = defaultUntarPath(tarFile, destFolder)
+	if err != nil {
+		t.Fatalf("UntarPath should not throw an error if the extracted file already exists and is a folder")
+	}
+}
+
+func TestCopyWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	destFolder := filepath.Join(tempFolder, "dest")
+	invalidSrc := filepath.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = defaultCopyWithTar(invalidSrc, destFolder)
+	if err == nil {
+		t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+	}
+}
+
+func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	srcFolder := filepath.Join(tempFolder, "src")
+	inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = defaultCopyWithTar(srcFolder, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+	}
+}
+
+// Test CopyWithTar with a file as src
+func TestCopyWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := filepath.Join(folder, "dest")
+	srcFolder := filepath.Join(folder, "src")
+	src := filepath.Join(folder, filepath.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = defaultCopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content
+	if err != nil {
+		t.Fatalf("Destination file should be the same as the source.")
+	}
+}
+
+// Test CopyWithTar with a folder as src
+func TestCopyWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := filepath.Join(folder, "dest")
+	src := filepath.Join(folder, filepath.Join("src", "folder"))
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777)
+	err = defaultCopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content (the file inside)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestCopyFileWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	destFolder := filepath.Join(tempFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	invalidFile := filepath.Join(tempFolder, "doesnotexists")
+	err = defaultCopyFileWithTar(invalidFile, destFolder)
+	if err == nil {
+		t.Fatalf("archiver.CopyFileWithTar with invalid src path should throw an error.")
+	}
+}
+
+func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	srcFile := filepath.Join(tempFolder, "src")
+	inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
+	_, err = os.Create(srcFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = defaultCopyFileWithTar(srcFile, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyFileWithTar with an inexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyFileWithTar with an inexistent folder should create it.")
+	}
+	// FIXME Test the src file and content
+}
+
+func TestCopyFileWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := filepath.Join(folder, "dest")
+	src := filepath.Join(folder, "srcfolder")
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = defaultCopyFileWithTar(src, dest)
+	if err == nil {
+		t.Fatalf("CopyFileWithTar should throw an error with a folder.")
+	}
+}
+
+func TestCopyFileWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := filepath.Join(folder, "dest")
+	srcFolder := filepath.Join(folder, "src")
+	src := filepath.Join(folder, filepath.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = defaultCopyFileWithTar(src, dest+"/")
+	if err != nil {
+		t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestTarFiles(t *testing.T) {
+	// TODO Windows: Figure out how to port this test.
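+	// checkNoChanges (below) tars srcDir, untars the result into destDir, and
+	// then requires ChangesDirs to report zero differences between the trees.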
+ if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = defaultTarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + // TODO Windows: Figure out how to fix this test. 
+ if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, + {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. 
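+	// tar.Next resolves GNU long-name extension entries transparently, so we
+	// only need to scan for the expected long path surfacing intact.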
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, false) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := defaultTarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := defaultTarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + 
Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} + +func TestReplaceFileTarWrapper(t *testing.T) { + filesInArchive := 20 + testcases := []struct { + doc string + filename string + modifier TarModifierFunc + expected string + fileCount int + }{ + { + doc: "Modifier creates a new file", + filename: "newfile", + modifier: createModifier(t), + expected: "the new content", + fileCount: filesInArchive + 1, + }, + { + doc: "Modifier replaces a file", + filename: "file-2", + modifier: createOrReplaceModifier, + expected: "the new content", + fileCount: filesInArchive, + }, + { + doc: "Modifier replaces the last file", + filename: fmt.Sprintf("file-%d", filesInArchive-1), + modifier: createOrReplaceModifier, + expected: "the new content", + fileCount: filesInArchive, + }, + { + doc: "Modifier appends to a file", + filename: "file-3", + modifier: appendModifier, + expected: "fooo\nnext line", + fileCount: filesInArchive, + }, + } + + for _, testcase := range testcases { + sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) + defer cleanup() + + resultArchive := ReplaceFileTarWrapper( + sourceArchive, + map[string]TarModifierFunc{testcase.filename: testcase.modifier}) + + actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) + assert.Equal(t, testcase.expected, actual, testcase.doc) + } +} + +func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + require.NoError(t, err) + + _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) + require.NoError(t, err) + + sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) + require.NoError(t, err) + return sourceArchive, func() { + os.RemoveAll(srcDir) + sourceArchive.Close() + } +} + +func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + return &tar.Header{ + Mode: 0600, + Typeflag: tar.TypeReg, + }, []byte("the new content"), nil +} + +func createModifier(t *testing.T) TarModifierFunc { + return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + assert.Nil(t, content) + return createOrReplaceModifier(path, header, content) + } +} + +func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + buffer := bytes.Buffer{} + if content != nil { + if _, err := buffer.ReadFrom(content); err != nil { + return nil, nil, err + } + } + buffer.WriteString("\nnext line") + return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil +} + +func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { + destDir, err := ioutil.TempDir("", "docker-test-destDir") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + err = Untar(archive, destDir, nil) + require.NoError(t, err) + + files, _ := ioutil.ReadDir(destDir) + 
assert.Len(t, files, expectedCount, doc) + + content, err := ioutil.ReadFile(filepath.Join(destDir, name)) + assert.NoError(t, err) + return string(content) +} diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go new file mode 100644 index 0000000..a33f0fe --- /dev/null +++ b/pkg/archive/archive_unix.go @@ -0,0 +1,121 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = uint64(s.Ino) + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, 
hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/pkg/archive/archive_unix_test.go b/pkg/archive/archive_unix_test.go new file mode 100644 index 0000000..4eeafdd --- /dev/null +++ b/pkg/archive/archive_unix_test.go @@ -0,0 +1,249 @@ +// +build !windows + +package archive + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out)
+		}
+	}
+}
+
+func TestTarWithHardLink(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil {
+		t.Fatal(err)
+	}
+
+	var i1, i2 uint64
+	if i1, err = getNlink(filepath.Join(origin, "1")); err != nil {
+		t.Fatal(err)
+	}
+	// sanity check that we can hardlink
+	if i1 != 2 {
+		t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if i1, err = getInode(filepath.Join(dest, "1")); err != nil {
+		t.Fatal(err)
+	}
+	if i2, err = getInode(filepath.Join(dest, "2")); err != nil {
+		t.Fatal(err)
+	}
+
+	if i1 != i2 {
+		t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+	}
+}
+
+func getNlink(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	// We need this conversion on ARM64
+	return uint64(statT.Nlink), nil
+}
+
+func getInode(path string) (uint64, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	statT, ok := stat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+	}
+	return statT.Ino, nil
+}
+
+func TestTarWithBlockCharFifo(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	changes, err := ChangesDirs(origin, dest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) > 0 {
+		t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untarred): %v", changes)
+	}
+}
+
+// TestTarUntarWithXattr is Unix-only as Lsetxattr is not supported on Windows
+func TestTarUntarWithXattr(t *testing.T) {
+	if runtime.GOOS == "solaris" {
+		t.Skip()
+	}
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+		capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability")
+		if capability == nil || capability[0] != 0x00 {
+			t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+		}
+	}
+}
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go
new file mode 100644
index 0000000..a22410c
--- /dev/null
+++ b/pkg/archive/archive_windows.go
@@ -0,0 +1,79 @@
+// +build windows
+
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+	return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+	return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+	// windows: convert windows style relative path with backslashes
+	// into forward slashes. Since windows does not allow '/' or '\'
+	// in file names, it is mostly safe to replace however we must
+	// check just in case
+	if strings.Contains(p, "/") {
+		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+	}
+	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform where the archiving is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+	permPart := perm & os.ModePerm
+	noPermPart := perm &^ os.ModePerm
+	// Add the x bit: make everything +x from windows
+	permPart |= 0111
+	permPart &= 0755
+
+	return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+	// do nothing. 
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+	// do nothing. no notion of Rdev, Nlink in stat on Windows
+	return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+	// do nothing. no notion of Inode in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+	// no notion of file ownership mapping yet on Windows
+	return idtools.IDPair{UID: 0, GID: 0}, nil
+}
diff --git a/pkg/archive/archive_windows_test.go b/pkg/archive/archive_windows_test.go
new file mode 100644
index 0000000..685e114
--- /dev/null
+++ b/pkg/archive/archive_windows_test.go
@@ -0,0 +1,93 @@
+// +build windows
+
+package archive
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestCopyFileWithInvalidDest(t *testing.T) {
+	// TODO Windows: This is currently failing. Not sure what has
+	// recently changed in CopyWithTar, as it used to pass. Further
+	// investigation is required.
+	t.Skip("Currently fails")
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := "c:dest"
+	srcFolder := filepath.Join(folder, "src")
+	src := filepath.Join(folder, "src", "src")
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = defaultCopyWithTar(src, dest)
+	if err == nil {
+		t.Fatalf("archiver.CopyWithTar should return an error on an invalid dest.")
+	}
+}
+
+func TestCanonicalTarNameForPath(t *testing.T) {
+	cases := []struct {
+		in, expected string
+		shouldFail   bool
+	}{
+		{"foo", "foo", false},
+		{"foo/bar", "___", true}, // unix-styled windows path must fail
+		{`foo\bar`, "foo/bar", false},
+	}
+	for _, v := range cases {
+		if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
+			t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+		} else if v.shouldFail && err == nil {
+			t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
+		} else if !v.shouldFail && out != v.expected {
+			t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+		}
+	}
+}
+
+func TestCanonicalTarName(t *testing.T) {
+	cases := []struct {
+		in       string
+		isDir    bool
+		expected string
+	}{
+		{"foo", false, "foo"},
+		{"foo", true, "foo/"},
+		{`foo\bar`, false, "foo/bar"},
+		{`foo\bar`, true, "foo/bar/"},
+	}
+	for _, v := range cases {
+		if out, err := canonicalTarName(v.in, v.isDir); err != nil {
+			t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+		} else if out != v.expected {
+			t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+		}
+	}
+}
+
+func TestChmodTarEntry(t *testing.T) {
+	cases := []struct {
+		in, expected os.FileMode
+	}{
+		{0000, 0111},
+		{0777, 0755},
+		{0644, 0755},
+		{0755, 0755},
+		{0444, 0555},
+		{0755 | os.ModeDir, 0755 | os.ModeDir},
+		{0755 | os.ModeSymlink, 0755 | os.ModeSymlink},
+	}
+	for _, v := range cases {
+		if out := chmodTarEntry(v.in); out != v.expected {
+			t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
+		}
+	}
+}
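
A brief usage sketch of the canonicalization above (illustration only, not part of the patch; error handling elided):

	name, _ := CanonicalTarNameForPath(`dir\file.txt`) // name == "dir/file.txt"
	_, err := CanonicalTarNameForPath("dir/file.txt")  // err != nil: forward slashes are rejected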
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go new file mode 100644 index 0000000..5ca39b7 --- /dev/null +++ b/pkg/archive/changes.go @@ -0,0 +1,441 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
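+		// (the root node is the only FileInfo without a parent; its path is
+		// simply the separator: "/" on Unix, `\` on Windows)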
+		return string(os.PathSeparator)
+	}
+	return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+	sizeAtEntry := len(*changes)
+
+	if oldInfo == nil {
+		// add
+		change := Change{
+			Path: info.path(),
+			Kind: ChangeAdd,
+		}
+		*changes = append(*changes, change)
+		info.added = true
+	}
+
+	// We make a copy so we can modify it to detect additions.
+	// Also, we only recurse on the old dir if the new info is a directory;
+	// otherwise any previous delete/change is considered recursive.
+	oldChildren := make(map[string]*FileInfo)
+	if oldInfo != nil && info.isDir() {
+		for k, v := range oldInfo.children {
+			oldChildren[k] = v
+		}
+	}
+
+	for name, newChild := range info.children {
+		oldChild := oldChildren[name]
+		if oldChild != nil {
+			// change?
+			oldStat := oldChild.stat
+			newStat := newChild.stat
+			// Note: We can't compare inode or ctime or blocksize here, because these change
+			// when copying a file into a container. However, that is not generally a problem
+			// because any content change will change mtime, and any status change should
+			// be visible when actually comparing the stat fields. The only time this
+			// breaks down is if some code intentionally hides a change by setting
+			// back mtime
+			if statDifferent(oldStat, newStat) ||
+				!bytes.Equal(oldChild.capability, newChild.capability) {
+				change := Change{
+					Path: newChild.path(),
+					Kind: ChangeModify,
+				}
+				*changes = append(*changes, change)
+				newChild.added = true
+			}
+
+			// Remove from copy so we can detect deletions
+			delete(oldChildren, name)
+		}
+
+		newChild.addChanges(oldChild, changes)
+	}
+	for _, oldChild := range oldChildren {
+		// delete
+		change := Change{
+			Path: oldChild.path(),
+			Kind: ChangeDelete,
+		}
+		*changes = append(*changes, change)
+	}
+
+	// If there were changes inside this directory, we need to add it, even if the directory
+	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+	// As this runs on the daemon side, file paths are OS specific.
+	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+		change := Change{
+			Path: info.path(),
+			Kind: ChangeModify,
+		}
+		// Let's insert the directory entry before the recently added entries located inside this dir
+		*changes = append(*changes, change) // just to resize the slice, will be overwritten
+		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+		(*changes)[sizeAtEntry] = change
+	}
+}
+
+// Changes computes the changes between info and oldInfo, returning one
+// Change per added, modified, or deleted file.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+	var changes []Change
+
+	info.addChanges(oldInfo, &changes)
+
+	return changes
+}
+
+func newRootFileInfo() *FileInfo {
+	// As this runs on the daemon side, file paths are OS specific.
+	root := &FileInfo{
+		name:     string(os.PathSeparator),
+		children: make(map[string]*FileInfo),
+	}
+	return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
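+// A usage sketch (paths illustrative): changes, err := ChangesDirs("/tmp/dst", "/tmp/src")
+// yields one Change per added, modified, or deleted path under /tmp/dst.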
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+	var (
+		oldRoot, newRoot *FileInfo
+	)
+	if oldDir == "" {
+		emptyDir, err := ioutil.TempDir("", "empty")
+		if err != nil {
+			return nil, err
+		}
+		defer os.Remove(emptyDir)
+		oldDir = emptyDir
+	}
+	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+	if err != nil {
+		return nil, err
+	}
+
+	return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+	var (
+		size int64
+		sf   = make(map[uint64]struct{})
+	)
+	for _, change := range changes {
+		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+			file := filepath.Join(newDir, change.Path)
+			fileInfo, err := os.Lstat(file)
+			if err != nil {
+				logrus.Errorf("Cannot stat %q: %s", file, err)
+				continue
+			}
+
+			if fileInfo != nil && !fileInfo.IsDir() {
+				if hasHardlinks(fileInfo) {
+					inode := getIno(fileInfo)
+					if _, ok := sf[inode]; !ok {
+						size += fileInfo.Size()
+						sf[inode] = struct{}{}
+					}
+				} else {
+					size += fileInfo.Size()
+				}
+			}
+		}
+	}
+	return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+	reader, writer := io.Pipe()
+	go func() {
+		ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer)
+
+		// this buffer is needed for the duration of this piped stream
+		defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+		sort.Sort(changesByPath(changes))
+
+		// In general we log errors here but ignore them because
+		// during e.g. a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+				timestamp := time.Now()
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
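
A short illustration of the whiteout naming used by ExportChanges above (values illustrative, not part of the patch):

	whiteOut := filepath.Join(filepath.Dir("/dir/file"), WhiteoutPrefix+filepath.Base("/dir/file"))
	// whiteOut == "/dir/.wh.file"; the tar header's Name is whiteOut[1:] == "dir/.wh.file",
	// an empty entry that marks "/dir/file" as deleted when the layer is applied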
diff --git a/pkg/archive/changes_linux.go b/pkg/archive/changes_linux.go
new file mode 100644
index 0000000..7718602
--- /dev/null
+++ b/pkg/archive/changes_linux.go
@@ -0,0 +1,312 @@
+package archive
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+	if fi == nil {
+		return nil
+	}
+	parent := root.LookUp(filepath.Dir(path))
+	if parent == nil {
+		return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+	}
+	info := &FileInfo{
+		name:     filepath.Base(path),
+		children: make(map[string]*FileInfo),
+		parent:   parent,
+	}
+	cpath := filepath.Join(dir, path)
+	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+	if err != nil {
+		return err
+	}
+	info.stat = stat
+	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+	parent.children[info.name] = info
+	return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+	// Register these nodes with the return trees, unless we're still at the
+	// (already-created) roots:
+	if path != "/" {
+		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+			return err
+		}
+		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+			return err
+		}
+	}
+
+	is1Dir := i1 != nil && i1.IsDir()
+	is2Dir := i2 != nil && i2.IsDir()
+
+	sameDevice := false
+	if i1 != nil && i2 != nil {
+		si1 := i1.Sys().(*syscall.Stat_t)
+		si2 := i2.Sys().(*syscall.Stat_t)
+		if si1.Dev == si2.Dev {
+			sameDevice = true
+		}
+	}
+
+	// If these files are both non-existent, or leaves (non-dirs), we are done.
+	if !is1Dir && !is2Dir {
+		return nil
+	}
+
+	// Fetch the names of all the files contained in both directories being walked:
+	var names1, names2 []nameIno
+	if is1Dir {
+		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+		if err != nil {
+			return err
+		}
+	}
+	if is2Dir {
+		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+		if err != nil {
+			return err
+		}
+	}
+
+	// We have lists of the files contained in both parallel directories, sorted
+	// in the same order.
Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/pkg/archive/changes_other.go b/pkg/archive/changes_other.go new file mode 100644 index 0000000..da70ed3 --- /dev/null +++ b/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
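+		// (e.g. a relPath of `\\foo` is trimmed to `\foo`)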
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/pkg/archive/changes_posix_test.go b/pkg/archive/changes_posix_test.go new file mode 100644 index 0000000..095102e --- /dev/null +++ b/pkg/archive/changes_posix_test.go @@ -0,0 +1,132 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + //TODO Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip("gcp failures on Solaris") + } + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th 
tarHeaders) Swap(i, j int)      { th[j], th[i] = th[i], th[j] }
+func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
+
+func walkHeaders(r io.Reader) ([]tar.Header, error) {
+	t := tar.NewReader(r)
+	headers := []tar.Header{}
+	for {
+		hdr, err := t.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return headers, err
+		}
+		headers = append(headers, *hdr)
+	}
+	return headers, nil
+}
diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go
new file mode 100644
index 0000000..eae1d02
--- /dev/null
+++ b/pkg/archive/changes_test.go
@@ -0,0 +1,572 @@
+package archive
+
+import (
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"runtime"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func max(x, y int) int {
+	if x >= y {
+		return x
+	}
+	return y
+}
+
+func copyDir(src, dst string) error {
+	cmd := exec.Command("cp", "-a", src, dst)
+	if runtime.GOOS == "solaris" {
+		cmd = exec.Command("gcp", "-a", src, dst)
+	}
+
+	if err := cmd.Run(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type FileType uint32
+
+const (
+	Regular FileType = iota
+	Dir
+	Symlink
+)
+
+type FileData struct {
+	filetype    FileType
+	path        string
+	contents    string
+	permissions os.FileMode
+}
+
+func createSampleDir(t *testing.T, root string) {
+	files := []FileData{
+		{Regular, "file1", "file1\n", 0600},
+		{Regular, "file2", "file2\n", 0666},
+		{Regular, "file3", "file3\n", 0404},
+		{Regular, "file4", "file4\n", 0600},
+		{Regular, "file5", "file5\n", 0600},
+		{Regular, "file6", "file6\n", 0600},
+		{Regular, "file7", "file7\n", 0600},
+		{Dir, "dir1", "", 0740},
+		{Regular, "dir1/file1-1", "file1-1\n", 01444},
+		{Regular, "dir1/file1-2", "file1-2\n", 0666},
+		{Dir, "dir2", "", 0700},
+		{Regular, "dir2/file2-1", "file2-1\n", 0666},
+		{Regular, "dir2/file2-2", "file2-2\n", 0666},
+		{Dir, "dir3", "", 0700},
+		{Regular, "dir3/file3-1", "file3-1\n", 0666},
+		{Regular, "dir3/file3-2", "file3-2\n", 0666},
+		{Dir, "dir4", "", 0700},
+		{Regular, "dir4/file3-1", "file4-1\n", 0666},
+		{Regular, "dir4/file3-2", "file4-2\n", 0666},
+		{Symlink, "symlink1", "target1", 0666},
+		{Symlink, "symlink2", "target2", 0666},
+		{Symlink, "symlink3", root + "/file1", 0666},
+		{Symlink, "symlink4", root + "/symlink3", 0666},
+		{Symlink, "dirSymlink", root + "/dir1", 0740},
+	}
+
+	now := time.Now()
+	for _, info := range files {
+		p := path.Join(root, info.path)
+		if info.filetype == Dir {
+			if err := os.MkdirAll(p, info.permissions); err != nil {
+				t.Fatal(err)
+			}
+		} else if info.filetype == Regular {
+			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
+				t.Fatal(err)
+			}
+		} else if info.filetype == Symlink {
+			if err := os.Symlink(info.contents, p); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		if info.filetype != Symlink {
+			// Set a consistent ctime, atime for all files and dirs
+			if err := system.Chtimes(p, now, now); err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+}
+
+func TestChangeString(t *testing.T) {
+	modifyChange := Change{"change", ChangeModify}
+	toString := modifyChange.String()
+	if toString != "C change" {
+		t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString)
+	}
+	addChange := Change{"change", ChangeAdd}
+	toString = addChange.String()
+	if toString != "A change" {
+		t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
+	}
change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+ if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create a directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failure on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, 
"filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+
+func TestApplyLayer(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	// TODO Should work for Solaris
+	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
+		t.Skip("symlinks on Windows; gcp failures on Solaris")
+	}
+	src, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	createSampleDir(t, src)
+	defer os.RemoveAll(src)
+	dst := src + "-copy"
+	if err := copyDir(src, dst); err != nil {
+		t.Fatal(err)
+	}
+	mutateSampleDir(t, dst)
+	defer os.RemoveAll(dst)
+
+	changes, err := ChangesDirs(dst, src)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer, err := ExportChanges(dst, changes, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layerCopy, err := NewTempArchive(layer, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := ApplyLayer(src, layerCopy); err != nil {
+		t.Fatal(err)
+	}
+
+	changes2, err := ChangesDirs(src, dst)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(changes2) != 0 {
+		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
+	}
+}
+
+func TestChangesSizeWithHardlinks(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as createSampleDir uses symlinks.
+	if runtime.GOOS == "windows" {
+		t.Skip("hardlinks on Windows")
+	}
+	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(srcDir)
+
+	destDir, err := ioutil.TempDir("", "docker-test-destDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(destDir)
+
+	creationSize, err := prepareUntarSourceDirectory(100, destDir, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	changes, err := ChangesDirs(destDir, srcDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	got := ChangesSize(destDir, changes)
+	if got != int64(creationSize) {
+		t.Errorf("Expected %d bytes of changes, got %d", creationSize, got)
+	}
+}
+
+func TestChangesSizeWithNoChanges(t *testing.T) {
+	size := ChangesSize("/tmp", nil)
+	if size != 0 {
+		t.Fatalf("ChangesSize with no changes should be 0, but was %d", size)
+	}
+}
+
+func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
+	changes := []Change{
+		{Path: "deletedPath", Kind: ChangeDelete},
+	}
+	size := ChangesSize("/tmp", changes)
+	if size != 0 {
+		t.Fatalf("ChangesSize with only delete changes should be 0, but was %d", size)
+	}
+}
+
+func TestChangesSize(t *testing.T) {
+	parentPath, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(parentPath)
+	addition := path.Join(parentPath, "addition")
+	if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+		t.Fatal(err)
+	}
+	modification := path.Join(parentPath, "modification")
+	if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+		t.Fatal(err)
+	}
+	changes := []Change{
+		{Path: "addition", Kind: ChangeAdd},
+		{Path: "modification", Kind: ChangeModify},
+	}
+	size := ChangesSize(parentPath, changes)
+	if size != 6 {
+		t.Fatalf("Expected 6 bytes of changes, got %d", size)
+	}
+}
+
+func checkChanges(expectedChanges, changes []Change, t *testing.T) {
+	sort.Sort(changesByPath(expectedChanges))
+	sort.Sort(changesByPath(changes))
+	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+		if i >= len(expectedChanges) {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		}
+		if i >= len(changes) {
+			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+		}
+		if changes[i].Path == expectedChanges[i].Path {
+			if changes[i] != expectedChanges[i] {
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
+			}
+		} else if changes[i].Path < expectedChanges[i].Path {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		} else {
+			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+		}
+	}
+}
diff --git a/pkg/archive/changes_unix.go b/pkg/archive/changes_unix.go
new file mode 100644
index 0000000..3778b73
--- /dev/null
+++ b/pkg/archive/changes_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs, it's not a good measure of change
+		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/pkg/archive/changes_windows.go b/pkg/archive/changes_windows.go
new file mode 100644
index 0000000..6fd3532
--- /dev/null
+++ b/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mtim() != newStat.Mtim() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/pkg/archive/copy.go b/pkg/archive/copy.go
new file mode 100644
index 0000000..0614c67
--- /dev/null
+++ b/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/system"
+)
+
+// Errors used or returned by this file.
+var (
+	ErrNotDirectory      = errors.New("not a directory")
+	ErrDirNotExists      = errors.New("no such directory")
+	ErrCannotCopyDir     = errors.New("cannot copy directory")
+	ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in a path separator, then another is not added.
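+//
+// For example: PreserveTrailingDotOrSeparator("/foo", "/foo/") == "/foo/",
+// and PreserveTrailingDotOrSeparator("/foo", "/foo/.") == "/foo/.".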
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+	// Ensure paths are in platform semantics
+	cleanedPath = normalizePath(cleanedPath)
+	originalPath = normalizePath(originalPath)
+
+	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+		if !hasTrailingPathSeparator(cleanedPath) {
+			// Add a separator if it doesn't already end with one (a cleaned
+			// path would only end in a separator if it is the root).
+			cleanedPath += string(filepath.Separator)
+		}
+		cleanedPath += "."
+	}
+
+	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+		cleanedPath += string(filepath.Separator)
+	}
+
+	return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+	return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path, preserving a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+	cleanedPath := filepath.Clean(normalizePath(path))
+
+	if specifiesCurrentDir(path) {
+		cleanedPath += string(filepath.Separator) + "."
+	}
+
+	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
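+	// (e.g. "/a/b/c" splits into dir "/a/b" and base "c"; "/a/b/." into
+	// dir "/a/b" and base ".")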
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+	filter := []string{sourceBase}
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+	return TarWithOptions(sourceDir, &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	})
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symbolic link.
+	// we will use the target file instead of the symbolic link if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
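+
+// Illustrative behaviour (paths hypothetical): CopyInfoSourcePath("/dir/link", false)
+// describes the symlink itself, while CopyInfoSourcePath("/dir/link", true)
+// resolves the link and describes its target.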
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := SplitPathDirEntry(path)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		path = linkTarget
+		stat, err = os.Lstat(path)
+	}
+
+	if err != nil {
+		// It's okay if the destination path doesn't exist. We can still
+		// continue the copy operation if the parent directory exists.
+		if !os.IsNotExist(err) {
+			return CopyInfo{}, err
+		}
+
+		// Ensure destination parent dir exists.
+		dstParent, _ := SplitPathDirEntry(path)
+
+		parentDirStat, err := os.Lstat(dstParent)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+		if !parentDirStat.IsDir() {
+			return CopyInfo{}, ErrNotDirectory
+		}
+
+		return CopyInfo{Path: path}, nil
+	}
+
+	// The path exists after resolving symlinks.
+	return CopyInfo{
+		Path:   path,
+		Exists: true,
+		IsDir:  stat.IsDir(),
+	}, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+	// Ensure in platform semantics
+	srcInfo.Path = normalizePath(srcInfo.Path)
+	dstInfo.Path = normalizePath(dstInfo.Path)
+
+	// Separate the destination path between its directory and base
+	// components in case the source archive contents need to be rebased.
+	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+	_, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+	switch {
+	case dstInfo.Exists && dstInfo.IsDir:
+		// The destination exists as a directory. No alteration
+		// to srcContent is needed as its contents can be
+		// simply extracted to the destination directory.
+		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+	case dstInfo.Exists && srcInfo.IsDir:
+		// The destination exists as some type of file and the source
+		// content is a directory. This is an error condition since
+		// you cannot copy a directory to an existing file location.
+		return "", nil, ErrCannotCopyDir
+	case dstInfo.Exists:
+		// The destination exists as some type of file and the source content
+		// is also a file. The source content entry will have to be renamed to
+		// have a basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case srcInfo.IsDir:
+		// The destination does not exist and the source content is an archive
+		// of a directory. The archive should be extracted to the parent of
+		// the destination path instead, and when it is, the directory that is
+		// created as a result should take the name of the destination path.
+		// The source content entries will have to be renamed to have a
+		// basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
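// Illustrative sketch, not part of the original patch: the one-call entry
// point built from the helpers above. Assuming /tmp/a/dir1 exists and /tmp/b
// is an existing directory, this behaves like `cp -a /tmp/a/dir1 /tmp/b/dir1`
// done as a tar/untar round trip.
func exampleCopyResource() error {
    return CopyResource("/tmp/a/dir1", "/tmp/b", true)
}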
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symbolic links should be followed. If followLink is true,
+// resolvedPath is the link target of any symlink at path; otherwise only
+// symlinks in the parent directory are resolved, and a symlink at the base
+// is returned as-is, without resolving it.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// If we are not following the symlink, resolve only symlinks in the
+		// parent directory.
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path along with the rebased file name, if any.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separator or
+	// "/." suffix), so we may need to re-append those elements manually.
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+		rebaseName = filepath.Base(path)
+	}
+	return resolvedPath, rebaseName
+}
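// Illustrative sketch, not part of the original patch: what GetRebaseName
// reports when symlink evaluation changes the final path component. Assumes
// a hypothetical link /a/lnk pointing at the directory /a/dir.
func exampleGetRebaseName() {
    resolved, rebase := GetRebaseName("/a/lnk/", "/a/dir")
    // resolved == "/a/dir/": the trailing separator from the request is
    // restored. rebase == "lnk": entries in the copied archive are renamed
    // back to the originally requested basename.
    logrus.Debugf("resolved=%q rebase=%q", resolved, rebase)
}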
diff --git a/pkg/archive/copy_unix.go b/pkg/archive/copy_unix.go
new file mode 100644
index 0000000..e305b5e
--- /dev/null
+++ b/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.ToSlash(path)
+}
diff --git a/pkg/archive/copy_unix_test.go b/pkg/archive/copy_unix_test.go
new file mode 100644
index 0000000..4d5ae79
--- /dev/null
+++ b/pkg/archive/copy_unix_test.go
@@ -0,0 +1,978 @@
+// +build !windows
+
+// TODO Windows: Some of these tests may be salvageable and portable to Windows.
+
+package archive
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+func removeAllPaths(paths ...string) {
+	for _, path := range paths {
+		os.RemoveAll(path)
+	}
+}
+
+func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
+	var err error
+
+	if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
+		t.Fatal(err)
+	}
+
+	if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
+		t.Fatal(err)
+	}
+
+	return
+}
+
+func isNotDir(err error) bool {
+	return strings.Contains(err.Error(), "not a directory")
+}
+
+func joinTrailingSep(pathElements ...string) string {
+	joined := filepath.Join(pathElements...)
+
+	return fmt.Sprintf("%s%c", joined, filepath.Separator)
+}
+
+func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) {
+	t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)
+
+	fileA, err := os.Open(filenameA)
+	if err != nil {
+		return
+	}
+	defer fileA.Close()
+
+	fileB, err := os.Open(filenameB)
+	if err != nil {
+		return
+	}
+	defer fileB.Close()
+
+	hasher := sha256.New()
+
+	if _, err = io.Copy(hasher, fileA); err != nil {
+		return
+	}
+
+	hashA := hasher.Sum(nil)
+	hasher.Reset()
+
+	if _, err = io.Copy(hasher, fileB); err != nil {
+		return
+	}
+
+	hashB := hasher.Sum(nil)
+
+	if !bytes.Equal(hashA, hashB) {
+		err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
+	}
+
+	return
+}
+
+func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) {
+	t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir)
+
+	var changes []Change
+
+	if changes, err = ChangesDirs(newDir, oldDir); err != nil {
+		return
+	}
+
+	if len(changes) != 0 {
+		err = fmt.Errorf("expected no changes between directories, but got: %v", changes)
+	}
+
+	return
+}
+
+func logDirContents(t *testing.T, dirPath string) {
+	logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			t.Errorf("stat error for path %q: %s", path, err)
+			return nil
+		}
+
+		if info.IsDir() {
+			path = joinTrailingSep(path)
+		}
+
+		t.Logf("\t%s", path)
+
+		return nil
+	})
+
+	t.Logf("logging directory contents: %q", dirPath)
+
+	if err := filepath.Walk(dirPath, logWalkedPaths); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
+	t.Logf("copying from %q to %q (not following symlinks)", srcPath, dstPath)
+
+	return CopyResource(srcPath, dstPath, false)
+}
+
+func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) {
+	t.Logf("copying from %q to %q (following symlinks)", srcPath, dstPath)
+
+	return CopyResource(srcPath, dstPath, true)
+}
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
+
+// Test for error when SRC does not exist.
+func TestCopyErrSrcNotExists(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) {
+		t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+	}
+}
+
+// Test for error when SRC ends in a trailing
+// path separator but it exists as a file.
+func TestCopyErrSrcNotDir(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+
+	if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) {
+		t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+	}
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func TestCopyErrDstParentNotExists(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. + if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. 
+func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + symlinkPath1 := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + + if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// C. Symbol link following version: +// SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseCFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	symlinkPathBad := filepath.Join(tmpDirA, "symlink1")
+	symlinkPath := filepath.Join(tmpDirA, "symlink3")
+	linkTarget := filepath.Join(tmpDirA, "file1")
+	dstPath := filepath.Join(tmpDirB, "file2")
+
+	var err error
+
+	// First test a broken symlink.
+	if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil {
+		t.Fatal("expected an error when copying a broken symlink, but got nil instead")
+	}
+
+	// Test symlink -> symlink -> target.
+	// Ensure they start out different.
+	if err = fileContentsEqual(t, linkTarget, dstPath); err == nil {
+		t.Fatal("expected different file contents")
+	}
+
+	if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// D. SRC specifies a file and DST exists as a directory. This should place
+// a copy of the source file inside it using the basename from SRC. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseD(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcPath := filepath.Join(tmpDirA, "file1")
+	dstDir := filepath.Join(tmpDirB, "dir1")
+	dstPath := filepath.Join(dstDir, "file1")
+
+	var err error
+
+	// Ensure that dstPath doesn't exist.
+	if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
+		t.Fatalf("did not expect dstPath %q to exist", dstPath)
+	}
+
+	if err = testCopyHelper(t, srcPath, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir1")
+
+	if err = testCopyHelper(t, srcPath, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// D. Symbol link following version:
+// SRC specifies a file and DST exists as a directory. This should place
+// a copy of the source file inside it using the basename from SRC. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseDFSym(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcPath := filepath.Join(tmpDirA, "symlink4")
+	linkTarget := filepath.Join(tmpDirA, "file1")
+	dstDir := filepath.Join(tmpDirB, "dir1")
+	dstPath := filepath.Join(dstDir, "symlink4")
+
+	var err error
+
+	// Ensure that dstPath doesn't exist.
+	if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
+		t.Fatalf("did not expect dstPath %q to exist", dstPath)
+	}
+
+	if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// E. Symbol link following version: +// SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseEFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := filepath.Join(tmpDirA, "dir1")
+	symSrcDir := filepath.Join(tmpDirA, "dirSymlink")
+	dstFile := filepath.Join(tmpDirB, "file1")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+	}
+
+	if err != ErrCannotCopyDir {
+		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+	}
+
+	// Now test with a symlink.
+	if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil {
+		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+	}
+
+	if err != ErrCannotCopyDir {
+		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+	}
+}
+
+// G. SRC specifies a directory and DST exists as a directory. This should copy
+// the SRC directory and all its contents to the DST directory. Ensure this
+// works whether DST has a trailing path separator or not.
+func TestCopyCaseG(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := filepath.Join(tmpDirA, "dir1")
+	dstDir := filepath.Join(tmpDirB, "dir2")
+	resultDir := filepath.Join(dstDir, "dir1")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir2")
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// G. Symbol link following version:
+// SRC specifies a directory and DST exists as a directory. This should copy
+// the SRC directory and all its contents to the DST directory. Ensure this
+// works whether DST has a trailing path separator or not.
+func TestCopyCaseGFSym(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := filepath.Join(tmpDirA, "dirSymlink")
+	linkTarget := filepath.Join(tmpDirA, "dir1")
+	dstDir := filepath.Join(tmpDirB, "dir2")
+	resultDir := filepath.Join(dstDir, "dirSymlink")
+
+	var err error
+
+	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, resultDir, linkTarget); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir2")
+
+	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, resultDir, linkTarget); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// H.
SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// H. Symbol link following version: +// SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseHFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
+	symSrcDir := filepath.Join(tmpDirB, "dirSymlink")
+	dstFile := filepath.Join(tmpDirB, "file1")
+
+	var err error
+
+	if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+	}
+
+	if err != ErrCannotCopyDir {
+		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+	}
+
+	// Now try with a symlink to a directory.
+	if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil {
+		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+	}
+
+	if err != ErrCannotCopyDir {
+		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+	}
+}
+
+// J. SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. Ensure this works whether DST has a
+// trailing path separator or not.
+func TestCopyCaseJ(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+	dstDir := filepath.Join(tmpDirB, "dir5")
+
+	var err error
+
+	// First create an empty dir.
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// J. Symbol link following version:
+// SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. Ensure this works whether DST has a
+// trailing path separator or not.
+func TestCopyCaseJFSym(t *testing.T) {
+	tmpDirA, tmpDirB := getTestTempDirs(t)
+	defer removeAllPaths(tmpDirA, tmpDirB)
+
+	// Load A and B with some sample files and directories.
+	createSampleDir(t, tmpDirA)
+	createSampleDir(t, tmpDirB)
+
+	srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "."
+	linkTarget := filepath.Join(tmpDirA, "dir1")
+	dstDir := filepath.Join(tmpDirB, "dir5")
+
+	var err error
+
+	// First create an empty dir.
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now try again but using a trailing path separator for dstDir.
+
+	if err = os.RemoveAll(dstDir); err != nil {
+		t.Fatalf("unable to remove dstDir: %s", err)
+	}
+
+	if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+		t.Fatalf("unable to make dstDir: %s", err)
+	}
+
+	dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+		t.Fatalf("unexpected error %T: %s", err, err)
+	}
+
+	if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/archive/copy_windows.go b/pkg/archive/copy_windows.go
new file mode 100644
index 0000000..2b775b4
--- /dev/null
+++ b/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.FromSlash(path)
+}
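// Illustrative sketch, not part of the original patch: the Unix and Windows
// normalizePath variants above give the rest of the package one native
// separator convention. The input path is hypothetical.
func exampleNormalizePath() string {
    // On Unix this is filepath.ToSlash and returns "/tmp/a/b" unchanged;
    // on Windows, filepath.FromSlash would turn "/tmp/a/b" into `\tmp\a\b`.
    return normalizePath("/tmp/a/b")
}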
diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go
new file mode 100644
index 0000000..a2766b5
--- /dev/null
+++ b/pkg/archive/diff.go
@@ -0,0 +1,256 @@
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+	tr := tar.NewReader(layer)
+	trBuf := pools.BufioReader32KPool.Get(tr)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	unpackedPaths := make(map[string]struct{})
+
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+	aufsTempdir := ""
+	aufsHardlinks := make(map[string]*tar.Header)
+
+	// Iterate through the files in the archive.
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return 0, err
+		}
+
+		size += hdr.Size
+
+		// Normalize name, for safety and for a simple is-root check
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files. This is not a problem though (although it might
+		// appear that it is). Let's suppose a client is running docker pull.
+		// The daemon it points to is Windows. Would it make sense for the
+		// client to be doing a docker pull Ubuntu for example (which has files
+		// with colons in the name under /usr/share/man/man3)? No, absolutely
+		// not as it would really only make sense that they were pulling a
+		// Windows image. However, for development, it is necessary to be able
+		// to pull Linux images which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600, "")
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			if fi, err := os.Lstat(path); err == nil {
+				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+					if err := os.RemoveAll(path); err != nil {
+						return 0, err
+					}
+				}
+			}
+
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
+			srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+			// we manually retarget these into the temporary files we extracted them into.
+			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+				linkBasename := filepath.Base(hdr.Linkname)
+				srcHdr = aufsHardlinks[linkBasename]
+				if srcHdr == nil {
+					return 0, fmt.Errorf("invalid aufs hardlink")
+				}
+				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+				if err != nil {
+					return 0, err
+				}
+				defer tmpFile.Close()
+				srcData = tmpFile
+			}
+
+			if err := remapIDs(idMappings, srcHdr); err != nil {
+				return 0, err
+			}
+
+			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
+				return 0, err
+			}
+
+			// Directory mtimes must be handled at the end, to keep later
+			// file creation inside them from modifying the directory mtime.
+			if hdr.Typeflag == tar.TypeDir {
+				dirs = append(dirs, hdr)
+			}
+			unpackedPaths[path] = struct{}{}
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
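// Illustrative sketch, not part of the original patch: a minimal layer that
// exercises the whiteout handling above. Unpacking it over a dest that
// already contains etc/passwd deletes that file (the naming convention is
// defined in whiteouts.go later in this patch). The dest argument is
// hypothetical, and "bytes" is an extra import beyond those this file uses.
func exampleWhiteoutLayer(dest string) (int64, error) {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    hdrs := []tar.Header{
        {Name: "etc/", Typeflag: tar.TypeDir, Mode: 0755},
        // ".wh.passwd" is a whiteout entry: it marks etc/passwd as removed.
        {Name: "etc/.wh.passwd", Typeflag: tar.TypeReg, Mode: 0644},
    }
    for i := range hdrs {
        if err := tw.WriteHeader(&hdrs[i]); err != nil {
            return 0, err
        }
    }
    if err := tw.Close(); err != nil {
        return 0, err
    }
    return UnpackLayer(dest, &buf, nil)
}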
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+	return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer and
+// ApplyUncompressedLayer, optionally skipping the DecompressStream call.
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+	dest = filepath.Clean(dest)
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	if err != nil {
+		return 0, err
+	}
+	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+	if decompress {
+		layer, err = DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return UnpackLayer(dest, layer, options)
+}
diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go
new file mode 100644
index 0000000..8167941
--- /dev/null
+++ b/pkg/archive/diff_test.go
@@ -0,0 +1,386 @@
+package archive
+
+import (
+	"archive/tar"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+func TestApplyLayerInvalidFilenames(t *testing.T) {
+	// TODO Windows: Figure out how to fix this test.
+ if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeSymLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerWhiteouts(t *testing.T) { + // TODO Windows: Figure out why this test fails + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + ".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p 
:= range paths {
+		if p[len(p)-1] == filepath.Separator {
+			if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil {
+				return
+			}
+		} else {
+			if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil {
+				return
+			}
+		}
+	}
+	archive, err := Tar(tmpDir, Uncompressed)
+	if err != nil {
+		return
+	}
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		os.RemoveAll(tmpDir)
+		return err
+	}), nil
+}
+
+func readDirContents(root string) ([]string, error) {
+	var files []string
+	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if path == root {
+			return nil
+		}
+		rel, err := filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			rel = rel + "/"
+		}
+		files = append(files, rel)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return files, nil
+}
diff --git a/pkg/archive/example_changes.go b/pkg/archive/example_changes.go
new file mode 100644
index 0000000..cedd46a
--- /dev/null
+++ b/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/archive"
+)
+
+var (
+	flDebug  = flag.Bool("D", false, "debugging output")
+	flNewDir = flag.String("newdir", "", "")
+	flOldDir = flag.String("olddir", "", "")
+	log      = logrus.New()
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "docker-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		// Assign to the outer oldDir (using `:=` here would shadow it and
+		// leave the outer variable empty for ChangesDirs below).
+		var err error
+		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
diff --git a/pkg/archive/testdata/broken.tar b/pkg/archive/testdata/broken.tar
new file mode 100644
index 
0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! 
zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/pkg/archive/time_linux.go b/pkg/archive/time_linux.go new file mode 100644 index 0000000..3448569 --- /dev/null +++ b/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/pkg/archive/time_unsupported.go b/pkg/archive/time_unsupported.go new file mode 100644 index 0000000..e85aac0 --- /dev/null +++ b/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go new file mode 100644 index 0000000..01b9e92 --- /dev/null +++ b/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, r) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. +func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. 
+ fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/pkg/archive/whiteouts.go b/pkg/archive/whiteouts.go new file mode 100644 index 0000000..d20478a --- /dev/null +++ b/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. 
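+//
+// For illustration (a hypothetical AUFS layer, not shipped in this
+// package): a file with several hardlinks may be stored once under this
+// directory and referenced from its visible paths, e.g.
+//
+//	.wh..wh.plnk/123.7  <- actual file data
+//	a/hardlink          <- hardlink to .wh..wh.plnk/123.7
+//	b/hardlink          <- hardlink to .wh..wh.plnk/123.7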
+// Normally these link directories should not go into exported archives
+// and all changed hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means directory has been made opaque - meaning
+// readdir calls to this directory do not follow to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go
new file mode 100644
index 0000000..b39d12c
--- /dev/null
+++ b/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+//  * ./foo.txt with content "hello world"
+//  * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/pkg/archive/wrap_test.go b/pkg/archive/wrap_test.go
new file mode 100644
index 0000000..46ab366
--- /dev/null
+++ b/pkg/archive/wrap_test.go
@@ -0,0 +1,98 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+	"testing"
+)
+
+func TestGenerateEmptyFile(t *testing.T) {
+	archive, err := Generate("emptyFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"emptyFile", ""},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
+
+func TestGenerateWithContent(t *testing.T) {
+	archive, err := Generate("file", "content")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"file",
"content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/pkg/authorization/api.go b/pkg/authorization/api.go new file mode 100644 index 0000000..e8d7996 --- /dev/null +++ b/pkg/authorization/api.go @@ -0,0 +1,88 @@ +package authorization + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" +) + +const ( + // AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// PeerCertificate is a wrapper around x509.Certificate which provides a sane +// encoding/decoding to/from PEM format and JSON. +type PeerCertificate x509.Certificate + +// MarshalJSON returns the JSON encoded pem bytes of a PeerCertificate. +func (pc *PeerCertificate) MarshalJSON() ([]byte, error) { + b := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: pc.Raw}) + return json.Marshal(b) +} + +// UnmarshalJSON populates a new PeerCertificate struct from JSON data. 
+func (pc *PeerCertificate) UnmarshalJSON(b []byte) error { + var buf []byte + if err := json.Unmarshal(b, &buf); err != nil { + return err + } + derBytes, _ := pem.Decode(buf) + c, err := x509.ParseCertificate(derBytes.Bytes) + if err != nil { + return err + } + *pc = PeerCertificate(*c) + return nil +} + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // RequestPeerCertificates stores the request's TLS peer certificates in PEM format + RequestPeerCertificates []*PeerCertificate `json:"RequestPeerCertificates,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicating whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/pkg/authorization/api_test.go b/pkg/authorization/api_test.go new file mode 100644 index 0000000..1031949 --- /dev/null +++ b/pkg/authorization/api_test.go @@ -0,0 +1,75 @@ +package authorization + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPeerCertificateMarshalJSON(t *testing.T) { + template := &x509.Certificate{ + IsCA: true, + BasicConstraintsValid: true, + SubjectKeyId: []byte{1, 2, 3}, + SerialNumber: big.NewInt(1234), + Subject: pkix.Name{ + Country: []string{"Earth"}, + Organization: []string{"Mother Nature"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(5, 5, 5), + + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + } + // generate private key + privatekey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + publickey := &privatekey.PublicKey + + // create a self-signed certificate. 
template = parent + var parent = template + raw, err := x509.CreateCertificate(rand.Reader, template, parent, publickey, privatekey) + require.NoError(t, err) + + cert, err := x509.ParseCertificate(raw) + require.NoError(t, err) + + var certs = []*x509.Certificate{cert} + addr := "www.authz.com/auth" + req, err := http.NewRequest("GET", addr, nil) + require.NoError(t, err) + + req.RequestURI = addr + req.TLS = &tls.ConnectionState{} + req.TLS.PeerCertificates = certs + req.Header.Add("header", "value") + + for _, c := range req.TLS.PeerCertificates { + pcObj := PeerCertificate(*c) + + t.Run("Marshalling :", func(t *testing.T) { + raw, err = pcObj.MarshalJSON() + require.NotNil(t, raw) + require.Nil(t, err) + }) + + t.Run("UnMarshalling :", func(t *testing.T) { + err := pcObj.UnmarshalJSON(raw) + require.Nil(t, err) + require.Equal(t, "Earth", pcObj.Subject.Country[0]) + require.Equal(t, true, pcObj.IsCA) + + }) + + } + +} diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go new file mode 100644 index 0000000..dc9a9ae --- /dev/null +++ b/pkg/authorization/authz.go @@ -0,0 +1,186 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker +// REST http session +// A context provides two method: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// https://docs.docker.com/engine/reference/api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contains content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. 
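+// Conceptually the request decision reduces to (a sketch, not the literal
+// implementation below):
+//
+//	allow := true
+//	for _, res := range pluginResponses {
+//		allow = allow && res.Allow // a single deny blocks the request
+//	}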
+// Plugin execution order is determined according to daemon parameters
+func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx {
+	return &Ctx{
+		plugins:         authZPlugins,
+		user:            user,
+		userAuthNMethod: userAuthNMethod,
+		requestMethod:   requestMethod,
+		requestURI:      requestURI,
+	}
+}
+
+// Ctx stores a single request-response interaction context
+type Ctx struct {
+	user            string
+	userAuthNMethod string
+	requestMethod   string
+	requestURI      string
+	plugins         []Plugin
+	// authReq stores the cached request object for the current transaction
+	authReq *Request
+}
+
+// AuthZRequest authorizes the request to the docker daemon using authZ plugins
+func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error {
+	var body []byte
+	if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize {
+		var err error
+		body, r.Body, err = drainBody(r.Body)
+		if err != nil {
+			return err
+		}
+	}
+
+	var h bytes.Buffer
+	if err := r.Header.Write(&h); err != nil {
+		return err
+	}
+
+	ctx.authReq = &Request{
+		User:            ctx.user,
+		UserAuthNMethod: ctx.userAuthNMethod,
+		RequestMethod:   ctx.requestMethod,
+		RequestURI:      ctx.requestURI,
+		RequestBody:     body,
+		RequestHeaders:  headers(r.Header),
+	}
+
+	if r.TLS != nil {
+		for _, c := range r.TLS.PeerCertificates {
+			pc := PeerCertificate(*c)
+			ctx.authReq.RequestPeerCertificates = append(ctx.authReq.RequestPeerCertificates, &pc)
+		}
+	}
+
+	for _, plugin := range ctx.plugins {
+		logrus.Debugf("AuthZ request using plugin %s", plugin.Name())
+
+		authRes, err := plugin.AuthZRequest(ctx.authReq)
+		if err != nil {
+			return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err)
+		}
+
+		if !authRes.Allow {
+			return newAuthorizationError(plugin.Name(), authRes.Msg)
+		}
+	}
+
+	return nil
+}
+
+// AuthZResponse authorizes and manipulates the response from the docker daemon using authZ plugins
+func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error {
+	ctx.authReq.ResponseStatusCode = rm.StatusCode()
+	ctx.authReq.ResponseHeaders = headers(rm.Header())
+
+	if sendBody(ctx.requestURI, rm.Header()) {
+		ctx.authReq.ResponseBody = rm.RawBody()
+	}
+
+	for _, plugin := range ctx.plugins {
+		logrus.Debugf("AuthZ response using plugin %s", plugin.Name())
+
+		authRes, err := plugin.AuthZResponse(ctx.authReq)
+		if err != nil {
+			return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err)
+		}
+
+		if !authRes.Allow {
+			return newAuthorizationError(plugin.Name(), authRes.Msg)
+		}
+	}
+
+	rm.FlushAll()
+
+	return nil
+}
+
+// drainBody dumps the body (if its length is less than 1MB) without modifying the request state
+func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
+	bufReader := bufio.NewReaderSize(body, maxBodySize)
+	newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
+
+	data, err := bufReader.Peek(maxBodySize)
+	// Peek filled the whole buffer without hitting EOF: the body is at
+	// least maxBodySize, so skip it
+	if err == nil {
+		logrus.Warnf("Request body is larger than %d bytes, skipping body", maxBodySize)
+		return nil, newBody, nil
+	}
+	// Body size is less than maximum size
+	if err == io.EOF {
+		return data, newBody, nil
+	}
+	// Unknown error
+	return nil, newBody, err
+}
+
+// sendBody returns true when request/response body should be sent to AuthZPlugin
+func sendBody(url string, header http.Header) bool {
+	// Skip body for auth endpoint
+	if strings.HasSuffix(url, "/auth") {
+		return false
+	}
+
+	// The body is forwarded only for JSON content
+	return
header.Get("Content-Type") == "application/json" +} + +// headers returns flatten version of the http headers excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} + +// authorizationError represents an authorization deny error +type authorizationError struct { + error +} + +// HTTPErrorStatusCode returns the authorization error status code (forbidden) +func (e authorizationError) HTTPErrorStatusCode() int { + return http.StatusForbidden +} + +func newAuthorizationError(plugin, msg string) authorizationError { + return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} +} diff --git a/pkg/authorization/authz_unix_test.go b/pkg/authorization/authz_unix_test.go new file mode 100644 index 0000000..a787f3c --- /dev/null +++ b/pkg/authorization/authz_unix_test.go @@ -0,0 +1,282 @@ +// +build !windows + +// TODO Windows: This uses a Unix socket for testing. This might be possible +// to port to Windows using a named pipe instead. + +package authorization + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" + "github.com/gorilla/mux" +) + +const ( + pluginAddress = "authz-test-plugin.sock" +) + +func TestAuthZRequestPluginError(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Err: "an error", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZRequestPlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZResponsePlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestURI: "someting.com/auth", + RequestBody: []byte("sample body"), + } + server.replayResponse = Response{ + Allow: 
true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZResponse(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestResponseModifier(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + m.FlushAll() + if r.Header().Get("h1") != "v1" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusInternalServerError { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +func TestDrainBody(t *testing.T) { + tests := []struct { + length int // length is the message length send to drainBody + expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called + }{ + {10, 10}, // Small message size + {maxBodySize - 1, maxBodySize - 1}, // Max message size + {maxBodySize * 2, 0}, // Large message size (skip copying body) + + } + + for _, test := range tests { + msg := strings.Repeat("a", test.length) + body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) + if err != nil { + t.Fatal(err) + } + if len(body) != test.expectedBodyLength { + t.Fatalf("Body must be copied, actual length: '%d'", len(body)) + } + if closer == nil { + t.Fatal("Closer must not be nil") + } + modified, err := ioutil.ReadAll(closer) + if err != nil { + t.Fatalf("Error must not be nil: '%v'", err) + } + if len(modified) != len(msg) { + t.Fatalf("Result should not be truncated. 
Original length: '%d', new length: '%d'", len(msg), len(modified)) + } + } +} + +func TestResponseModifierOverride(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + overrideHeader := make(http.Header) + overrideHeader.Add("h1", "v2") + overrideHeaderBytes, err := json.Marshal(overrideHeader) + if err != nil { + t.Fatalf("override header failed %v", err) + } + + m.OverrideHeader(overrideHeaderBytes) + m.OverrideBody([]byte("override body")) + m.OverrideStatusCode(http.StatusNotFound) + m.FlushAll() + if r.Header().Get("h1") != "v2" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusNotFound { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +// createTestPlugin creates a new sample authorization plugin +func createTestPlugin(t *testing.T) *authorizationPlugin { + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatalf("Failed to create client %v", err) + } + + return &authorizationPlugin{name: "plugin", plugin: client} +} + +// AuthZPluginTestServer is a simple server that implements the authZ plugin interface +type authZPluginTestServer struct { + listener net.Listener + t *testing.T + // request stores the request sent from the daemon to the plugin + recordedRequest Request + // response stores the response sent from the plugin to the daemon + replayResponse Response + server *httptest.Server +} + +// start starts the test server that implements the plugin +func (t *authZPluginTestServer) start() { + r := mux.NewRouter() + l, err := net.Listen("unix", pluginAddress) + if err != nil { + t.t.Fatal(err) + } + t.listener = l + r.HandleFunc("/Plugin.Activate", t.activate) + r.HandleFunc("/"+AuthZApiRequest, t.auth) + r.HandleFunc("/"+AuthZApiResponse, t.auth) + t.server = &httptest.Server{ + Listener: l, + Config: &http.Server{ + Handler: r, + Addr: pluginAddress, + }, + } + t.server.Start() +} + +// stop stops the test server that implements the plugin +func (t *authZPluginTestServer) stop() { + t.server.Close() + os.Remove(pluginAddress) + if t.listener != nil { + t.listener.Close() + } +} + +// auth is a used to record/replay the authentication api messages +func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { + t.recordedRequest = Request{} + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.t.Fatal(err) + } + r.Body.Close() + json.Unmarshal(body, &t.recordedRequest) + b, err := json.Marshal(t.replayResponse) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} + +func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} diff --git a/pkg/authorization/middleware.go b/pkg/authorization/middleware.go new file mode 100644 index 0000000..7789a75 --- /dev/null +++ b/pkg/authorization/middleware.go @@ -0,0 +1,110 @@ +package authorization + +import ( + "net/http" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "golang.org/x/net/context" +) + +// Middleware uses a list of plugins to +// 
handle authorization in the API requests. +type Middleware struct { + mu sync.Mutex + plugins []Plugin +} + +// NewMiddleware creates a new Middleware +// with a slice of plugins names. +func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { + SetPluginGetter(pg) + return &Middleware{ + plugins: newPlugins(names), + } +} + +func (m *Middleware) getAuthzPlugins() []Plugin { + m.mu.Lock() + defer m.mu.Unlock() + return m.plugins +} + +// SetPlugins sets the plugin used for authorization +func (m *Middleware) SetPlugins(names []string) { + m.mu.Lock() + m.plugins = newPlugins(names) + m.mu.Unlock() +} + +// RemovePlugin removes a single plugin from this authz middleware chain +func (m *Middleware) RemovePlugin(name string) { + m.mu.Lock() + defer m.mu.Unlock() + plugins := m.plugins[:0] + for _, authPlugin := range m.plugins { + if authPlugin.Name() != name { + plugins = append(plugins, authPlugin) + } + } + m.plugins = plugins +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + plugins := m.getAuthzPlugins() + if len(plugins) == 0 { + return handler(ctx, w, r, vars) + } + + user := "" + userAuthNMethod := "" + + // Default authorization using existing TLS connection credentials + // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support + // and ldap) will be extracted using AuthN feature, which is tracked under: + // https://github.com/docker/docker/pull/20883 + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + user = r.TLS.PeerCertificates[0].Subject.CommonName + userAuthNMethod = "TLS" + } + + authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := NewResponseModifier(w) + + var errD error + + if errD = handler(ctx, rw, r, vars); errD != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) + } + + // There's a chance that the authCtx.plugins was updated. One of the reasons + // this can happen is when an authzplugin is disabled. 
+ plugins = m.getAuthzPlugins() + if len(plugins) == 0 { + logrus.Debug("There are no authz plugins in the chain") + return nil + } + + authCtx.plugins = plugins + + if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if errD != nil { + return errD + } + + return nil + } +} diff --git a/pkg/authorization/middleware_test.go b/pkg/authorization/middleware_test.go new file mode 100644 index 0000000..fc74011 --- /dev/null +++ b/pkg/authorization/middleware_test.go @@ -0,0 +1,53 @@ +package authorization + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/stretchr/testify/require" +) + +func TestMiddleware(t *testing.T) { + pluginNames := []string{"testPlugin1", "testPlugin2"} + var pluginGetter plugingetter.PluginGetter + m := NewMiddleware(pluginNames, pluginGetter) + authPlugins := m.getAuthzPlugins() + require.Equal(t, 2, len(authPlugins)) + require.EqualValues(t, pluginNames[0], authPlugins[0].Name()) + require.EqualValues(t, pluginNames[1], authPlugins[1].Name()) +} + +func TestNewResponseModifier(t *testing.T) { + recorder := httptest.NewRecorder() + modifier := NewResponseModifier(recorder) + modifier.Header().Set("H1", "V1") + modifier.Write([]byte("body")) + require.False(t, modifier.Hijacked()) + modifier.WriteHeader(http.StatusInternalServerError) + require.NotNil(t, modifier.RawBody()) + + raw, err := modifier.RawHeaders() + require.NotNil(t, raw) + require.Nil(t, err) + + headerData := strings.Split(strings.TrimSpace(string(raw)), ":") + require.EqualValues(t, "H1", strings.TrimSpace(headerData[0])) + require.EqualValues(t, "V1", strings.TrimSpace(headerData[1])) + + modifier.Flush() + modifier.FlushAll() + + if recorder.Header().Get("H1") != "V1" { + t.Fatalf("Header value must exists %s", recorder.Header().Get("H1")) + } + +} + +func setAuthzPlugins(m *Middleware, plugins []Plugin) { + m.mu.Lock() + m.plugins = plugins + m.mu.Unlock() +} diff --git a/pkg/authorization/middleware_unix_test.go b/pkg/authorization/middleware_unix_test.go new file mode 100644 index 0000000..fd684f1 --- /dev/null +++ b/pkg/authorization/middleware_unix_test.go @@ -0,0 +1,65 @@ +// +build !windows + +package authorization + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestMiddlewareWrapHandler(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + pluginNames := []string{authZPlugin.name} + + var pluginGetter plugingetter.PluginGetter + middleWare := NewMiddleware(pluginNames, pluginGetter) + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return nil + } + + authList := []Plugin{authZPlugin} + middleWare.SetPlugins([]string{"My Test Plugin"}) + setAuthzPlugins(middleWare, authList) + mdHandler := middleWare.WrapHandler(handler) + require.NotNil(t, mdHandler) + + addr := "www.example.com/auth" + req, _ := http.NewRequest("GET", addr, nil) + req.RequestURI = addr + req.Header.Add("header", "value") + + resp := httptest.NewRecorder() + ctx := context.Background() + + t.Run("Error Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: false, + Msg: "Server Auth Not Allowed", + } + if err := 
mdHandler(ctx, resp, req, map[string]string{}); err == nil { + require.Error(t, err) + } + + }) + + t.Run("Positive Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: true, + Msg: "Server Auth Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err != nil { + require.NoError(t, err) + } + + }) + +} diff --git a/pkg/authorization/plugin.go b/pkg/authorization/plugin.go new file mode 100644 index 0000000..939f926 --- /dev/null +++ b/pkg/authorization/plugin.go @@ -0,0 +1,118 @@ +package authorization + +import ( + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// newPlugins constructs and initializes the authorization plugins based on plugin names +func newPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +var getter plugingetter.PluginGetter + +// SetPluginGetter sets the plugingetter +func SetPluginGetter(pg plugingetter.PluginGetter) { + getter = pg +} + +// GetPluginGetter gets the plugingetter +func GetPluginGetter() plugingetter.PluginGetter { + return getter +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Client + name string + once sync.Once +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +// Set the remote for an authz pluginv2 +func (a *authorizationPlugin) SetName(remote string) { + a.name = remote +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + var err error + a.once.Do(func() { + if a.plugin == nil { + var plugin plugingetter.CompatPlugin + var e error + + if pg := GetPluginGetter(); pg != nil { + plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.Lookup) + a.SetName(plugin.Name()) + } else { + plugin, e = plugins.Get(a.name, AuthZApiImplements) + } + if e != nil { + err = e + return + } + a.plugin = plugin.Client() + } + }) + return err +} diff --git a/pkg/authorization/response.go b/pkg/authorization/response.go new file mode 100644 index 0000000..129bf2f --- /dev/null +++ b/pkg/authorization/response.go 
@@ -0,0 +1,203 @@
+package authorization
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// ResponseModifier allows authorization plugins to read and modify the content of the HTTP response
+type ResponseModifier interface {
+	http.ResponseWriter
+	http.Flusher
+	http.CloseNotifier
+
+	// RawBody returns the current http content
+	RawBody() []byte
+
+	// RawHeaders returns the current content of the http headers
+	RawHeaders() ([]byte, error)
+
+	// StatusCode returns the current status code
+	StatusCode() int
+
+	// OverrideBody replaces the body of the HTTP reply
+	OverrideBody(b []byte)
+
+	// OverrideHeader replaces the headers of the HTTP reply
+	OverrideHeader(b []byte) error
+
+	// OverrideStatusCode replaces the status code of the HTTP reply
+	OverrideStatusCode(statusCode int)
+
+	// FlushAll flushes all data to the HTTP response
+	FlushAll() error
+
+	// Hijacked indicates the response has been hijacked by the Docker daemon
+	Hijacked() bool
+}
+
+// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content
+func NewResponseModifier(rw http.ResponseWriter) ResponseModifier {
+	return &responseModifier{rw: rw, header: make(http.Header)}
+}
+
+// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore
+// the HTTP request/response from the docker daemon
+type responseModifier struct {
+	// The original response writer
+	rw http.ResponseWriter
+	// body holds the response body
+	body []byte
+	// header holds the response header
+	header http.Header
+	// statusCode holds the response status code
+	statusCode int
+	// hijacked indicates the request has been hijacked
+	hijacked bool
+}
+
+func (rm *responseModifier) Hijacked() bool {
+	return rm.hijacked
+}
+
+// WriteHeader stores the HTTP status code
+func (rm *responseModifier) WriteHeader(s int) {
+
+	// Use original request if hijacked
+	if rm.hijacked {
+		rm.rw.WriteHeader(s)
+		return
+	}
+
+	rm.statusCode = s
+}
+
+// Header returns the internal http header
+func (rm *responseModifier) Header() http.Header {
+
+	// Use original header if hijacked
+	if rm.hijacked {
+		return rm.rw.Header()
+	}
+
+	return rm.header
+}
+
+// StatusCode returns the http status code
+func (rm *responseModifier) StatusCode() int {
+	return rm.statusCode
+}
+
+// OverrideBody replaces the body of the HTTP response
+func (rm *responseModifier) OverrideBody(b []byte) {
+	rm.body = b
+}
+
+// OverrideStatusCode replaces the status code of the HTTP response
+func (rm *responseModifier) OverrideStatusCode(statusCode int) {
+	rm.statusCode = statusCode
+}
+
+// OverrideHeader replaces the headers of the HTTP response
+func (rm *responseModifier) OverrideHeader(b []byte) error {
+	header := http.Header{}
+	if err := json.Unmarshal(b, &header); err != nil {
+		return err
+	}
+	rm.header = header
+	return nil
+}
+
+// Write buffers the bytes internally, or writes them through when hijacked
+func (rm *responseModifier) Write(b []byte) (int, error) {
+
+	if rm.hijacked {
+		return rm.rw.Write(b)
+	}
+
+	rm.body = append(rm.body, b...)
+ return len(b), nil +} + +// Body returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Error("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Error("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the header + for k, vv := range rm.header { + for _, v := range vv { + rm.rw.Header().Add(k, v) + } + } + + // Copy the status code + // Also WriteHeader needs to be done after all the headers + // have been copied (above). + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/pkg/broadcaster/unbuffered.go b/pkg/broadcaster/unbuffered.go new file mode 100644 index 0000000..784d65d --- /dev/null +++ b/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. 
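+//
+// Typical usage (a sketch; writerA and writerB are any io.WriteClosers):
+//
+//	var w Unbuffered
+//	w.Add(writerA)
+//	w.Add(writerB)
+//	w.Write([]byte("hello")) // fans out to every registered writer
+//	w.Clean()                // closes and drops all writers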
+func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/pkg/broadcaster/unbuffered_test.go b/pkg/broadcaster/unbuffered_test.go new file mode 100644 index 0000000..9f8e72b --- /dev/null +++ b/pkg/broadcaster/unbuffered_test.go @@ -0,0 +1,162 @@ +package broadcaster + +import ( + "bytes" + "errors" + "strings" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestUnbuffered(t *testing.T) { + writer := new(Unbuffered) + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.Add(bufferA) + bufferB := &dummyWriter{} + writer.Add(bufferB) + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.Add(bufferC) + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test4: Test eviction on multiple simultaneous failures + bufferB.failOnWrite = true + bufferC.failOnWrite = true + bufferD := &dummyWriter{} + writer.Add(bufferD) + writer.Write([]byte("yo")) + writer.Write([]byte("ink")) + if strings.Contains(bufferB.String(), "yoink") { + t.Errorf("bufferB received write. contents: %q", bufferB) + } + if strings.Contains(bufferC.String(), "yoink") { + t.Errorf("bufferC received write. contents: %q", bufferC) + } + if g, w := bufferD.String(), "yoink"; g != w { + t.Errorf("bufferD = %q, want %q", g, w) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. 
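+//
+// For example:
+//
+//	go test -race ./pkg/broadcaster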
+func TestRaceUnbuffered(t *testing.T) { + writer := new(Unbuffered) + c := make(chan bool) + go func() { + writer.Add(devNullCloser(0)) + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkUnbuffered(b *testing.B) { + writer := new(Unbuffered) + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go new file mode 100644 index 0000000..7604418 --- /dev/null +++ b/pkg/chrootarchive/archive.go @@ -0,0 +1,70 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{Untar: Untar, IDMappings: idMappings} +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
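+//
+// For example (a sketch; the paths are illustrative):
+//
+//	f, _ := os.Open("/tmp/layer.tar") // plain tar, not gzip/bzip2/xz
+//	defer f.Close()
+//	err := chrootarchive.UntarUncompressed(f, "/tmp/extracted", nil)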
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go new file mode 100644 index 0000000..bd2deb2 --- /dev/null +++ b/pkg/chrootarchive/archive_test.go @@ -0,0 +1,412 @@ +package chrootarchive + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +func init() { + reexec.Init() +} + +var chrootArchiver = NewArchiver(nil) + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} + +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := 
filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarEmptyArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := Untar(nil, tmpdir, nil); err == nil { + t.Fatal("expected error on empty archive") + } +} + +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeSymLinks { + if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := TarUntar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyWithTar(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("Failing on Windows and Solaris") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyWithTar(src, dest); err != nil { + t.Fatal(err) + } + if err := 
compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyFileWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyFileWithTar(src, dest); err == nil { + t.Fatal("Expected error on copying directory") + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyFileWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarPath(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + // Untar a directory + if err := UntarPath(src, dest); err == nil { + t.Fatal("Expected error on untaring a directory") + } + + // Untar a tar file + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(stream) + tarfile := filepath.Join(tmpdir, "src.tar") + if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + if err := UntarPath(tarfile, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func 
TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyDotDotFile(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700, ""); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700, ""); err != nil { + t.Fatal(err) + } + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go new file mode 100644 index 0000000..f2325ab --- /dev/null +++ b/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
We will marshal the options via a pipe to the + child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + // write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` fails on the docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // left pending on the write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file (%v): %s", err, output) + } + return nil +} diff --git a/pkg/chrootarchive/archive_windows.go b/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000..0a500ed --- /dev/null +++ b/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different from Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We instead call the unpack inline within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/pkg/chrootarchive/chroot_linux.go b/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 0000000..f9d7fed --- /dev/null +++ b/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,108 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// chroot on Linux uses pivot_root instead of chroot. +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of the new root; it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root.
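Stepping back to invokeUnpack above: the pipe works around a real kernel limit. A minimal sketch of what goes wrong without it (the ~4MB payload, the HUGE variable name, and the /bin/true target are illustrative assumptions; on Linux a single argv/environment string is typically capped far below this, so the exec fails with E2BIG):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	cmd := exec.Command("/bin/true")
	// Roughly the size of the huge ExcludePatterns list exercised by the
	// tests above, crammed into a single environment variable.
	cmd.Env = append(os.Environ(), "HUGE="+strings.Repeat("x", 4<<20))
	// Typically prints "fork/exec /bin/true: argument list too long" (E2BIG).
	fmt.Println(cmd.Run())
}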
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/pkg/chrootarchive/chroot_unix.go b/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000..16354bf --- /dev/null +++ b/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go new file mode 100644 index 0000000..49acad7 --- /dev/null +++ b/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package 
chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed (it is passed through DecompressStream). +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/pkg/chrootarchive/diff_unix.go b/pkg/chrootarchive/diff_unix.go new file mode 100644 index 0000000..33098b3 --- /dev/null +++ b/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and reexec. +func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer.
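A hypothetical caller-side sketch of the exported ApplyLayer API above (the paths are assumptions for illustration; the internal handler follows below):

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar") // assumed: a layer tarball
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// On Linux this unpacks inside a chroot/pivot_root sandbox via re-exec.
	size, err := chrootarchive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied layer: %d bytes", size)
}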
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/pkg/chrootarchive/diff_windows.go b/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000..dc07eb6 --- /dev/null +++ b/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/pkg/chrootarchive/init_unix.go b/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000..4f637f1 --- /dev/null +++ b/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/pkg/chrootarchive/init_windows.go b/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000..fa17c9b --- /dev/null +++ b/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go new file mode 100644 index 0000000..73433c0 --- /dev/null +++ b/pkg/devicemapper/devmapper.go @@ -0,0 +1,829 @@ +// +build linux,cgo + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. 
+var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.); a task is needed for each ioctl + // command to execute. + Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layers) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether the error indicates that a device ID already +// exists. This is true when a device-create or snapshot-create operation fails +// because the device or snapshot already exists in the pool. +// The current implementation is a little crude, as it compares the error +// string against an exact pattern; replacing it with something more robust +// is desirable.
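A hypothetical use of the helper documented above (the helper itself follows): probing for a free device ID, since only the pool knows which IDs are taken. createWithFreeID and the 100-attempt bound are assumptions for illustration:

package dmutil

import (
	"fmt"

	"github.com/docker/docker/pkg/devicemapper"
)

// createWithFreeID walks device IDs upward until creation succeeds.
func createWithFreeID(pool string, startID int) (int, error) {
	for id := startID; id < startID+100; id++ {
		err := devicemapper.CreateDevice(pool, id)
		if err == nil {
			return id, nil
		}
		if !devicemapper.DeviceIDExists(err) {
			return 0, err // a real failure, not an ID collision
		}
		// ID already taken in the pool; try the next one.
	}
	return 0, fmt.Errorf("no free device id after 100 attempts")
}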
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + runtime.KeepAlive(t) + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
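The cookie plumbing above comes with a pairing rule that the comments further down spell out: every successful setCookie must be matched by exactly one UdevWait. A minimal in-package sketch of that contract (runWithUdevSync is illustrative and assumes it lives in package devicemapper; it is not part of the patch):

// runWithUdevSync runs a task with udev synchronization enabled.
func runWithUdevSync(t *Task) error {
	cookie := new(uint)
	if err := t.setCookie(cookie, 0); err != nil {
		return err // no semaphore was allocated; nothing to wait on
	}
	// The System V semaphore created by setCookie is only released by
	// UdevWait, so it must run even if t.run fails.
	defer UdevWait(cookie)
	return t.run()
}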
+func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) + return ErrUdevWait + } + return nil +} + +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev. +// +// This is essential; otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookies in tasks. +// This is largely a lower-level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(cookie) + + dmSawBusy = false // reset before the task is run + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + // set a task cookie and disable library fallback, or else libdevmapper will + // disable udev dm rules and delete the symlink under /dev/mapper by itself, + // even if the removal is deferred by the kernel. + cookie := new(uint) + var flags uint16 + flags = DmUdevDisableLibraryFallback + if err := task.setCookie(cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + + // libdevmapper and udev rely on System V semaphores for synchronization; + // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
+ // So these two function calls must come in pairs, otherwise semaphores will + // be leaked, and the limit on the number of semaphores defined in `/proc/sys/kernel/sem` + // will be reached, which will eventually make all following calls to `task.setCookie` + // fail. + // This deferred UdevWait will not wait for the deferred removal to finish: no + // udev event will be generated and the semaphore's value will not be incremented + // by udev, so UdevWait here simply cleans up the semaphore. + defer UdevWait(cookie) + + if err = task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. +func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(deviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage("@cancel_deferred_remove"); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) + } + return nil +} + +// GetBlockDeviceSize returns the size of a block device identified by the specified file. +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +// BlockDeviceDiscard runs discard for the given path. +// This is used as a workaround for the kernel not discarding blocks +// on the thin pool when we remove a thin-provisioned device, so we do it +// manually. +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + syscall.Sync() + + return nil +} + +// CreatePool is the programmatic example of "dmsetup create". +// It creates a device with the specified poolName, data and metadata file and block size.
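Before the implementation, a hypothetical caller-side sketch (the loop-device paths, pool name, and the 128-sector block size are assumptions for illustration; CreatePool itself follows):

package dmutil

import (
	"os"

	"github.com/docker/docker/pkg/devicemapper"
)

// setupPool builds a thin pool from a data and a metadata block device.
func setupPool() error {
	data, err := os.OpenFile("/dev/loop0", os.O_RDWR, 0) // assumed data device
	if err != nil {
		return err
	}
	defer data.Close()

	meta, err := os.OpenFile("/dev/loop1", os.O_RDWR, 0) // assumed metadata device
	if err != nil {
		return err
	}
	defer meta.Close()

	// The block size is given in 512-byte sectors: 128 sectors = 64KB.
	return devicemapper.CreatePool("docker-thinpool", data, meta, 128)
}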
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + cookie := new(uint) + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". +// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. 
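A hedged sketch of consuming the status tuple returned below. The "mapped sectors, highest mapped sector" layout of a thin target's params string is an assumption about the kernel's thin target, not something this package guarantees; thinMappedSectors is illustrative only:

package dmutil

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/docker/docker/pkg/devicemapper"
)

// thinMappedSectors reads the first status field of a thin device.
func thinMappedSectors(name string) (uint64, error) {
	_, _, target, params, err := devicemapper.GetStatus(name)
	if err != nil {
		return 0, err
	}
	if target != "thin" {
		return 0, fmt.Errorf("%s is not a thin device (target %q)", name, target)
	}
	fields := strings.Fields(params)
	if len(fields) == 0 {
		return 0, fmt.Errorf("unexpected status params %q", params)
	}
	return strconv.ParseUint(fields[0], 10, 64)
}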
+func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. +func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. 
+func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. +func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. 
+func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by baseName and baseDeviceID. +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { + if doSuspend { + if err2 := ResumeDevice(baseName); err2 != nil { + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) + } + } + return err + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/devicemapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go new file mode 100644 index 0000000..7dd0c60 --- /dev/null +++ b/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,35 @@ +// +build linux,cgo + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// DevmapperLogCallback exports the devmapper log callback for cgo. +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000..4f270fb --- /dev/null +++ b/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,251 @@ +// +build linux,cgo + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) 
*Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 0000000..5bdd97d --- /dev/null +++ b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,34 @@ +// +build linux,cgo,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +*/ +import "C" + +// LibraryDeferredRemovalSupport is supported when statically linked.
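The constant below is a compile-time capability flag (selected by the build tags on these two wrapper files), so callers can branch on it without probing the library at runtime. A hedged caller-side sketch; removeDevice is illustrative, not part of the patch:

package dmutil

import "github.com/docker/docker/pkg/devicemapper"

// removeDevice prefers deferred removal when the linked libdevmapper
// supports it, and falls back to a synchronous removal otherwise.
func removeDevice(name string, deferred bool) error {
	if deferred && devicemapper.LibraryDeferredRemovalSupport {
		return devicemapper.RemoveDeviceDeferred(name)
	}
	return devicemapper.RemoveDevice(name)
}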
+const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 0000000..968b2ce --- /dev/null +++ b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,cgo,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/pkg/devicemapper/ioctl.go b/pkg/devicemapper/ioctl.go new file mode 100644 index 0000000..b745291 --- /dev/null +++ b/pkg/devicemapper/ioctl.go @@ -0,0 +1,27 @@ +// +build linux,cgo + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/pkg/devicemapper/log.go b/pkg/devicemapper/log.go new file mode 100644 index 0000000..cee5e54 --- /dev/null +++ b/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/pkg/directory/directory.go b/pkg/directory/directory.go new file mode 100644 index 0000000..1715ef4 --- /dev/null +++ b/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/pkg/directory/directory_test.go b/pkg/directory/directory_test.go new file mode 100644 index 0000000..2b7a465 --- /dev/null +++ 
b/pkg/directory/directory_test.go @@ -0,0 +1,192 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "testing" +) + +// Size of an empty directory should be 0 +func TestSizeEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("empty directory has size: %d", size) + } +} + +// Size of a directory with one empty file should be 0 +func TestSizeEmptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + var size int64 + if size, _ = Size(file.Name()); size != 0 { + t.Fatalf("directory with one file has size: %d", size) + } +} + +// Size of a directory with one 5-byte file should be 5 +func TestSizeNonemptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{97, 98, 99, 100, 101} + file.Write(d) + + var size int64 + if size, _ = Size(file.Name()); size != 5 { + t.Fatalf("directory with one 5-byte file has size: %d", size) + } +} + +// Size of a directory with one empty directory should be 0 +func TestSizeNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("directory with one empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 empty directory +func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{100, 111, 99, 107, 101, 114} + file.Write(d) + + var size int64 + if size, _ = Size(dir); size != 6 { + t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 non-empty directory +func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { + var dir, dirNested string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + data := []byte{100, 111, 99, 
107, 101, 114} + file.Write(data) + + var nestedFile *os.File + if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { + t.Fatalf("failed to create file in nested directory: %s", err) + } + + nestedData := []byte{100, 111, 99, 107, 101, 114} + nestedFile.Write(nestedData) + + var size int64 + if size, _ = Size(dir); size != 12 { + t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) + } +} + +// Test migration of directory to a subdir underneath itself +func TestMoveToSubdir(t *testing.T) { + var outerDir, subDir string + var err error + + if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { + t.Fatalf("failed to create directory: %v", err) + } + + if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { + t.Fatalf("failed to create subdirectory: %v", err) + } + + // write 4 temp files in the outer dir to get moved + filesList := []string{"a", "b", "c", "d"} + for _, fName := range filesList { + if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { + t.Fatalf("couldn't create temp file %q: %v", fName, err) + } else { + file.WriteString(fName) + file.Close() + } + } + + if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { + t.Fatalf("Error during migration of content to subdirectory: %v", err) + } + // validate that the files were moved to the subdirectory + infos, err := ioutil.ReadDir(subDir) + if err != nil { + t.Fatal(err) + } + if len(infos) != 4 { + t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) + } + var results []string + for _, info := range infos { + results = append(results, info.Name()) + } + sort.Sort(sort.StringSlice(results)) + if !reflect.DeepEqual(filesList, results) { + t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) + } +} + +// Test a non-existing directory +func TestSizeNonExistingDirectory(t *testing.T) { + if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { + t.Fatalf("error is expected") + } +} diff --git a/pkg/directory/directory_unix.go b/pkg/directory/directory_unix.go new file mode 100644 index 0000000..397251b --- /dev/null +++ b/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+ data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/pkg/directory/directory_windows.go b/pkg/directory/directory_windows.go new file mode 100644 index 0000000..6fb0917 --- /dev/null +++ b/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/pkg/discovery/README.md b/pkg/discovery/README.md new file mode 100644 index 0000000..d8ed9ce --- /dev/null +++ b/pkg/discovery/README.md @@ -0,0 +1,41 @@ +--- +page_title: Docker discovery +page_description: discovery +page_keywords: docker, clustering, discovery +--- + +# Discovery + +Docker comes with multiple Discovery backends. + +## Backends + +### Using etcd + +Point your Docker Engine instances to a common etcd instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ dockerd -H= --cluster-advertise= --cluster-store etcd://,/ +``` + +### Using consul + +Point your Docker Engine instances to a common Consul instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ dockerd -H= --cluster-advertise= --cluster-store consul:/// +``` + +### Using zookeeper + +Point your Docker Engine instances to a common Zookeeper instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ dockerd -H= --cluster-advertise= --cluster-store zk://,/ +``` diff --git a/pkg/discovery/backends.go b/pkg/discovery/backends.go new file mode 100644 index 0000000..2eab550 --- /dev/null +++ b/pkg/discovery/backends.go @@ -0,0 +1,107 @@ +package discovery + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" +) + +var ( + // Backends is a global map of discovery backends indexed by their + // associated scheme. + backends = make(map[string]Backend) +) + +// Register makes a discovery backend available by the provided scheme. +// If Register is called twice with the same scheme an error is returned. 
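+// A minimal usage sketch (the scheme name and backend value below are
+// purely illustrative):
+//
+//	if err := Register("mybackend", &myBackend{}); err != nil {
+//		// the scheme was already claimed by another backend
+//	}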
+func Register(scheme string, d Backend) error { + if _, exists := backends[scheme]; exists { + return fmt.Errorf("scheme already registered %s", scheme) + } + logrus.WithField("name", scheme).Debugf("Registering discovery service") + backends[scheme] = d + return nil +} + +func parse(rawurl string) (string, string) { + parts := strings.SplitN(rawurl, "://", 2) + + // nodes:port,node2:port => nodes://node1:port,node2:port + if len(parts) == 1 { + return "nodes", parts[0] + } + return parts[0], parts[1] +} + +// ParseAdvertise parses the --cluster-advertise daemon config which accepts +// : or : +func ParseAdvertise(advertise string) (string, error) { + var ( + iface *net.Interface + addrs []net.Addr + err error + ) + + addr, port, err := net.SplitHostPort(advertise) + + if err != nil { + return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) + } + + ip := net.ParseIP(addr) + // If it is a valid ip-address, use it as is + if ip != nil { + return advertise, nil + } + + // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise + ifaceName := addr + iface, err = net.InterfaceByName(ifaceName) + if err != nil { + return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) + } + + addrs, err = iface.Addrs() + if err != nil { + return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) + } + + if len(addrs) == 0 { + return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) + } + + addr = "" + for _, a := range addrs { + ip, _, err := net.ParseCIDR(a.String()) + if err != nil { + return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) + } + if ip.To4() == nil || ip.IsLoopback() { + continue + } + addr = ip.String() + break + } + if addr == "" { + return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise) + } + + addr = net.JoinHostPort(addr, port) + return addr, nil +} + +// New returns a new Discovery given a URL, heartbeat and ttl settings. +// Returns an error if the URL scheme is not supported. +func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { + scheme, uri := parse(rawurl) + if backend, exists := backends[scheme]; exists { + logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service") + err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) + return backend, err + } + + return nil, ErrNotSupported +} diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go new file mode 100644 index 0000000..ca7f587 --- /dev/null +++ b/pkg/discovery/discovery.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "errors" + "time" +) + +var ( + // ErrNotSupported is returned when a discovery service is not supported. + ErrNotSupported = errors.New("discovery service not supported") + + // ErrNotImplemented is returned when discovery feature is not implemented + // by discovery backend. + ErrNotImplemented = errors.New("not implemented in this discovery service") +) + +// Watcher provides watching over a cluster for nodes joining and leaving. +type Watcher interface { + // Watch the discovery for entry changes. + // Returns a channel that will receive changes or an error. + // Providing a non-nil stopCh can be used to stop watching. 
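+ // A typical consumer drains both channels, for example (sketch):
+ //
+ //	ch, errCh := w.Watch(stopCh)
+ //	for {
+ //		select {
+ //		case entries := <-ch:
+ //			// react to the new set of cluster entries
+ //		case err := <-errCh:
+ //			// log the error and keep watching
+ //		}
+ //	}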
+ Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) +} + +// Backend is implemented by discovery backends which manage cluster entries. +type Backend interface { + // Watcher must be provided by every backend. + Watcher + + // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. + Initialize(string, time.Duration, time.Duration, map[string]string) error + + // Register to the discovery. + Register(string) error +} diff --git a/pkg/discovery/discovery_test.go b/pkg/discovery/discovery_test.go new file mode 100644 index 0000000..6084f3e --- /dev/null +++ b/pkg/discovery/discovery_test.go @@ -0,0 +1,137 @@ +package discovery + +import ( + "testing" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestNewEntry(c *check.C) { + entry, err := NewEntry("127.0.0.1:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") + + entry, err = NewEntry("[2001:db8:0:f101::2]:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") + + _, err = NewEntry("127.0.0.1") + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestParse(c *check.C) { + scheme, uri := parse("127.0.0.1:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("localhost:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("scheme://127.0.0.1:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("scheme://localhost:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "") +} + +func (s *DiscoverySuite) TestCreateEntries(c *check.C) { + entries, err := CreateEntries(nil) + c.Assert(entries, check.DeepEquals, Entries{}) + c.Assert(err, check.IsNil) + + entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) + c.Assert(err, check.IsNil) + expected := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, + } + c.Assert(entries.Equals(expected), check.Equals, true) + + _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestContainsEntry(c *check.C) { + entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) + c.Assert(err, check.IsNil) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) +} + +func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { + entries := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + } + + // Same + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + }), check. 
+ Equals, true) + + // Different size + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "127.0.0.3", Port: "2375"}, + }), check. + Equals, false) + + // Different content + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.42", Port: "2375"}, + }), check. + Equals, false) + +} + +func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { + entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} + entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} + entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} + entries := Entries{entry1, entry2} + + // No diff + added, removed := entries.Diff(Entries{entry2, entry1}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 0) + + // Add + added, removed = entries.Diff(Entries{entry2, entry3, entry1}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 0) + + // Remove + added, removed = entries.Diff(Entries{entry2}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry1), check.Equals, true) + + // Add and remove + added, removed = entries.Diff(Entries{entry1, entry3}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry2), check.Equals, true) +} diff --git a/pkg/discovery/entry.go b/pkg/discovery/entry.go new file mode 100644 index 0000000..ce23bbf --- /dev/null +++ b/pkg/discovery/entry.go @@ -0,0 +1,94 @@ +package discovery + +import "net" + +// NewEntry creates a new entry. +func NewEntry(url string) (*Entry, error) { + host, port, err := net.SplitHostPort(url) + if err != nil { + return nil, err + } + return &Entry{host, port}, nil +} + +// An Entry represents a host. +type Entry struct { + Host string + Port string +} + +// Equals returns true if cmp contains the same data. +func (e *Entry) Equals(cmp *Entry) bool { + return e.Host == cmp.Host && e.Port == cmp.Port +} + +// String returns the string form of an entry. +func (e *Entry) String() string { + return net.JoinHostPort(e.Host, e.Port) +} + +// Entries is a list of *Entry with some helpers. +type Entries []*Entry + +// Equals returns true if cmp contains the same data. +func (e Entries) Equals(cmp Entries) bool { + // Check if the file has really changed. + if len(e) != len(cmp) { + return false + } + for i := range e { + if !e[i].Equals(cmp[i]) { + return false + } + } + return true +} + +// Contains returns true if the Entries contain a given Entry. +func (e Entries) Contains(entry *Entry) bool { + for _, curr := range e { + if curr.Equals(entry) { + return true + } + } + return false +} + +// Diff compares two entries and returns the added and removed entries. +func (e Entries) Diff(cmp Entries) (Entries, Entries) { + added := Entries{} + for _, entry := range cmp { + if !e.Contains(entry) { + added = append(added, entry) + } + } + + removed := Entries{} + for _, entry := range e { + if !cmp.Contains(entry) { + removed = append(removed, entry) + } + } + + return added, removed +} + +// CreateEntries returns an array of entries based on the given addresses. 
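+// Entries that are empty strings are skipped, so a trailing separator in a
+// comma-separated node list is harmless. For example (sketch):
+//
+//	entries, _ := CreateEntries([]string{"1.1.1.1:2375", ""})
+//	// entries holds the single entry 1.1.1.1:2375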
+func CreateEntries(addrs []string) (Entries, error) { + entries := Entries{} + if addrs == nil { + return entries, nil + } + + for _, addr := range addrs { + if len(addr) == 0 { + continue + } + entry, err := NewEntry(addr) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} diff --git a/pkg/discovery/file/file.go b/pkg/discovery/file/file.go new file mode 100644 index 0000000..2b8e27b --- /dev/null +++ b/pkg/discovery/file/file.go @@ -0,0 +1,107 @@ +package file + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + heartbeat time.Duration + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("file", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { + s.path = path + s.heartbeat = heartbeat + return nil +} + +func parseFileContent(content []byte) []string { + var result []string + for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { + line = strings.TrimSpace(line) + // Ignoring line starts with # + if strings.HasPrefix(line, "#") { + continue + } + // Inlined # comment also ignored. + if strings.Contains(line, "#") { + line = line[0:strings.Index(line, "#")] + // Trim additional spaces caused by above stripping. + line = strings.TrimSpace(line) + } + result = append(result, discovery.Generate(line)...) + } + return result +} + +func (s *Discovery) fetch() (discovery.Entries, error) { + fileContent, err := ioutil.ReadFile(s.path) + if err != nil { + return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) + } + return discovery.CreateEntries(parseFileContent(fileContent)) +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + currentEntries, err := s.fetch() + if err != nil { + errCh <- err + } else { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + newEntries, err := s.fetch() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/pkg/discovery/file/file_test.go b/pkg/discovery/file/file_test.go new file mode 100644 index 0000000..667f00b --- /dev/null +++ b/pkg/discovery/file/file_test.go @@ -0,0 +1,114 @@ +package file + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("/path/to/file", 1000, 0, nil) + c.Assert(d.path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestNew(c *check.C) { + d, err := discovery.New("file:///path/to/file", 0, 0, nil) + c.Assert(err, check.IsNil) + c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestContent(c *check.C) { + data := ` +1.1.1.[1:2]:1111 +2.2.2.[2:4]:2222 +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 5) + c.Assert(ips[0], check.Equals, "1.1.1.1:1111") + c.Assert(ips[1], check.Equals, "1.1.1.2:1111") + c.Assert(ips[2], check.Equals, "2.2.2.2:2222") + c.Assert(ips[3], check.Equals, "2.2.2.3:2222") + c.Assert(ips[4], check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + discovery := &Discovery{path: "/path/to/file"} + c.Assert(discovery.Register("0.0.0.0"), check.NotNil) +} + +func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { + data := ` +### test ### +1.1.1.1:1111 # inline comment +# 2.2.2.2:2222 + ### empty line with comment + 3.3.3.3:3333 +### test ### +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 2) + c.Assert("1.1.1.1:1111", check.Equals, ips[0]) + c.Assert("3.3.3.3:3333", check.Equals, ips[1]) +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + data := ` +1.1.1.1:1111 +2.2.2.2:2222 +` + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + // Create a temporary file and remove it. + tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") + c.Assert(err, check.IsNil) + c.Assert(tmp.Close(), check.IsNil) + c.Assert(os.Remove(tmp.Name()), check.IsNil) + + // Set up file discovery. + d := &Discovery{} + d.Initialize(tmp.Name(), 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // Make sure it fires errors since the file doesn't exist. + c.Assert(<-errCh, check.NotNil) + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Write the file and make sure we get the expected value back. + c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry and look it up. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + _, err = f.WriteString("\n3.3.3.3:3333\n") + c.Assert(err, check.IsNil) + f.Close() + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. 
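+ // Closing stopCh makes the watch goroutine return, which closes ch and
+ // errCh; receiving from a closed channel yields the zero value, which is
+ // what the nil assertions below rely on.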
+ close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/pkg/discovery/generator.go b/pkg/discovery/generator.go new file mode 100644 index 0000000..d222982 --- /dev/null +++ b/pkg/discovery/generator.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "fmt" + "regexp" + "strconv" +) + +// Generate takes care of IP generation +func Generate(pattern string) []string { + re, _ := regexp.Compile(`\[(.+):(.+)\]`) + submatch := re.FindStringSubmatch(pattern) + if submatch == nil { + return []string{pattern} + } + + from, err := strconv.Atoi(submatch[1]) + if err != nil { + return []string{pattern} + } + to, err := strconv.Atoi(submatch[2]) + if err != nil { + return []string{pattern} + } + + template := re.ReplaceAllString(pattern, "%d") + + var result []string + for val := from; val <= to; val++ { + entry := fmt.Sprintf(template, val) + result = append(result, entry) + } + + return result +} diff --git a/pkg/discovery/generator_test.go b/pkg/discovery/generator_test.go new file mode 100644 index 0000000..6281c46 --- /dev/null +++ b/pkg/discovery/generator_test.go @@ -0,0 +1,53 @@ +package discovery + +import ( + "github.com/go-check/check" +) + +func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { + ips := Generate("127.0.0.1") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1") +} + +func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { + ips := Generate("127.0.0.1:8080") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1:8080") +} + +func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { + ips := Generate("127.0.0.[1]") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.[1]") +} + +func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { + ips := Generate("127.0.0.[1:11]:2375") + c.Assert(len(ips), check.Equals, 11) + c.Assert(ips[0], check.Equals, "127.0.0.1:2375") + c.Assert(ips[1], check.Equals, "127.0.0.2:2375") + c.Assert(ips[2], check.Equals, "127.0.0.3:2375") + c.Assert(ips[3], check.Equals, "127.0.0.4:2375") + c.Assert(ips[4], check.Equals, "127.0.0.5:2375") + c.Assert(ips[5], check.Equals, "127.0.0.6:2375") + c.Assert(ips[6], check.Equals, "127.0.0.7:2375") + c.Assert(ips[7], check.Equals, "127.0.0.8:2375") + c.Assert(ips[8], check.Equals, "127.0.0.9:2375") + c.Assert(ips[9], check.Equals, "127.0.0.10:2375") + c.Assert(ips[10], check.Equals, "127.0.0.11:2375") +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { + malformedInput := "127.0.0.[x:11]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { + malformedInput := "127.0.0.[1:x]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} diff --git a/pkg/discovery/kv/kv.go b/pkg/discovery/kv/kv.go new file mode 100644 index 0000000..25be051 --- /dev/null +++ b/pkg/discovery/kv/kv.go @@ -0,0 +1,180 @@ +package kv + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv" + "github.com/docker/libkv/store" +) + +const ( + defaultDiscoveryPath = "docker/nodes" +) + +// Discovery is exported +type Discovery struct { + backend store.Backend + store 
store.Store + heartbeat time.Duration + ttl time.Duration + prefix string + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { + var ( + parts = strings.SplitN(uris, "/", 2) + addrs = strings.Split(parts[0], ",") + err error + ) + + // A custom prefix to the path can be optionally used. + if len(parts) == 2 { + s.prefix = parts[1] + } + + s.heartbeat = heartbeat + s.ttl = ttl + + // Use a custom path if specified in discovery options + dpath := defaultDiscoveryPath + if clusterOpts["kv.path"] != "" { + dpath = clusterOpts["kv.path"] + } + + s.path = path.Join(s.prefix, dpath) + + var config *store.Config + if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { + logrus.Info("Initializing discovery with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }) + if err != nil { + return err + } + config = &store.Config{ + // Set ClientTLS to trigger https (bug in libkv/etcd) + ClientTLS: &store.ClientTLSConfig{ + CACertFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }, + // The actual TLS config that will be used + TLS: tlsConfig, + } + } else { + logrus.Info("Initializing discovery without TLS") + } + + // Creates a new store, will ignore options given + // if not supported by the chosen store + s.store, err = libkv.NewStore(s.backend, addrs, config) + return err +} + +// Watch the store until either there's a store error or we receive a stop request. +// Returns false if we shouldn't attempt watching the store anymore (stop request received). +func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { + for { + select { + case pairs := <-watchCh: + if pairs == nil { + return true + } + + logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) + + // Convert `KVPair` into `discovery.Entry`. + addrs := make([]string, len(pairs)) + for _, pair := range pairs { + addrs = append(addrs, string(pair.Value)) + } + + entries, err := discovery.CreateEntries(addrs) + if err != nil { + errCh <- err + } else { + discoveryCh <- entries + } + case <-stopCh: + // We were requested to stop watching. + return false + } + } +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + + go func() { + defer close(ch) + defer close(errCh) + + // Forever: Create a store watch, watch until we get an error and then try again. + // Will only stop if we receive a stopCh request. + for { + // Create the path to watch if it does not exist yet + exists, err := s.store.Exists(s.path) + if err != nil { + errCh <- err + } + if !exists { + if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { + errCh <- err + } + } + + // Set up a watch. + watchCh, err := s.store.WatchTree(s.path, stopCh) + if err != nil { + errCh <- err + } else { + if !s.watchOnce(stopCh, watchCh, ch, errCh) { + return + } + } + + // If we get here it means the store watch channel was closed. This + // is unexpected so let's retry later. 
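+ // Sleeping one heartbeat below keeps a permanently failing store from
+ // turning this retry loop into a busy spin.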
+ errCh <- fmt.Errorf("Unexpected watch error") + time.Sleep(s.heartbeat) + } + }() + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + opts := &store.WriteOptions{TTL: s.ttl} + return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) +} + +// Store returns the underlying store used by KV discovery. +func (s *Discovery) Store() store.Store { + return s.store +} + +// Prefix returns the store prefix +func (s *Discovery) Prefix() string { + return s.prefix +} diff --git a/pkg/discovery/kv/kv_test.go b/pkg/discovery/kv/kv_test.go new file mode 100644 index 0000000..dab3939 --- /dev/null +++ b/pkg/discovery/kv/kv_test.go @@ -0,0 +1,324 @@ +package kv + +import ( + "errors" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (ds *DiscoverySuite) TestInitialize(c *check.C) { + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1"}, + } + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1", 0, 0, nil) + d.store = storeMock + + s := d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") + c.Assert(d.path, check.Equals, defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 3) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") + c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") + + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) +} + +// Extremely limited mock store so we can test initialization +type Mock struct { + // Endpoints passed to InitializeMock + Endpoints []string + + // Options passed to InitializeMock + Options *store.Config +} + +func NewMock(endpoints []string, options *store.Config) (store.Store, error) { + s := &Mock{} + s.Endpoints = endpoints + s.Options = options + return s, nil +} +func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { + return errors.New("Put not supported") +} +func (s *Mock) Get(key string) (*store.KVPair, error) { + return nil, errors.New("Get not supported") +} +func (s *Mock) Delete(key string) error { + return errors.New("Delete not supported") +} + +// Exists mock +func (s *Mock) Exists(key string) (bool, error) { + return false, errors.New("Exists not supported") +} + +// Watch mock +func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, errors.New("Watch not supported") +} + +// WatchTree mock +func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return 
nil, errors.New("WatchTree not supported") +} + +// NewLock mock +func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, errors.New("NewLock not supported") +} + +// List mock +func (s *Mock) List(prefix string) ([]*store.KVPair, error) { + return nil, errors.New("List not supported") +} + +// DeleteTree mock +func (s *Mock) DeleteTree(prefix string) error { + return errors.New("DeleteTree not supported") +} + +// AtomicPut mock +func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + return false, nil, errors.New("AtomicPut not supported") +} + +// AtomicDelete mock +func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return false, errors.New("AtomicDelete not supported") +} + +// Close mock +func (s *Mock) Close() { + return +} + +func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { + cert := `-----BEGIN CERTIFICATE----- +MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT +B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD +VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC +O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds ++J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q +V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb +UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 +Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT +V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ +BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j +BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz +7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI +xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M +ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY +8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn +t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX +FpTxDmJHEV4bzUzh +-----END CERTIFICATE----- +` + key := `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 ++zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR +SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr +pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe +rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj +xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj +i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx +qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO +1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 +5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony +MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 +ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP +L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N +XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT +Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B +LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU +t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ +QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV +xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj +xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc +qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa 
+V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV +PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk +dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL +BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= +-----END RSA PRIVATE KEY----- +` + certFile, err := ioutil.TempFile("", "cert") + c.Assert(err, check.IsNil) + defer os.Remove(certFile.Name()) + certFile.Write([]byte(cert)) + certFile.Close() + keyFile, err := ioutil.TempFile("", "key") + c.Assert(err, check.IsNil) + defer os.Remove(keyFile.Name()) + keyFile.Write([]byte(key)) + keyFile.Close() + + libkv.AddStore("mock", NewMock) + d := &Discovery{backend: "mock"} + err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ + "kv.cacertfile": certFile.Name(), + "kv.certfile": certFile.Name(), + "kv.keyfile": keyFile.Name(), + }) + c.Assert(err, check.IsNil) + s := d.store.(*Mock) + c.Assert(s.Options.TLS, check.NotNil) + c.Assert(s.Options.TLS.RootCAs, check.NotNil) + c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) +} + +func (ds *DiscoverySuite) TestWatch(c *check.C) { + mockCh := make(chan []*store.KVPair) + + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + mockKVChan: mockCh, + } + + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + kvs := []*store.KVPair{ + {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, + {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, + } + + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // It should fire an error since the first WatchTree call failed. + c.Assert(<-errCh, check.ErrorMatches, "test error") + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Push the entries into the store channel and make sure discovery emits. + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + close(mockCh) + // Give it enough time to call WatchTree. + time.Sleep(3 * time.Second) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} + +// FakeStore implements store.Store methods. It mocks all store +// function in a simple, naive way. +type FakeStore struct { + Endpoints []string + Options *store.Config + mockKVChan <-chan []*store.KVPair + + watchTreeCallCount int +} + +func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { + return nil +} + +func (s *FakeStore) Get(key string) (*store.KVPair, error) { + return nil, nil +} + +func (s *FakeStore) Delete(key string) error { + return nil +} + +func (s *FakeStore) Exists(key string) (bool, error) { + return true, nil +} + +func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, nil +} + +// WatchTree will fail the first time, and return the mockKVchan afterwards. +// This is the behavior we need for testing.. If we need 'moar', should update this. 
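+// (watchTreeCallCount is the guard: it is zero until the first call, so
+// exactly that call reports the error.)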
+func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + if s.watchTreeCallCount == 0 { + s.watchTreeCallCount = 1 + return nil, errors.New("test error") + } + // First calls error + return s.mockKVChan, nil +} + +func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, nil +} + +func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { + return []*store.KVPair{}, nil +} + +func (s *FakeStore) DeleteTree(directory string) error { + return nil +} + +func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + return true, nil, nil +} + +func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return true, nil +} + +func (s *FakeStore) Close() { +} diff --git a/pkg/discovery/memory/memory.go b/pkg/discovery/memory/memory.go new file mode 100644 index 0000000..ba8b1f5 --- /dev/null +++ b/pkg/discovery/memory/memory.go @@ -0,0 +1,93 @@ +package memory + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery implements a discovery backend that keeps +// data in memory. +type Discovery struct { + heartbeat time.Duration + values []string + mu sync.Mutex +} + +func init() { + Init() +} + +// Init registers the memory backend on demand. +func Init() { + discovery.Register("memory", &Discovery{}) +} + +// Initialize sets the heartbeat for the memory backend. +func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { + s.heartbeat = heartbeat + s.values = make([]string, 0) + return nil +} + +// Watch sends periodic discovery updates to a channel. +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + var currentEntries discovery.Entries + var err error + + s.mu.Lock() + if len(s.values) > 0 { + currentEntries, err = discovery.CreateEntries(s.values) + } + s.mu.Unlock() + + if err != nil { + errCh <- err + } else if currentEntries != nil { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + s.mu.Lock() + newEntries, err := discovery.CreateEntries(s.values) + s.mu.Unlock() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register adds a new address to the discovery. +func (s *Discovery) Register(addr string) error { + s.mu.Lock() + s.values = append(s.values, addr) + s.mu.Unlock() + return nil +} diff --git a/pkg/discovery/memory/memory_test.go b/pkg/discovery/memory/memory_test.go new file mode 100644 index 0000000..c2da0a0 --- /dev/null +++ b/pkg/discovery/memory/memory_test.go @@ -0,0 +1,48 @@ +package memory + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type discoverySuite struct{} + +var _ = check.Suite(&discoverySuite{}) + +func (s *discoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("foo", 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + } + + c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + expected = discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/pkg/discovery/nodes/nodes.go b/pkg/discovery/nodes/nodes.go new file mode 100644 index 0000000..c0e3c07 --- /dev/null +++ b/pkg/discovery/nodes/nodes.go @@ -0,0 +1,54 @@ +package nodes + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + entries discovery.Entries +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("nodes", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { + for _, input := range strings.Split(uris, ",") { + for _, ip := range discovery.Generate(input) { + entry, err := discovery.NewEntry(ip) + if err != nil { + return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) + } + s.entries = append(s.entries, entry) + } + } + + return nil +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + go func() { + defer close(ch) + ch <- s.entries + <-stopCh + }() + return ch, nil +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/pkg/discovery/nodes/nodes_test.go b/pkg/discovery/nodes/nodes_test.go new file mode 100644 index 0000000..e26568c --- /dev/null +++ b/pkg/discovery/nodes/nodes_test.go @@ -0,0 +1,51 @@ +package nodes + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 2) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") +} + +func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 5) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") + c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") + c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") + c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + ch, _ := d.Watch(nil) + c.Assert(expected.Equals(<-ch), check.Equals, true) +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + d := &Discovery{} + c.Assert(d.Register("0.0.0.0"), check.NotNil) +} diff --git a/pkg/filenotify/filenotify.go b/pkg/filenotify/filenotify.go new file mode 100644 index 0000000..7a81cbd --- /dev/null +++ b/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. 
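+//
+// A minimal usage sketch:
+//
+//	w, _ := filenotify.New() // falls back to polling if fsnotify fails
+//	defer w.Close()
+//	if err := w.Add("/path/to/file"); err != nil {
+//		// the path cannot be watched
+//	}
+//	for {
+//		select {
+//		case e := <-w.Events():
+//			// react to the fsnotify.Event e
+//		case err := <-w.Errors():
+//			// react to the watch error err
+//		}
+//	}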
+package filenotify + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/pkg/filenotify/fsnotify.go b/pkg/filenotify/fsnotify.go new file mode 100644 index 0000000..5d08a99 --- /dev/null +++ b/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifer interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/pkg/filenotify/poller.go b/pkg/filenotify/poller.go new file mode 100644 index 0000000..b90111b --- /dev/null +++ b/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("watch does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. 
when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the 
error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/pkg/filenotify/poller_test.go b/pkg/filenotify/poller_test.go new file mode 100644 index 0000000..d854201 --- /dev/null +++ b/pkg/filenotify/poller_test.go @@ -0,0 +1,119 @@ +package filenotify + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "github.com/fsnotify/fsnotify" +) + +func TestPollerAddRemove(t *testing.T) { + w := NewPollingWatcher() + + if err := w.Add("no-such-file"); err == nil { + t.Fatal("should have gotten error when adding a non-existent file") + } + if err := w.Remove("no-such-file"); err == nil { + t.Fatal("should have gotten error when removing non-existent watch") + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + if err := w.Remove(f.Name()); err != nil { + t.Fatal(err) + } +} + +func TestPollerEvent(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No chmod on Windows") + } + w := NewPollingWatcher() + + f, err := ioutil.TempFile("", "test-poller") + if err != nil { + t.Fatal("error creating temp file") + } + defer os.RemoveAll(f.Name()) + f.Close() + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + select { + case <-w.Events(): + t.Fatal("got event before anything happened") + case <-w.Errors(): + t.Fatal("got error before anything happened") + default: + } + + if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Write); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(f.Name(), 600); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Chmod); err != nil { + t.Fatal(err) + } + + if err := os.Remove(f.Name()); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Remove); err != nil { + t.Fatal(err) + } +} + +func TestPollerClose(t *testing.T) { + w := NewPollingWatcher() + if err := w.Close(); err != nil { + t.Fatal(err) + } + // test double-close + if err := w.Close(); err != nil { + t.Fatal(err) + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + if err := w.Add(f.Name()); err == nil { + t.Fatal("should have gotten error adding watch for closed watcher") + } +} + +func assertEvent(w FileWatcher, eType fsnotify.Op) error { + var err error + select { + case e := <-w.Events(): + if e.Op != eType { + err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e.Op) + } + case e := <-w.Errors(): + err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) + case <-time.After(watchWaitTime * 3): + err = 
fmt.Errorf("timeout waiting for event %v", eType) + } + return err +} diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go new file mode 100644 index 0000000..57cc087 --- /dev/null +++ b/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// PatternMatcher allows checking paths agaist a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. 
+type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. 
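Matches and the PatternMatcher above implement .dockerignore-style semantics: the last matching pattern wins, a leading "!" re-includes a path, and a pattern also matches anything under a directory it names. A sketch, with illustrative patterns and paths whose expected results follow the test table later in this diff (the import path is the repository's usual one):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	pm, err := fileutils.NewPatternMatcher([]string{"docs", "!docs/README.md"})
	if err != nil {
		panic(err) // e.g. a bare "!" or a malformed pattern
	}
	skip, _ := pm.Matches("docs/guide.md")  // true: parent dir "docs" matches
	keep, _ := pm.Matches("docs/README.md") // false: re-included by the "!" rule
	fmt.Println(skip, keep)
}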
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/pkg/fileutils/fileutils_darwin.go b/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 0000000..ccd648f --- /dev/null +++ b/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/pkg/fileutils/fileutils_solaris.go b/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 0000000..0f2cb7a --- /dev/null +++ b/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. 
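CopyFile and CreateIfNotExists above are small conveniences: the first is a no-op only when the cleaned source and destination are identical, the second builds any missing parent directories. A short sketch with illustrative paths:

package main

import (
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Create the directory first (isDir=true), then copy a file into it.
	if err := fileutils.CreateIfNotExists("/tmp/example-dir", true); err != nil {
		log.Fatal(err)
	}
	n, err := fileutils.CopyFile("/etc/hostname", "/tmp/example-dir/hostname")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes", n)
}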
+// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/pkg/fileutils/fileutils_test.go b/pkg/fileutils/fileutils_test.go new file mode 100644 index 0000000..3d61d55 --- /dev/null +++ b/pkg/fileutils/fileutils_test.go @@ -0,0 +1,591 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + + "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return 
the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if !match { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if !match { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if !match { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
+func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +type matchesTestCase struct { + pattern string + text string + pass bool +} + +func TestMatches(t *testing.T) { + tests := []matchesTestCase{ + {"**", "file", true}, + {"**", "file/", true}, + {"**/", "file", true}, // weird one + {"**/", "file/", true}, + {"**", "/", true}, + {"**/", "/", true}, + {"**", "dir/file", true}, + {"**/", "dir/file", true}, + {"**", "dir/file/", true}, + {"**/", "dir/file/", true}, + {"**/**", "dir/file", true}, + {"**/**", "dir/file/", true}, + {"dir/**", "dir/file", true}, + {"dir/**", "dir/file/", true}, + {"dir/**", "dir/dir2/file", true}, + {"dir/**", "dir/dir2/file/", true}, + {"**/dir2/*", "dir/dir2/file", true}, + {"**/dir2/*", "dir/dir2/file/", true}, + {"**/dir2/**", "dir/dir2/dir3/file", true}, + {"**/dir2/**", "dir/dir2/dir3/file/", true}, + {"**file", "file", true}, + {"**file", "dir/file", true}, + {"**/file", "dir/file", true}, + {"**file", "dir/dir/file", true}, + {"**/file", "dir/dir/file", true}, + {"**/file*", "dir/dir/file", true}, + {"**/file*", "dir/dir/file.txt", true}, + {"**/file*txt", "dir/dir/file.txt", true}, + {"**/file*.txt", "dir/dir/file.txt", true}, + {"**/file*.txt*", "dir/dir/file.txt", true}, + {"**/**/*.txt", "dir/dir/file.txt", true}, + {"**/**/*.txt2", "dir/dir/file.txt", false}, + {"**/*.txt", "file.txt", true}, + {"**/**/*.txt", "file.txt", true}, + {"a**/*.txt", "a/file.txt", true}, + {"a**/*.txt", "a/dir/file.txt", true}, + {"a**/*.txt", "a/dir/dir/file.txt", true}, + {"a/*.txt", "a/dir/file.txt", false}, + {"a/*.txt", "a/file.txt", true}, + {"a/*.txt**", "a/file.txt", true}, + {"a[b-d]e", "ae", false}, + {"a[b-d]e", "ace", true}, + {"a[b-d]e", 
"aae", false}, + {"a[^b-d]e", "aze", true}, + {".*", ".foo", true}, + {".*", "foo", false}, + {"abc.def", "abcdef", false}, + {"abc.def", "abc.def", true}, + {"abc.def", "abcZdef", false}, + {"abc?def", "abcZdef", true}, + {"abc?def", "abcdef", false}, + {"a\\\\", "a\\", true}, + {"**/foo/bar", "foo/bar", true}, + {"**/foo/bar", "dir/foo/bar", true}, + {"**/foo/bar", "dir/dir2/foo/bar", true}, + {"abc/**", "abc", false}, + {"abc/**", "abc/def", true}, + {"abc/**", "abc/def/ghi", true}, + {"**/.foo", ".foo", true}, + {"**/.foo", "bar.foo", false}, + } + + if runtime.GOOS != "windows" { + tests = append(tests, []matchesTestCase{ + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + }...) + } + + for _, test := range tests { + desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) + pm, err := NewPatternMatcher([]string{test.pattern}) + require.NoError(t, err, desc) + res, _ := pm.Matches(test.text) + assert.Equal(t, test.pass, res, desc) + } +} + +func TestCleanPatterns(t *testing.T) { + patterns := []string{"docs", "config"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + cleaned := pm.Patterns() + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + patterns := []string{"docs", "config", ""} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + cleaned := pm.Patterns() + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + patterns := []string{"docs", "!docs/README.md"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + patterns := []string{"docs", " !docs/README.md"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + patterns := []string{"docs", "!docs/README.md "} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + patterns := []string{"!"} + _, err := NewPatternMatcher(patterns) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := 
filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} + +// These matchTests are stolen from go's filepath Match tests. +type matchTest struct { + pattern, s string + match bool + err error +} + +var matchTests = []matchTest{ + {"abc", "abc", true, nil}, + {"*", "abc", true, nil}, + {"*c", "abc", true, nil}, + {"a*", "a", true, nil}, + {"a*", "abc", true, nil}, + {"a*", "ab/c", true, nil}, + {"a*/b", "abc/b", true, nil}, + {"a*/b", "a/c/b", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, + {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, + {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, + {"ab[c]", "abc", true, nil}, + {"ab[b-d]", "abc", true, nil}, + {"ab[e-g]", "abc", false, nil}, + {"ab[^c]", "abc", false, nil}, + {"ab[^b-d]", "abc", false, nil}, + {"ab[^e-g]", "abc", true, nil}, + {"a\\*b", "a*b", true, nil}, + {"a\\*b", "ab", false, nil}, + {"a?b", "a☺b", true, nil}, + {"a[^a]b", "a☺b", true, nil}, + {"a???b", "a☺b", false, nil}, + {"a[^a][^a][^a]b", "a☺b", false, nil}, + {"[a-ζ]*", "α", true, nil}, + {"*[a-ζ]", "A", false, nil}, + {"a?b", "a/b", false, nil}, + {"a*b", "a/b", false, nil}, + {"[\\]a]", "]", true, nil}, + {"[\\-]", "-", true, nil}, + {"[x\\-]", "x", true, nil}, + {"[x\\-]", "-", true, nil}, + {"[x\\-]", "z", false, nil}, + {"[\\-x]", "x", true, nil}, + {"[\\-x]", "-", true, nil}, + {"[\\-x]", "a", false, nil}, + {"[]a]", "]", false, filepath.ErrBadPattern}, + {"[-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "x", false, filepath.ErrBadPattern}, + {"[x-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "z", false, filepath.ErrBadPattern}, + {"[-x]", "x", false, filepath.ErrBadPattern}, + {"[-x]", "-", false, filepath.ErrBadPattern}, + {"[-x]", "a", false, filepath.ErrBadPattern}, + {"\\", "a", false, filepath.ErrBadPattern}, + {"[a-b-c]", "a", false, filepath.ErrBadPattern}, + {"[", "a", false, filepath.ErrBadPattern}, + {"[^", "a", false, filepath.ErrBadPattern}, + {"[^bc", "a", false, filepath.ErrBadPattern}, + {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong + {"a[", "ab", false, filepath.ErrBadPattern}, + {"*x", "xxx", true, nil}, +} + +func errp(e error) string { + if e == nil { + return "" + } + return e.Error() +} + +// TestMatch test's our version of filepath.Match, called regexpMatch. +func TestMatch(t *testing.T) { + for _, tt := range matchTests { + pattern := tt.pattern + s := tt.s + if runtime.GOOS == "windows" { + if strings.Contains(pattern, "\\") { + // no escape allowed on windows. 
+ continue + } + pattern = filepath.Clean(pattern) + s = filepath.Clean(s) + } + ok, err := Matches(s, []string{pattern}) + if ok != tt.match || err != tt.err { + t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) + } + } +} diff --git a/pkg/fileutils/fileutils_unix.go b/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000..d5c3abf --- /dev/null +++ b/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/pkg/fileutils/fileutils_windows.go b/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000..5ec21ca --- /dev/null +++ b/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/pkg/fsutils/fsutils_linux.go b/pkg/fsutils/fsutils_linux.go new file mode 100644 index 0000000..c8cac30 --- /dev/null +++ b/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,87 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/pkg/fsutils/fsutils_linux_test.go b/pkg/fsutils/fsutils_linux_test.go new file mode 100644 index 0000000..4a64823 --- /dev/null +++ b/pkg/fsutils/fsutils_linux_test.go @@ -0,0 +1,91 @@ +// +build linux + +package fsutils + +import ( + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" +) + +func testSupportsDType(t *testing.T, expected 
bool, mkfsCommand string, mkfsArg ...string) { + // check whether mkfs is installed + if _, err := exec.LookPath(mkfsCommand); err != nil { + t.Skipf("%s not installed: %v", mkfsCommand, err) + } + + // create a sparse image + imageSize := int64(32 * 1024 * 1024) + imageFile, err := ioutil.TempFile("", "fsutils-image") + if err != nil { + t.Fatal(err) + } + imageFileName := imageFile.Name() + defer os.Remove(imageFileName) + if _, err = imageFile.Seek(imageSize-1, 0); err != nil { + t.Fatal(err) + } + if _, err = imageFile.Write([]byte{0}); err != nil { + t.Fatal(err) + } + if err = imageFile.Close(); err != nil { + t.Fatal(err) + } + + // create a mountpoint + mountpoint, err := ioutil.TempDir("", "fsutils-mountpoint") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountpoint) + + // format the image + args := append(mkfsArg, imageFileName) + t.Logf("Executing `%s %v`", mkfsCommand, args) + out, err := exec.Command(mkfsCommand, args...).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatal(err) + } + + // loopback-mount the image. + // for ease of setting up loopback device, we use os/exec rather than syscall.Mount + out, err = exec.Command("mount", "-o", "loop", imageFileName, mountpoint).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Skip("skipping the test because mount failed") + } + defer func() { + if err := syscall.Unmount(mountpoint, 0); err != nil { + t.Fatal(err) + } + }() + + // check whether it supports d_type + result, err := SupportsDType(mountpoint) + if err != nil { + t.Fatal(err) + } + t.Logf("Supports d_type: %v", result) + if result != expected { + t.Fatalf("expected %v, got %v", expected, result) + } +} + +func TestSupportsDTypeWithFType0XFS(t *testing.T) { + testSupportsDType(t, false, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=0") +} + +func TestSupportsDTypeWithFType1XFS(t *testing.T) { + testSupportsDType(t, true, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=1") +} + +func TestSupportsDTypeWithExt4(t *testing.T) { + testSupportsDType(t, true, "mkfs.ext4") +} diff --git a/pkg/homedir/homedir.go b/pkg/homedir/homedir.go new file mode 100644 index 0000000..8154e83 --- /dev/null +++ b/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
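Key and Get above cover both Windows (USERPROFILE) and Unix (HOME, falling back to the passwd database via libcontainer); GetShortcutString, whose body follows, yields the shell shorthand. A sketch (the config filename is illustrative):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	home := homedir.Get() // "" only if no env var and no passwd entry
	fmt.Println(filepath.Join(home, ".docker", "config.json"))
	fmt.Println(homedir.GetShortcutString()) // "~" or "%USERPROFILE%"
}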
+func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/pkg/homedir/homedir_linux.go b/pkg/homedir/homedir_linux.go new file mode 100644 index 0000000..012fe52 --- /dev/null +++ b/pkg/homedir/homedir_linux.go @@ -0,0 +1,23 @@ +// +build linux + +package homedir + +import ( + "os" + + "github.com/docker/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. +// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/pkg/homedir/homedir_others.go b/pkg/homedir/homedir_others.go new file mode 100644 index 0000000..6b96b85 --- /dev/null +++ b/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/pkg/homedir/homedir_test.go b/pkg/homedir/homedir_test.go new file mode 100644 index 0000000..7a95cb2 --- /dev/null +++ b/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/pkg/idtools/idtools.go b/pkg/idtools/idtools.go new file mode 100644 index 0000000..68a072d --- /dev/null +++ b/pkg/idtools/idtools.go @@ -0,0 +1,279 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +// Deprecated: Use MkdirAllAndChown +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. 
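An IDMap entry above mirrors one line of the mapping handed to the kernel when a user namespace is created: container IDs [ContainerID, ContainerID+Size) map linearly onto host IDs [HostID, HostID+Size). A self-contained sketch of that arithmetic with illustrative values (the struct is inlined so the example stands alone):

package main

import "fmt"

// Mirrors the idtools.IDMap struct above.
type IDMap struct {
	ContainerID, HostID, Size int
}

func main() {
	m := IDMap{ContainerID: 0, HostID: 100000, Size: 65536}
	containerUID := 33 // e.g. www-data inside the container
	// Same linear formula used by toHost/toContainer below.
	hostUID := m.HostID + (containerUID - m.ContainerID)
	fmt.Println(hostUID) // 100033
}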
+// If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed-in id.
+func toHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+			hostID := m.HostID + (contID - m.ContainerID)
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+	UID int
+	GID int
+}
+
+// IDMappings contains the mappings of UIDs and GIDs
+type IDMappings struct {
+	uids []IDMap
+	gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and,
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+	subuidRanges, err := parseSubuid(username)
+	if err != nil {
+		return nil, err
+	}
+	subgidRanges, err := parseSubgid(groupname)
+	if err != nil {
+		return nil, err
+	}
+	if len(subuidRanges) == 0 {
+		return nil, fmt.Errorf("No subuid ranges found for user %q", username)
+	}
+	if len(subgidRanges) == 0 {
+		return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+	}
+
+	return &IDMappings{
+		uids: createIDMap(subuidRanges),
+		gids: createIDMap(subgidRanges),
+	}, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+	return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+	uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+	return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
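RootPair above, together with ToHost whose definition follows, can be exercised without any /etc/sub{uid,gid} state by building the mappings directly. A sketch with one illustrative 65536-wide range:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	maps := idtools.NewIDMappingsFromMaps(
		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	)
	fmt.Println(maps.RootPair()) // {100000 100000}: the remapped root

	host, err := maps.ToHost(idtools.IDPair{UID: 33, GID: 33})
	if err != nil {
		panic(err)
	}
	fmt.Println(host) // {100033 100033}
}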
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
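createIDMap above lays the sorted host ranges end-to-end in container ID space, so container IDs stay contiguous even when the host ranges are not. A sketch of the result it would build from two illustrative /etc/subuid ranges, 100000:1000 and 231072:65536 (createIDMap itself is unexported; only the exported IDMap type is used here):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	m := []idtools.IDMap{
		{ContainerID: 0, HostID: 100000, Size: 1000},
		{ContainerID: 1000, HostID: 231072, Size: 65536},
	}
	fmt.Println(m) // [{0 100000 1000} {1000 231072 65536}]
}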
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/pkg/idtools/idtools_unix.go b/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000..8701bb7 --- /dev/null +++ b/pkg/idtools/idtools_unix.go @@ -0,0 +1,204 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
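parseSubidFile above accepts the standard shadow-utils format, one name:start:count triple per line, skipping blank lines and "#" comments. A sample input, mirroring the fixture used in the unix tests later in this diff:

tss:100000:65536
# empty default subuid/subgid file

dockremap:231072:65536

For username "dockremap" this parses to a single range with Start 231072 and Length 65536; passing the special username "ALL" returns every range in the file.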
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return os.Chown(path, ownerUID, ownerGID) + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/pkg/idtools/idtools_unix_test.go b/pkg/idtools/idtools_unix_test.go new file mode 100644 index 0000000..31522a5 --- /dev/null +++ b/pkg/idtools/idtools_unix_test.go @@ -0,0 +1,253 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/stretchr/testify/require" +) + +type node struct { + uid int + gid int +} + +func TestMkdirAllAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirall") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be 
owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllAndChownNew(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirnew") + require.NoError(t, err) + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + require.NoError(t, buildTree(dirName, testTree)) + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{99, 99}) + require.NoError(t, err) + + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) + + // test 2-deep new directories--both should be owned by the uid/gid pair + err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{101, 101}) + require.NoError(t, err) + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) + + // test a directory that already exists; should NOT be chowned + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{102, 102}) + require.NoError(t, err) + verifyTree, err = readTree(dirName, "") + require.NoError(t, err) + require.NoError(t, compareTrees(testTree, verifyTree)) +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, 
verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} + +func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "parsesubid") + if err != nil { + t.Fatal(err) + } + fnamePath := filepath.Join(tmpDir, "testsubuid") + fcontent := `tss:100000:65536 +# empty default subuid/subgid file + +dockremap:231072:65536` + if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { + t.Fatal(err) + } + ranges, err := parseSubidFile(fnamePath, "dockremap") + if err != nil { + t.Fatal(err) + } + if len(ranges) != 1 { + t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) + } + if ranges[0].Start != 231072 { + t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) + } + if ranges[0].Length != 65536 { + t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) + } +} diff --git a/pkg/idtools/idtools_windows.go b/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000..45d2878 --- /dev/null +++ b/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
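The Mkdir tests above pin down the ownership contract of the idtools helpers; in sketch form (illustrative paths and IDs, and the chown calls only succeed when run as root):

package main

import (
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	ids := idtools.IDPair{UID: 99, GID: 99}

	// Chowns every component it creates, and re-chowns the leaf even when
	// the leaf already exists.
	if err := idtools.MkdirAllAndChown("/tmp/data/a/b", 0755, ids); err != nil {
		log.Fatal(err)
	}

	// Chowns only components it had to create: a pre-existing /tmp/data/a
	// keeps its original owner.
	if err := idtools.MkdirAllAndChownNew("/tmp/data/a/c", 0755, ids); err != nil {
		log.Fatal(err)
	}
}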
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/pkg/idtools/usergroupadd_linux.go b/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000..9da7975 --- /dev/null +++ b/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + 
return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/pkg/idtools/usergroupadd_unsupported.go b/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000..d98b354 --- /dev/null +++ b/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
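In the Linux implementation above, findNextRangeStart walks the already-sorted ranges and bumps a candidate start past any allocated range the new 65536-wide window would collide with. The same walk in isolation (local names and a half-open overlap test; a sketch, not the patch's exact bounds check):

```go
package main

import (
	"fmt"
	"sort"
)

type idRange struct{ start, length int }

const (
	rangeLen   = 65536  // default subordinate range length
	rangeStart = 100000 // first candidate start
)

// nextRangeStart slides the candidate window past every range it
// overlaps, mirroring findNextRangeStart above.
func nextRangeStart(existing []idRange) int {
	sort.Slice(existing, func(i, j int) bool { return existing[i].start < existing[j].start })
	start := rangeStart
	for _, r := range existing {
		if start < r.start+r.length && start+rangeLen > r.start { // overlap
			start = r.start + r.length
		}
	}
	return start
}

func main() {
	taken := []idRange{{100000, 65536}, {165536, 65536}}
	fmt.Println(nextRangeStart(taken)) // 231072
}
```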
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/pkg/idtools/utils_unix.go b/pkg/idtools/utils_unix.go new file mode 100644 index 0000000..9703ecb --- /dev/null +++ b/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/pkg/ioutils/buffer.go b/pkg/ioutils/buffer.go new file mode 100644 index 0000000..3d737b3 --- /dev/null +++ b/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/pkg/ioutils/buffer_test.go b/pkg/ioutils/buffer_test.go new file mode 100644 index 0000000..f687124 --- /dev/null +++ b/pkg/ioutils/buffer_test.go @@ -0,0 +1,153 @@ +package ioutils + +import ( + "bytes" + "testing" +) + +func TestFixedBufferCap(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 5)} + + n := buf.Cap() + if n != 5 { + t.Fatalf("expected buffer capacity to be 5 bytes, got %d", n) + } +} + +func TestFixedBufferLen(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 10)} + + buf.Write([]byte("hello")) + l := buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to be 5 bytes, got %d", l) + } + + buf.Write([]byte("world")) + l = buf.Len() + if l != 10 { + t.Fatalf("expected buffer length to be 10 bytes, got %d", l) + } + + // read 5 bytes + b := make([]byte, 5) + buf.Read(b) + + l = buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to be 5 bytes, got %d", l) + } + + n, err := buf.Write([]byte("i-wont-fit")) + if n != 0 { + t.Fatalf("expected no bytes to be written to buffer, got %d", n) + } + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v", err) + } + + l = buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to still be 5 bytes, got %d", l) + } + + buf.Reset() + l = buf.Len() + if l != 0 { + t.Fatalf("expected buffer length to still be 0 bytes, got %d", l) + } +} + +func TestFixedBufferString(t *testing.T) { + buf := &fixedBuffer{buf: 
make([]byte, 0, 10)}
+
+	buf.Write([]byte("hello"))
+	buf.Write([]byte("world"))
+
+	out := buf.String()
+	if out != "helloworld" {
+		t.Fatalf("expected output to be \"helloworld\", got %q", out)
+	}
+
+	// read 5 bytes
+	b := make([]byte, 5)
+	buf.Read(b)
+
+	// test that fixedBuffer.String() only returns the part that hasn't been read
+	out = buf.String()
+	if out != "world" {
+		t.Fatalf("expected output to be \"world\", got %q", out)
+	}
+}
+
+func TestFixedBufferWrite(t *testing.T) {
+	buf := &fixedBuffer{buf: make([]byte, 0, 64)}
+	n, err := buf.Write([]byte("hello"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n != 5 {
+		t.Fatalf("expected 5 bytes written, got %d", n)
+	}
+
+	if string(buf.buf[:5]) != "hello" {
+		t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5]))
+	}
+
+	n, err = buf.Write(bytes.Repeat([]byte{1}, 64))
+	if n != 59 {
+		t.Fatalf("expected 59 bytes written before buffer is full, got %d", n)
+	}
+	if err != errBufferFull {
+		t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64])
+	}
+}
+
+func TestFixedBufferRead(t *testing.T) {
+	buf := &fixedBuffer{buf: make([]byte, 0, 64)}
+	if _, err := buf.Write([]byte("hello world")); err != nil {
+		t.Fatal(err)
+	}
+
+	b := make([]byte, 5)
+	n, err := buf.Read(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n != 5 {
+		t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String())
+	}
+
+	if string(b) != "hello" {
+		t.Fatalf("expected \"hello\", got %q", string(b))
+	}
+
+	n, err = buf.Read(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n != 5 {
+		t.Fatalf("expected 5 bytes read, got %d", n)
+	}
+
+	if string(b) != " worl" {
+		t.Fatalf("expected \" worl\", got %s", string(b))
+	}
+
+	b = b[:1]
+	n, err = buf.Read(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n != 1 {
+		t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String())
+	}
+
+	if string(b) != "d" {
+		t.Fatalf("expected \"d\", got %s", string(b))
+	}
+}
diff --git a/pkg/ioutils/bytespipe.go b/pkg/ioutils/bytespipe.go
new file mode 100644
index 0000000..72a04f3
--- /dev/null
+++ b/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data.
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+	// ErrClosed is returned when Write is called on a closed BytesPipe.
+	ErrClosed = errors.New("write to closed BytesPipe")
+
+	bufPools     = make(map[int]*sync.Pool)
+	bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser that works like a pipe (a FIFO queue).
+// All written data may be read at most once. BytesPipe also allocates
+// and releases byte slices to adjust to current needs, so the buffer
+// won't stay overgrown after peak loads.
+type BytesPipe struct {
+	mu       sync.Mutex
+	wait     *sync.Cond
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from the next Read; nil while the pipe is open
+}
+
+// NewBytesPipe creates a new BytesPipe. The internal buffer starts as a
+// single slice with capacity minCap (64 bytes) and grows on demand as
+// data is written.
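Before the constructor below: with the constants above, BytesPipe grows its buffer list by doubling capacities from 64 B up to the 1 MB cap, and Write blocks once roughly 1 MB is buffered. A quick check of that capacity schedule (a standalone sketch, not part of the patch):

```go
package main

import "fmt"

func main() {
	const (
		minCap = 64      // first fixedBuffer capacity
		maxCap = 1000000 // the 1e6 cap used above
	)
	// Reproduce the schedule BytesPipe.Write uses when it appends a new
	// fixedBuffer: double the previous capacity, clamped to maxCap.
	for c := minCap; ; c *= 2 {
		if c > maxCap {
			c = maxCap
		}
		fmt.Println(c) // 64, 128, 256, ... 524288, 1000000
		if c == maxCap {
			break
		}
	}
}
```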
+func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
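Write above fills the last fixedBuffer, appends a larger one when it runs out of room, and blocks once blockThreshold bytes are pending; the Read implementation that follows drains buffers in FIFO order and only surfaces the close error after the data is gone. A minimal producer/consumer sketch, assuming the import path this patch adds:

```go
package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close() // reader sees io.EOF once buffered data is drained
	}()

	buf := make([]byte, 8)
	for {
		n, err := bp.Read(buf)
		fmt.Printf("read %q\n", buf[:n])
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
	}
}
```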
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+	bp.mu.Lock()
+	if bp.bufLen == 0 {
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return 0, bp.closeErr
+		}
+		bp.wait.Wait()
+		if bp.bufLen == 0 && bp.closeErr != nil {
+			err := bp.closeErr
+			bp.mu.Unlock()
+			return 0, err
+		}
+	}
+
+	for bp.bufLen > 0 {
+		b := bp.buf[0]
+		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+		n += read
+		bp.bufLen -= read
+
+		if b.Len() == 0 {
+			// it's empty so return it to the pool and move to the next one
+			returnBuffer(b)
+			bp.buf[0] = nil
+			bp.buf = bp.buf[1:]
+		}
+
+		if len(p) == read {
+			break
+		}
+
+		p = p[read:]
+	}
+
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return
+}
+
+func returnBuffer(b *fixedBuffer) {
+	b.Reset()
+	bufPoolsLock.Lock()
+	pool := bufPools[b.Cap()]
+	bufPoolsLock.Unlock()
+	if pool != nil {
+		pool.Put(b)
+	}
+}
+
+func getBuffer(size int) *fixedBuffer {
+	bufPoolsLock.Lock()
+	pool, ok := bufPools[size]
+	if !ok {
+		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+		bufPools[size] = pool
+	}
+	bufPoolsLock.Unlock()
+	return pool.Get().(*fixedBuffer)
+}
diff --git a/pkg/ioutils/bytespipe_test.go b/pkg/ioutils/bytespipe_test.go
new file mode 100644
index 0000000..300fb5f
--- /dev/null
+++ b/pkg/ioutils/bytespipe_test.go
@@ -0,0 +1,159 @@
+package ioutils
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"math/rand"
+	"testing"
+	"time"
+)
+
+func TestBytesPipeRead(t *testing.T) {
+	buf := NewBytesPipe()
+	buf.Write([]byte("12"))
+	buf.Write([]byte("34"))
+	buf.Write([]byte("56"))
+	buf.Write([]byte("78"))
+	buf.Write([]byte("90"))
+	rd := make([]byte, 4)
+	n, err := buf.Read(rd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 4 {
+		t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4)
+	}
+	if string(rd) != "1234" {
+		t.Fatalf("Read %s, but must be %s", rd, "1234")
+	}
+	n, err = buf.Read(rd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 4 {
+		t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4)
+	}
+	if string(rd) != "5678" {
+		t.Fatalf("Read %s, but must be %s", rd, "5678")
+	}
+	n, err = buf.Read(rd)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 2 {
+		t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2)
+	}
+	if string(rd[:n]) != "90" {
+		t.Fatalf("Read %s, but must be %s", rd, "90")
+	}
+}
+
+func TestBytesPipeWrite(t *testing.T) {
+	buf := NewBytesPipe()
+	buf.Write([]byte("12"))
+	buf.Write([]byte("34"))
+	buf.Write([]byte("56"))
+	buf.Write([]byte("78"))
+	buf.Write([]byte("90"))
+	if buf.buf[0].String() != "1234567890" {
+		t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890")
+	}
+}
+
+// Write and read at different speeds/chunk sizes and check that valid data is read.
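getBuffer and returnBuffer above keep one sync.Pool per buffer capacity, so a recycled 64-byte buffer is never handed to a caller that asked for a megabyte. The same pattern in isolation, using plain byte slices instead of fixedBuffer (hypothetical names, sketch only):

```go
package main

import (
	"fmt"
	"sync"
)

var (
	pools   = make(map[int]*sync.Pool) // one pool per slice capacity
	poolsMu sync.Mutex
)

func getSlice(size int) []byte {
	poolsMu.Lock()
	pool, ok := pools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return make([]byte, 0, size) }}
		pools[size] = pool
	}
	poolsMu.Unlock()
	return pool.Get().([]byte)
}

func putSlice(b []byte) {
	poolsMu.Lock()
	pool := pools[cap(b)] // look the pool up by capacity, like returnBuffer
	poolsMu.Unlock()
	if pool != nil {
		pool.Put(b[:0]) // reset length first, like fixedBuffer.Reset
	}
}

func main() {
	b := getSlice(64)
	fmt.Println(len(b), cap(b)) // 0 64
	putSlice(b)
}
```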
+func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe() + hash.Reset() + + done := make(chan struct{}) + + go func() { + // random delay before read starts + <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + + close(done) + }() + + for i := 0; i < c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + } + buf.Close() + <-done + + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + testData := []byte("pretty short line, because why not?") + for i := 0; i < b.N; i++ { + readBuf := make([]byte, 1024) + buf := NewBytesPipe() + go func() { + var err error + for err == nil { + _, err = buf.Read(readBuf) + } + }() + for j := 0; j < 1000; j++ { + buf.Write(testData) + } + buf.Close() + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 512) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe() + for j := 0; j < 500; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 512 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/pkg/ioutils/concat.go b/pkg/ioutils/concat.go new file mode 100644 index 0000000..9e05af4 --- /dev/null +++ b/pkg/ioutils/concat.go @@ -0,0 +1,123 @@ +package ioutils + +import ( + "io" + "errors" +) + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func max(x, y int) int { + if x < y { + return y + } + return x +} + +func SeekerSize(s io.Seeker) (int64, error) { + cur, err := s.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + size, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + + if _, err := s.Seek(cur, io.SeekStart); err != nil { + return 0, err + } + + return size, nil +} + +type concatReadSeekCloser struct { + a ReadSeekCloser + aSize int64 + b ReadSeekCloser + bSize int64 + off int64 +} + +func (self *concatReadSeekCloser) Read(p []byte) (n int, err error) { + // if the read starts within a + if self.off < self.aSize { + if _, err := self.a.Seek(self.off, io.SeekStart); err != nil { + return 0, err + } + + nA, err := io.ReadFull(self.a, p[:min(len(p), int(self.aSize - self.off))]) + if err != nil { + return 0, err + } + n += nA + } + + // if the read ends within b + if self.off + int64(len(p)) >= self.aSize { + if _, err := self.b.Seek(int64(max(0, int(self.off - self.aSize))), io.SeekStart); err != nil { + return 0, err + } + + nB, err := 
io.ReadFull(self.b, p[max(int(self.aSize - self.off), 0):]) + if err != nil { + return 0, err + } + n += nB + } + + self.off += int64(n) + + return +} + +func (self *concatReadSeekCloser) Seek(offset int64, whence int) (int64, error) { + switch whence { + default: + return 0, errors.New("Seek: invalid whence") + case io.SeekStart: + break + case io.SeekCurrent: + offset += self.off + case io.SeekEnd: + offset += self.aSize + self.bSize + } + if offset < 0 { + return 0, errors.New("Seek: invalid offset") + } + self.off = offset + return offset, nil +} + +func (self *concatReadSeekCloser) Close() error { + err1 := self.a.Close() + err2 := self.b.Close() + + if err1 != nil { + return err1 + } + if err2 != nil { + return err2 + } + return nil +} + +func ConcatReadSeekClosers(a, b ReadSeekCloser) (ReadSeekCloser, error) { + aSize, err := SeekerSize(a) + if err != nil { + return nil, err + } + + bSize, err := SeekerSize(b) + if err != nil { + return nil, err + } + + return &concatReadSeekCloser{a, aSize, b, bSize, 0}, nil +} diff --git a/pkg/ioutils/fswriters.go b/pkg/ioutils/fswriters.go new file mode 100644 index 0000000..7018476 --- /dev/null +++ b/pkg/ioutils/fswriters.go @@ -0,0 +1,177 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Chmod(w.perm); err != nil { + return err + } + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if w.writeErr == nil { + if err := os.Rename(w.f.Name(), w.fn); err != nil { + return err + } + + parentDir, err := os.OpenFile(filepath.Dir(w.fn), syscall.O_DIRECTORY, 0) + if err != nil { + return err + } + + defer func() { + parentDir.Close() + }() + if err := parentDir.Sync(); err != nil { + return err + } + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. 
The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. 
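Taken together: WriteFile syncs each file into the hidden temporary directory, and Commit publishes the whole set with a single os.Rename, so readers observe either none of the files or all of them. A usage sketch, assuming the import path this patch adds:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	base, err := ioutil.TempDir("", "ws-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(base)

	ws, err := ioutils.NewAtomicWriteSet(base)
	if err != nil {
		panic(err)
	}
	if err := ws.WriteFile("config.json", []byte(`{"ok":true}`), 0644); err != nil {
		ws.Cancel() // roll back; nothing ever became visible
		panic(err)
	}

	target := filepath.Join(base, "published")
	if err := ws.Commit(target); err != nil { // target must not exist yet
		panic(err)
	}
	data, _ := ioutil.ReadFile(filepath.Join(target, "config.json"))
	fmt.Println(string(data)) // {"ok":true}
}
```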
+func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/pkg/ioutils/fswriters_test.go b/pkg/ioutils/fswriters_test.go new file mode 100644 index 0000000..5d28600 --- /dev/null +++ b/pkg/ioutils/fswriters_test.go @@ -0,0 +1,132 @@ +package ioutils + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +var ( + testMode os.FileMode = 0640 +) + +func init() { + // Windows does not support full Linux file mode + if runtime.GOOS == "windows" { + testMode = 0666 + } +} + +func TestAtomicWriteToFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writers-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + expected := []byte("barbaz") + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if !bytes.Equal(actual, expected) { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } +} + +func TestAtomicWriteSetCommit(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + targetDir := filepath.Join(tmpDir, "target") + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } + + if err := ws.Commit(targetDir); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if !bytes.Equal(actual, expected) { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } + +} + +func TestAtomicWriteSetCancel(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if err := ws.Cancel(); err != nil { 
+ t.Fatalf("Error committing file: %s", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(tmpDir, "target", "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } else if !os.IsNotExist(err) { + t.Fatalf("Unexpected error reading file: %s", err) + } +} diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go new file mode 100644 index 0000000..53b6436 --- /dev/null +++ b/pkg/ioutils/readers.go @@ -0,0 +1,197 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +// ReadSeekCloser combines io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +type readSeekCloserWrapper struct { + io.ReadSeeker + closer func() error +} + +func (r *readSeekCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadSeekCloserWrapper(r io.ReadSeeker, closer func() error) ReadSeekCloser { + return &readSeekCloserWrapper{ + ReadSeeker: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. 
+		default:
+			p.closeWithError(err)
+		}
+		in.Close()
+	}()
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				p.closeWithError(ctx.Err())
+			case <-doneCtx.Done():
+				return
+			}
+		}
+	}()
+
+	return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
+
+type teeReadCloser struct {
+	rc io.ReadCloser
+	r  io.Reader
+}
+
+// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
+// Further, when it is closed, it ensures that rc is closed as well.
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
+	return &teeReadCloser{rc, io.TeeReader(rc, w)}
+}
+
+func (t *teeReadCloser) Read(p []byte) (int, error) {
+	return t.r.Read(p)
+}
+
+func (t *teeReadCloser) Close() error {
+	return t.rc.Close()
+}
diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go
new file mode 100644
index 0000000..9abc105
--- /dev/null
+++ b/pkg/ioutils/readers_test.go
@@ -0,0 +1,94 @@
+package ioutils
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// errorReader implements io.Reader and always fails.
+type errorReader struct{}
+
+func (r *errorReader) Read(p []byte) (int, error) {
+	return 0, fmt.Errorf("error reader always fails")
+}
+
+func TestReadCloserWrapperClose(t *testing.T) {
+	reader := strings.NewReader("A string reader")
+	wrapper := NewReadCloserWrapper(reader, func() error {
+		return fmt.Errorf("This will be called when closing")
+	})
+	err := wrapper.Close()
+	if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
+		t.Fatalf("readCloserWrapper should have called the anonymous func and thus failed")
+	}
+}
+
+func TestReaderErrWrapperReadOnError(t *testing.T) {
+	called := false
+	reader := &errorReader{}
+	wrapper := NewReaderErrWrapper(reader, func() {
+		called = true
+	})
+	_, err := wrapper.Read([]byte{})
+	if err == nil || !strings.Contains(err.Error(), "error reader always fails") {
+		t.Fatalf("readErrWrapper should have returned an error")
+	}
+	if !called {
+		t.Fatalf("readErrWrapper should have called the anonymous function on failure")
+	}
+}
+
+func TestReaderErrWrapperRead(t *testing.T) {
+	reader := strings.NewReader("a string reader.")
+	wrapper := NewReaderErrWrapper(reader, func() {
+		t.Fatalf("readErrWrapper should not have called the anonymous function")
+	})
+	// Read 20 bytes (should be ok with the string above)
+	num, err := wrapper.Read(make([]byte, 20))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if num != 16 {
+		t.Fatalf("readerErrWrapper should have read 16 bytes, but read %d", num)
+	}
+}
+
+func TestHashData(t *testing.T) {
+	reader := strings.NewReader("hash-me")
+	actual, err := HashData(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
+	if actual != expected {
+		t.Fatalf("Expecting %s, got %s", expected, actual)
+	}
+}
+
+type perpetualReader struct{}
+
+func (p
*perpetualReader) Read(buf []byte) (n int, err error) { + for i := 0; i != len(buf); i++ { + buf[i] = 'a' + } + return len(buf), nil +} + +func TestCancelReadCloser(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) + for { + var buf [128]byte + _, err := cancelReadCloser.Read(buf[:]) + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + } +} diff --git a/pkg/ioutils/temp_unix.go b/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000..1539ad2 --- /dev/null +++ b/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/pkg/ioutils/temp_windows.go b/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000..c258e5f --- /dev/null +++ b/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/pkg/ioutils/writeflusher.go b/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000..52a4901 --- /dev/null +++ b/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. 
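WriteFlusher above is typically wrapped around an HTTP response so each progress line is pushed to the client as soon as it is written; the constructor below falls back to NopFlusher when the writer has no Flush method. A sketch (the standard library's HTTP response writer does implement Flush):

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func progress(w http.ResponseWriter, r *http.Request) {
	// Every Write below is flushed straight to the client instead of
	// sitting in the response buffer until the handler returns.
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()

	for i := 0; i < 3; i++ {
		fmt.Fprintf(wf, "step %d\n", i)
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/progress", progress)
	if err := http.ListenAndServe("127.0.0.1:8080", nil); err != nil {
		panic(err)
	}
}
```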
+func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/pkg/ioutils/writers.go b/pkg/ioutils/writers.go new file mode 100644 index 0000000..ccc7f9c --- /dev/null +++ b/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/pkg/ioutils/writers_test.go b/pkg/ioutils/writers_test.go new file mode 100644 index 0000000..564b1cd --- /dev/null +++ b/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. 
%d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go new file mode 100644 index 0000000..4734c31 --- /dev/null +++ b/pkg/jsonlog/jsonlog.go @@ -0,0 +1,42 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 0000000..83ce684 --- /dev/null +++ b/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,178 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. 
+// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. +func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git 
a/pkg/jsonlog/jsonlog_marshalling_test.go b/pkg/jsonlog/jsonlog_marshalling_test.go new file mode 100644 index 0000000..3edb271 --- /dev/null +++ b/pkg/jsonlog/jsonlog_marshalling_test.go @@ -0,0 +1,34 @@ +package jsonlog + +import ( + "regexp" + "testing" +) + +func TestJSONLogMarshalJSON(t *testing.T) { + logs := map[*JSONLog]string{ + &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, + &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, + &JSONLog{}: `^{\"time\":\".{20,}\"}$`, + // These ones are a little weird + &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, + } + for jsonLog, expression := range logs { + data, err := jsonLog.MarshalJSON() + if err != nil { + t.Fatal(err) + } + res := string(data) + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/pkg/jsonlog/jsonlogbytes.go b/pkg/jsonlog/jsonlogbytes.go new file mode 100644 index 0000000..0ba716f --- /dev/null +++ b/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,122 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. +// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. 
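MarshalJSONBuf above hand-rolls the JSON so a log driver can reuse one buffer and splice in an already-marshalled timestamp and raw attributes without reflection. Driving it directly might look like this (import path as added by this patch; the timestamp value is illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	var buf bytes.Buffer
	entry := &jsonlog.JSONLogs{
		Log:      []byte("hello world\n"),
		Stream:   "stdout",
		Created:  `"2018-08-20T13:30:55.000000000Z"`, // pre-marshalled, quotes included
		RawAttrs: []byte(`{"env":"prod"}`),
	}
	if err := entry.MarshalJSONBuf(&buf); err != nil {
		panic(err)
	}
	// Fields are emitted in log, stream, attrs, time order:
	// {"log":"hello world\n","stream":"stdout","attrs":{"env":"prod"},"time":"2018-08-20T13:30:55.000000000Z"}
	fmt.Println(buf.String())
}
```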
+func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/pkg/jsonlog/jsonlogbytes_test.go b/pkg/jsonlog/jsonlogbytes_test.go new file mode 100644 index 0000000..6d6ad21 --- /dev/null +++ b/pkg/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,39 @@ +package jsonlog + +import ( + "bytes" + "regexp" + "testing" +) + +func TestJSONLogsMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogs]string{ + &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Created: "time"}: `^{\"time\":time}$`, + &JSONLogs{}: `^{\"time\":}$`, + // These ones are a little weird + &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + // with raw attributes + &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + if err := jsonLog.MarshalJSONBuf(&buf); err != nil { + t.Fatal(err) + } + res := buf.String() + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/pkg/jsonlog/time_marshalling.go b/pkg/jsonlog/time_marshalling.go new file mode 100644 index 0000000..2117338 --- /dev/null +++ b/pkg/jsonlog/time_marshalling.go @@ -0,0 +1,27 @@ +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. 
+package jsonlog
+
+import (
+	"errors"
+	"time"
+)
+
+const (
+	// RFC3339NanoFixed is our own version of RFC3339Nano because we want one
+	// that pads the nanoseconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// JSONFormat is the format used by FastTimeMarshalJSON
+	JSONFormat = `"` + time.RFC3339Nano + `"`
+)
+
+// FastTimeMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON makes.
+func FastTimeMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(JSONFormat), nil
+}
diff --git a/pkg/jsonlog/time_marshalling_test.go b/pkg/jsonlog/time_marshalling_test.go
new file mode 100644
index 0000000..02d0302
--- /dev/null
+++ b/pkg/jsonlog/time_marshalling_test.go
@@ -0,0 +1,47 @@
+package jsonlog
+
+import (
+	"testing"
+	"time"
+)
+
+// Testing to ensure the 'year' field is between 0 and 9999
+func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) {
+	aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local)
+	json, err := FastTimeMarshalJSON(aTime)
+	if err == nil {
+		t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json)
+	}
+	anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local)
+	json, err = FastTimeMarshalJSON(anotherTime)
+	if err == nil {
+		t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json)
+	}
+}
+
+func TestFastTimeMarshalJSON(t *testing.T) {
+	aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC)
+	json, err := FastTimeMarshalJSON(aTime)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := "\"2015-05-29T11:01:02.000000003Z\""
+	if json != expected {
+		t.Fatalf("Expected %v, got %v", expected, json)
+	}
+
+	location, err := time.LoadLocation("Europe/Paris")
+	if err != nil {
+		t.Fatal(err)
+	}
+	aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location)
+	json, err = FastTimeMarshalJSON(aTime)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected = "\"2015-05-29T11:01:02.000000003+02:00\""
+	if json != expected {
+		t.Fatalf("Expected %v, got %v", expected, json)
+	}
+}
diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 0000000..dc785d6
--- /dev/null
+++ b/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,315 @@
+package jsonmessage
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/Nvveen/Gotty"
+
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/go-units"
+)
+
+// JSONError wraps a concrete Code and Message. `Code` is an integer error
+// code, `Message` is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
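An aside on the two formats defined in time_marshalling.go above: JSONFormat builds on time.RFC3339Nano, which trims trailing zeros from the fractional seconds, while RFC3339NanoFixed always emits nine digits so that timestamps line up in log output. A quick comparison:

```go
package main

import (
	"fmt"
	"time"
)

// Same layout string as RFC3339NanoFixed above.
const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

func main() {
	t := time.Date(2015, 5, 29, 11, 1, 2, 300000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano)) // 2015-05-29T11:01:02.3Z (zeros trimmed)
	fmt.Println(t.Format(rfc3339NanoFixed)) // 2015-05-29T11:01:02.300000000Z (padded)
}
```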
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` +} + +func (p *JSONProgress) String() string { + var ( + width = 200 + pbBox string + numbersBox string + timeLeftBox string + ) + + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + width = int(ws.Width) + } + + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ +type termInfo interface { + Parse(attr string, params ...interface{}) (string, error) +} + +type noTermInfo struct{} // canary used when no terminfo. + +func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { + return "", fmt.Errorf("noTermInfo") +} + +func clearLine(out io.Writer, ti termInfo) { + // el2 (clear whole line) is not exposed by terminfo. 
+ + // First clear line from beginning to cursor + if attr, err := ti.Parse("el1"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[1K") + } + // Then clear line from cursor to end + if attr, err := ti.Parse("el"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[K") + } +} + +func cursorUp(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cuu", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dA", l) + } +} + +func cursorDown(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cud", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dB", l) + } +} + +// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if termInfo != nil && jm.Stream == "" && jm.Progress != nil { + clearLine(out, termInfo) + endl = "\r" + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && termInfo != nil { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. 
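+// Messages carrying an ID are given one line each and updated in place, which
+// is how concurrent pull/push progress bars stay readable. A typical call
+// site might look like this (resp.Body is a hypothetical JSON source):
+//
+//	outFd, isTerm := term.GetFdInfo(os.Stdout)
+//	err := DisplayJSONMessagesStream(resp.Body, os.Stdout, outFd, isTerm, nil)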
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + + var termInfo termInfo + + if isTerminal { + term := os.Getenv("TERM") + if term == "" { + term = "vt102" + } + + var err error + if termInfo, err = gotty.OpenTermInfo(term); err != nil { + termInfo = &noTermInfo{} + } + } + + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if termInfo != nil { + fmt.Fprintf(out, "\n") + } + } + diff = len(ids) - line + if termInfo != nil { + cursorUp(out, termInfo, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). + ids = make(map[string]int) + } + err := jm.Display(out, termInfo) + if jm.ID != "" && termInfo != nil { + cursorDown(out, termInfo, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 0000000..c3ed6c0 --- /dev/null +++ b/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,281 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + termsz, err := term.GetWinsize(0) + if err != nil { + // we can safely ignore the err here + termsz = nil + } + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20B/100B" + if termsz != nil && termsz.Width <= 110 { + expectedStart = " 20B/100B" + } + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = "[=========================> ] 50B/100B" + if termsz != nil && termsz.Width <= 110 { + 
expected = " 50B/100B" + } + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50B" + } + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } + + expected = "[=========================> ] 50/100 units" + if termsz != nil && termsz.Width <= 110 { + expected = " 50/100 units" + } + jp6 := JSONProgress{Current: 50, Total: 100, Units: "units"} + if jp6.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp6.String()) + } + + // this number can't be negative + expected = "[==================================================>] 50 units" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 units" + } + jp7 := JSONProgress{Current: 50, Total: 40, Units: "units"} + if jp7.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp7.String()) + } + + expected = "[=========================> ] " + if termsz != nil && termsz.Width <= 110 { + expected = "" + } + jp8 := JSONProgress{Current: 50, Total: 100, HideCounts: true} + if jp8.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp8.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now() + messages := map[JSONMessage][]string{ + // Empty + {}: {"\n", "\n"}, + // Status + { + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + { + Time: now.Unix(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with nano precision time + { + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with both times Nano is preferred + { + Time: now.Unix(), + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // Stream over status + { + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + { + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + { + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[1K%c[K\rstatus 1B\r", 27, 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected %q,got %q", expectedMessages[0], data.String()) + } + // With terminal + data = bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, &noTermInfo{}); err != nil { + 
t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) + } + } +} + +// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. +func TestJSONMessageDisplayWithJSONError(t *testing.T) { + data := bytes.NewBuffer([]byte{}) + jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} + + err := jsonMessage.Display(data, &noTermInfo{}) + if err == nil || err.Error() != "Can't find it" { + t.Fatalf("Expected a JSONError 404, got %q", err) + } + + jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} + err = jsonMessage.Display(data, &noTermInfo{}) + if err == nil || err.Error() != "Authentication is required." { + t.Fatalf("Expected an error \"Authentication is required.\", got %q", err) + } +} + +func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { + var ( + inFd uintptr + ) + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader("This is not a 'valid' JSON []") + inFd, _ = term.GetFdInfo(reader) + + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { + t.Fatalf("Should have thrown an error (invalid character in ..), got %q", err) + } +} + +func TestDisplayJSONMessagesStream(t *testing.T) { + var ( + inFd uintptr + ) + + messages := map[string][]string{ + // empty string + "": { + "", + ""}, + // Without progress & ID + "{ \"status\": \"status\" }": { + "status\n", + "status\n", + }, + // Without progress, with ID + "{ \"id\": \"ID\",\"status\": \"status\" }": { + "ID: status\n", + fmt.Sprintf("ID: status\n"), + }, + // With progress + "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { + "ID: status ProgressMessage", + fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 1, 27, 1), + }, + // With progressDetail + "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { + "", // progressbar is disabled in non-terminal + fmt.Sprintf("\n%c[%dA%c[1K%c[K\rID: status 1B\r%c[%dB", 27, 1, 27, 27, 27, 1), + }, + } + + // Use $TERM which is unlikely to exist, forcing DisplayJSONMessageStream to + // (hopefully) use &noTermInfo. 
+ origTerm := os.Getenv("TERM") + os.Setenv("TERM", "xyzzy-non-existent-terminfo") + + for jsonMessage, expectedMessages := range messages { + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader(jsonMessage) + inFd, _ = term.GetFdInfo(reader) + + // Without terminal + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected an %q, got %q", expectedMessages[0], data.String()) + } + + // With terminal + data = bytes.NewBuffer([]byte{}) + reader = strings.NewReader(jsonMessage) + if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) + } + } + os.Setenv("TERM", origTerm) + +} diff --git a/pkg/listeners/group_unix.go b/pkg/listeners/group_unix.go new file mode 100644 index 0000000..e1d8774 --- /dev/null +++ b/pkg/listeners/group_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package listeners + +import ( + "fmt" + "strconv" + + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +const defaultSocketGroup = "docker" + +func lookupGID(name string) (int, error) { + groupFile, err := user.GetGroupPath() + if err != nil { + return -1, errors.Wrap(err, "error looking up groups") + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { + return g.Name == name || strconv.Itoa(g.Gid) == name + }) + if err != nil { + return -1, errors.Wrapf(err, "error parsing groups for %s", name) + } + if len(groups) > 0 { + return groups[0].Gid, nil + } + gid, err := strconv.Atoi(name) + if err == nil { + return gid, nil + } + return -1, fmt.Errorf("group %s not found", name) +} diff --git a/pkg/listeners/listeners_solaris.go b/pkg/listeners/listeners_solaris.go new file mode 100644 index 0000000..c9003bc --- /dev/null +++ b/pkg/listeners/listeners_solaris.go @@ -0,0 +1,43 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + gid, err := lookupGID(socketGroup) + if err != nil { + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) + } + gid = os.Getgid() + } + l, err := sockets.NewUnixSocket(addr, gid) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} diff --git a/pkg/listeners/listeners_unix.go b/pkg/listeners/listeners_unix.go new file mode 100644 index 0000000..25c98fb --- /dev/null +++ b/pkg/listeners/listeners_unix.go @@ -0,0 +1,104 @@ +// +build !windows,!solaris + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. 
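+// On this build the supported protos are "fd" (systemd socket activation),
+// "tcp" and "unix".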
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "fd": + fds, err := listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, fds...) + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + gid, err := lookupGID(socketGroup) + if err != nil { + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) + } + gid = os.Getgid() + } + l, err := sockets.NewUnixSocket(addr, gid) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("invalid protocol format: %q", proto) + } + + return ls, nil +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("too few socket activated files passed in by systemd") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + return nil, fmt.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/pkg/listeners/listeners_windows.go b/pkg/listeners/listeners_windows.go new file mode 100644 index 0000000..5b5a470 --- /dev/null +++ b/pkg/listeners/listeners_windows.go @@ -0,0 +1,54 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. 
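+// On Windows only "tcp" and "npipe" are supported.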
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) {
+	ls := []net.Listener{}
+
+	switch proto {
+	case "tcp":
+		l, err := sockets.NewTCPSocket(addr, tlsConfig)
+		if err != nil {
+			return nil, err
+		}
+		ls = append(ls, l)
+
+	case "npipe":
+		// allow Administrators and SYSTEM, plus whatever additional users or groups were specified
+		sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)"
+		if socketGroup != "" {
+			for _, g := range strings.Split(socketGroup, ",") {
+				sid, err := winio.LookupSidByName(g)
+				if err != nil {
+					return nil, err
+				}
+				sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid)
+			}
+		}
+		c := winio.PipeConfig{
+			SecurityDescriptor: sddl,
+			MessageMode:        true,  // Use message mode so that CloseWrite() is supported
+			InputBufferSize:    65536, // Use 64KB buffers to improve performance
+			OutputBufferSize:   65536,
+		}
+		l, err := winio.ListenPipe(addr, &c)
+		if err != nil {
+			return nil, err
+		}
+		ls = append(ls, l)
+
+	default:
+		return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe")
+	}
+
+	return ls, nil
+}
diff --git a/pkg/locker/README.md b/pkg/locker/README.md
new file mode 100644
index 0000000..c8dbddc
--- /dev/null
+++ b/pkg/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+======
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however, the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures any
+other function that is dealing with the same name will block.
+
+When needing to modify the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the modification
+is being performed.
+
diff --git a/pkg/locker/locker.go b/pkg/locker/locker.go
new file mode 100644
index 0000000..0b22ddf
--- /dev/null
+++ b/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
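+
+A minimal usage sketch (names are illustrative only):
+
+	l := locker.New()
+	l.Lock("instance-1")
+	// ... work scoped to "instance-1" ...
+	if err := l.Unlock("instance-1"); err != nil {
+		// ErrNoSuchLock is returned if the name was never locked.
+	}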
+*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/pkg/locker/locker_test.go b/pkg/locker/locker_test.go new file mode 100644 index 0000000..5a297dd --- /dev/null +++ b/pkg/locker/locker_test.go @@ -0,0 +1,124 @@ +package locker + +import ( + "sync" + "testing" + "time" +) + +func TestLockCounter(t *testing.T) { + l := &lockCtr{} + l.inc() + + if l.waiters != 1 { + t.Fatal("counter inc failed") + } + + l.dec() + if l.waiters != 0 { + t.Fatal("counter dec failed") + } +} + +func TestLockerLock(t *testing.T) { + l := New() + l.Lock("test") + ctr := l.locks["test"] + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) + } + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + chWaiting := make(chan struct{}) + go func() { + for range time.Tick(1 * time.Millisecond) { + if ctr.count() == 1 { + close(chWaiting) + break + } + } + }() + + select { + case <-chWaiting: + case <-time.After(3 * time.Second): + t.Fatal("timed out waiting for lock waiters to be incremented") + } + + select { + case <-chDone: + t.Fatal("lock should not have returned while it was still held") + default: + } + + if err := l.Unlock("test"); err != nil { + t.Fatal(err) + } + + select { + case 
<-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should have completed") + } + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) + } +} + +func TestLockerUnlock(t *testing.T) { + l := New() + + l.Lock("test") + l.Unlock("test") + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should not be blocked") + } +} + +func TestLockerConcurrency(t *testing.T) { + l := New() + + var wg sync.WaitGroup + for i := 0; i <= 10000; i++ { + wg.Add(1) + go func() { + l.Lock("test") + // if there is a concurrency issue, will very likely panic here + l.Unlock("test") + wg.Done() + }() + } + + chDone := make(chan struct{}) + go func() { + wg.Wait() + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for locks to complete") + } + + // Since everything has unlocked this should not exist anymore + if ctr, exists := l.locks["test"]; exists { + t.Fatalf("lock should not exist: %v", ctr) + } +} diff --git a/pkg/longpath/longpath.go b/pkg/longpath/longpath.go new file mode 100644 index 0000000..9b15bff --- /dev/null +++ b/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/pkg/longpath/longpath_test.go b/pkg/longpath/longpath_test.go new file mode 100644 index 0000000..01865ef --- /dev/null +++ b/pkg/longpath/longpath_test.go @@ -0,0 +1,22 @@ +package longpath + +import ( + "strings" + "testing" +) + +func TestStandardLongPath(t *testing.T) { + c := `C:\simple\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\C:\simple\path`) { + t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) + } +} + +func TestUNCLongPath(t *testing.T) { + c := `\\server\share\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { + t.Errorf("Wrong UNC long path returned. 
Original = %s ; Long = %s", c, longC) + } +} diff --git a/pkg/loopback/attach_loopback.go b/pkg/loopback/attach_loopback.go new file mode 100644 index 0000000..dafa94b --- /dev/null +++ b/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux,cgo + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
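+	// (getNextFreeLoopbackIndex uses /dev/loop-control, which requires
+	// Linux 3.1+; on failure we simply scan /dev/loopN from index 0.)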
+	startIndex, err := getNextFreeLoopbackIndex()
+	if err != nil {
+		logrus.Debugf("Error retrieving the next available loopback: %s", err)
+	}
+
+	// OpenFile adds O_CLOEXEC
+	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+	if err != nil {
+		logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+		return nil, ErrAttachLoopbackDevice
+	}
+	defer sparseFile.Close()
+
+	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the status of the loopback device
+	loopInfo := &loopInfo64{
+		loFileName: stringToLoopName(loopFile.Name()),
+		loOffset:   0,
+		loFlags:    LoFlagsAutoClear,
+	}
+
+	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+		logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+		// If the call failed, then free the loopback device
+		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+			logrus.Error("Error while cleaning up the loopback device")
+		}
+		loopFile.Close()
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
diff --git a/pkg/loopback/ioctl.go b/pkg/loopback/ioctl.go
new file mode 100644
index 0000000..ea68419
--- /dev/null
+++ b/pkg/loopback/ioctl.go
@@ -0,0 +1,53 @@
+// +build linux,cgo
+
+package loopback
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+	index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+	if err != 0 {
+		return 0, err
+	}
+	return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
+	loopInfo := &loopInfo64{}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return nil, err
+	}
+	return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/loopback/loop_wrapper.go b/pkg/loopback/loop_wrapper.go
new file mode 100644
index 0000000..a50de7f
--- /dev/null
+++ b/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux,cgo
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
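+// The fallback defines below guard against older kernel headers that predate
+// LOOP_CTL_GET_FREE (Linux 3.1) and LO_FLAGS_PARTSCAN (Linux 3.2).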
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/pkg/loopback/loopback.go b/pkg/loopback/loopback.go new file mode 100644 index 0000000..c2d91da --- /dev/null +++ b/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux,cgo + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. 
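+// It returns nil if no loop device is backed by the given file.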
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go new file mode 100644 index 0000000..607dbed --- /dev/null +++ b/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+				flagCollisions[key] = true
+			}
+			continue
+		}
+		opt := strings.SplitN(option, "=", 2)
+		if len(opt) != 2 || !validFlags[opt[0]] {
+			return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+		if !dataCollisions[opt[0]] {
+			// We prepend the option and add to collision map
+			newOptions = append([]string{option}, newOptions...)
+			dataCollisions[opt[0]] = true
+		}
+	}
+
+	return newOptions, nil
+}
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
func parseOptions(options string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+
+	for _, o := range strings.Split(options, ",") {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				flag &= ^f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parse fstab type mount options into flags and data
+func ParseTmpfsOptions(options string) (int, string, error) {
+	flags, data := parseOptions(options)
+	for _, o := range strings.Split(data, ",") {
+		opt := strings.SplitN(o, "=", 2)
+		if !validFlags[opt[0]] {
+			return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+	}
+	return flags, data, nil
+}
diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go
new file mode 100644
index 0000000..5f76f33
--- /dev/null
+++ b/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+	// RDONLY will mount the filesystem as read-only.
+	RDONLY = C.MNT_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = C.MNT_NOSUID
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = C.MNT_NOEXEC
+
+	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	UNBINDABLE  = 0
+	RUNBINDABLE = 0
+	PRIVATE     = 0
+	RPRIVATE    = 0
+	SHARED      = 0
+	RSHARED     = 0
+	SLAVE       = 0
+	RSLAVE      = 0
+	RBIND       = 0
+	RELATIVE    = 0
+	RELATIME    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+	mntDetach   = 0
+)
diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go
new file mode 100644
index 0000000..25f4661
--- /dev/null
+++ b/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount
+
+import (
+	"syscall"
+)
+
+const (
+	// RDONLY will mount the file system read-only.
+	RDONLY = syscall.MS_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = syscall.MS_NOSUID
+
+	// NODEV will not interpret character or block special devices on the file
+	// system.
+	NODEV = syscall.MS_NODEV
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = syscall.MS_NOEXEC
+
+	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+	SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+
+	// DIRSYNC will force all directory updates within the file system to be done
+	// synchronously. This affects the following system calls: create, link,
+	// unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = syscall.MS_STRICTATIME + + mntDetach = syscall.MNT_DETACH +) diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000..9ed741e --- /dev/null +++ b/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go new file mode 100644 index 0000000..c9fdfd6 --- /dev/null +++ b/pkg/mount/mount.go @@ -0,0 +1,86 @@ +package mount + +import ( + "sort" + "strings" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. 
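+// The match is on the exact mountpoint path only; it does not resolve
+// symlinks and does not report what is mounted there.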
+func Mounted(mountpoint string) (bool, error) {
+	entries, err := parseMountTable()
+	if err != nil {
+		return false, err
+	}
+
+	// Search the table for the mountpoint
+	for _, e := range entries {
+		if e.Mountpoint == mountpoint {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// Mount will mount a filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+	flag, _ := parseOptions(options)
+	if flag&REMOUNT != REMOUNT {
+		if mounted, err := Mounted(target); err != nil || mounted {
+			return err
+		}
+	}
+	return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+	flag, data := parseOptions(options)
+	return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+	if mounted, err := Mounted(target); err != nil || !mounted {
+		return err
+	}
+	return unmount(target, mntDetach)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+	mounts, err := GetMounts()
+	if err != nil {
+		return err
+	}
+
+	// Make the deepest mount be first
+	sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+	for i, m := range mounts {
+		if !strings.HasPrefix(m.Mountpoint, target) {
+			continue
+		}
+		if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+			if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+				return err
+			}
+			// Ignore errors for submounts and continue trying to unmount others
+			// The final unmount should fail if there are any submounts remaining
+		}
+	}
+	return nil
+}
diff --git a/pkg/mount/mount_unix_test.go b/pkg/mount/mount_unix_test.go
new file mode 100644
index 0000000..253aff3
--- /dev/null
+++ b/pkg/mount/mount_unix_test.go
@@ -0,0 +1,162 @@
+// +build !windows,!solaris
+
+package mount
+
+import (
+	"os"
+	"path"
+	"testing"
+)
+
+func TestMountOptionsParsing(t *testing.T) {
+	options := "noatime,ro,size=10k"
+
+	flag, data := parseOptions(options)
+
+	if data != "size=10k" {
+		t.Fatalf("Expected size=10k got %s", data)
+	}
+
+	expectedFlag := NOATIME | RDONLY
+
+	if flag != expectedFlag {
+		t.Fatalf("Expected %d got %d", expectedFlag, flag)
+	}
+}
+
+func TestMounted(t *testing.T) {
+	tmp := path.Join(os.TempDir(), "mount-tests")
+	if err := os.MkdirAll(tmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	var (
+		sourceDir  = path.Join(tmp, "source")
+		targetDir  = path.Join(tmp, "target")
+		sourcePath = path.Join(sourceDir, "file.txt")
+		targetPath = path.Join(targetDir, "file.txt")
+	)
+
+	os.Mkdir(sourceDir, 0777)
+	os.Mkdir(targetDir, 0777)
+
+	f, err := os.Create(sourcePath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.WriteString("hello")
+	f.Close()
+
+	f, err = os.Create(targetPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := Unmount(targetDir); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	mounted, err := Mounted(targetDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !mounted {
+		t.Fatalf("Expected %s to be mounted", targetDir)
+	}
+	if _, err := os.Stat(targetDir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMountReadonly(t *testing.T) {
+	tmp := path.Join(os.TempDir(), "mount-tests")
+	if err := os.MkdirAll(tmp, 0777); err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	var (
+		sourceDir  = path.Join(tmp, "source")
+		targetDir  = path.Join(tmp, "target")
+		sourcePath = path.Join(sourceDir, "file.txt")
+		targetPath = path.Join(targetDir, "file.txt")
+	)
+
+	os.Mkdir(sourceDir, 0777)
+	os.Mkdir(targetDir, 0777)
+
+	f, err := os.Create(sourcePath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.WriteString("hello")
+	f.Close()
+
+	f, err = os.Create(targetPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := Unmount(targetDir); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
+	if err == nil {
+		t.Fatal("Should not be able to open a ro file as rw")
+	}
+}
+
+func TestGetMounts(t *testing.T) {
+	mounts, err := GetMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	root := false
+	for _, entry := range mounts {
+		if entry.Mountpoint == "/" {
+			root = true
+		}
+	}
+
+	if !root {
+		t.Fatal("/ should be mounted at least")
+	}
+}
+
+func TestMergeTmpfsOptions(t *testing.T) {
+	options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"}
+	expected := []string{"atime", "rw", "size=1024k", "slave"}
+	merged, err := MergeTmpfsOptions(options)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(expected) != len(merged) {
+		t.Fatalf("Expected %s got %s", expected, merged)
+	}
+	for index := range merged {
+		if merged[index] != expected[index] {
+			t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged)
+		}
+	}
+
+	options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"}
+	_, err = MergeTmpfsOptions(options)
+	if err == nil {
+		t.Fatal("Expected error got nil")
+	}
+}
diff --git a/pkg/mount/mounter_freebsd.go b/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000..bb870e6
--- /dev/null
+++ b/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("Failed to call nmount: %s", reason)
+	}
+	return nil
+}
+
+func unmount(target string, flag int) error {
+	return syscall.Unmount(target, flag)
+}
diff --git a/pkg/mount/mounter_linux.go b/pkg/mount/mounter_linux.go
new file mode 100644
index 0000000..3ef2ce6
--- /dev/null
+++ b/pkg/mount/mounter_linux.go
@@ -0,0 +1,56 @@
+package mount
+
+import (
+	"syscall"
+)
+
+const (
+	// ptypes is the set of propagation types.
+	ptypes = syscall.MS_SHARED | syscall.MS_PRIVATE | syscall.MS_SLAVE | syscall.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change propagation call.
+	pflags = ptypes | syscall.MS_REC | syscall.MS_SILENT
+
+	// broflags is the combination of bind and read only
+	broflags = syscall.MS_BIND | syscall.MS_RDONLY
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+	switch {
+	// We treat device "" and "none" as a remount request to provide compatibility with
+	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+	case flags&syscall.MS_REMOUNT != 0, device == "", device == "none":
+		return true
+	default:
+		return false
+	}
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+	oflags := flags &^ ptypes
+	if !isremount(device, flags) {
+		// Initial call applying all non-propagation flags.
+		if err := syscall.Mount(device, target, mType, oflags, data); err != nil {
+			return err
+		}
+	}
+
+	if flags&ptypes != 0 {
+		// Change the propagation type.
+		if err := syscall.Mount("", target, "", flags&pflags, ""); err != nil {
+			return err
+		}
+	}
+
+	if oflags&broflags == broflags {
+		// Remount the bind to apply read only.
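+		// (the kernel ignores MS_RDONLY on the initial bind, so the read-only
+		// bit only takes effect through this second, remounting call)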
+ return syscall.Mount("", target, "", oflags|syscall.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/pkg/mount/mounter_linux_test.go b/pkg/mount/mounter_linux_test.go new file mode 100644 index 0000000..9c74b1f --- /dev/null +++ b/pkg/mount/mounter_linux_test.go @@ -0,0 +1,195 @@ +// +build linux + +package mount + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMount(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("not root tests would fail") + } + + source, err := ioutil.TempDir("", "mount-test-source-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(source) + + // Ensure we have a known start point by mounting tmpfs with given options + if err := Mount("tmpfs", source, "tmpfs", "private"); err != nil { + t.Fatal(err) + } + defer ensureUnmount(t, source) + validateMount(t, source, "", "") + if t.Failed() { + t.FailNow() + } + + target, err := ioutil.TempDir("", "mount-test-target-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(target) + + tests := []struct { + source string + ftype string + options string + expectedOpts string + expectedOptional string + }{ + // No options + {"tmpfs", "tmpfs", "", "", ""}, + // Default rw / ro test + {source, "", "bind", "", ""}, + {source, "", "bind,private", "", ""}, + {source, "", "bind,shared", "", "shared"}, + {source, "", "bind,slave", "", "master"}, + {source, "", "bind,unbindable", "", "unbindable"}, + // Read Write tests + {source, "", "bind,rw", "rw", ""}, + {source, "", "bind,rw,private", "rw", ""}, + {source, "", "bind,rw,shared", "rw", "shared"}, + {source, "", "bind,rw,slave", "rw", "master"}, + {source, "", "bind,rw,unbindable", "rw", "unbindable"}, + // Read Only tests + {source, "", "bind,ro", "ro", ""}, + {source, "", "bind,ro,private", "ro", ""}, + {source, "", "bind,ro,shared", "ro", "shared"}, + {source, "", "bind,ro,slave", "ro", "master"}, + {source, "", "bind,ro,unbindable", "ro", "unbindable"}, + } + + for _, tc := range tests { + ftype, options := tc.ftype, tc.options + if tc.ftype == "" { + ftype = "none" + } + if tc.options == "" { + options = "none" + } + + t.Run(fmt.Sprintf("%v-%v", ftype, options), func(t *testing.T) { + if strings.Contains(tc.options, "slave") { + // Slave requires a shared source + if err := MakeShared(source); err != nil { + t.Fatal(err) + } + defer func() { + if err := MakePrivate(source); err != nil { + t.Fatal(err) + } + }() + } + if err := Mount(tc.source, target, tc.ftype, tc.options); err != nil { + t.Fatal(err) + } + defer ensureUnmount(t, target) + validateMount(t, target, tc.expectedOpts, tc.expectedOptional) + }) + } +} + +// ensureUnmount umounts mnt checking for errors +func ensureUnmount(t *testing.T, mnt string) { + if err := Unmount(mnt); err != nil { + t.Error(err) + } +} + +// validateMount checks that mnt has the given options +func validateMount(t *testing.T, mnt string, opts, optional string) { + info, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + wantedOpts := make(map[string]struct{}) + if opts != "" { + for _, opt := range strings.Split(opts, ",") { + wantedOpts[opt] = struct{}{} + } + } + + wantedOptional := make(map[string]struct{}) + if optional != "" { + for _, opt := range strings.Split(optional, ",") { + wantedOptional[opt] = struct{}{} + } + } + + mnts := make(map[int]*Info, len(info)) + for _, mi := range info { + mnts[mi.ID] = mi + } + + for _, mi := range info { + if mi.Mountpoint != mnt { + continue + } + + // 
+// validateMount checks that mnt has the given options
+func validateMount(t *testing.T, mnt string, opts, optional string) {
+	info, err := GetMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	wantedOpts := make(map[string]struct{})
+	if opts != "" {
+		for _, opt := range strings.Split(opts, ",") {
+			wantedOpts[opt] = struct{}{}
+		}
+	}
+
+	wantedOptional := make(map[string]struct{})
+	if optional != "" {
+		for _, opt := range strings.Split(optional, ",") {
+			wantedOptional[opt] = struct{}{}
+		}
+	}
+
+	mnts := make(map[int]*Info, len(info))
+	for _, mi := range info {
+		mnts[mi.ID] = mi
+	}
+
+	for _, mi := range info {
+		if mi.Mountpoint != mnt {
+			continue
+		}
+
+		// Use parent info as the defaults
+		p := mnts[mi.Parent]
+		pOpts := make(map[string]struct{})
+		if p.Opts != "" {
+			for _, opt := range strings.Split(p.Opts, ",") {
+				pOpts[clean(opt)] = struct{}{}
+			}
+		}
+		pOptional := make(map[string]struct{})
+		if p.Optional != "" {
+			for _, field := range strings.Split(p.Optional, ",") {
+				pOptional[clean(field)] = struct{}{}
+			}
+		}
+
+		// Validate Opts
+		if mi.Opts != "" {
+			for _, opt := range strings.Split(mi.Opts, ",") {
+				opt = clean(opt)
+				if !has(wantedOpts, opt) && !has(pOpts, opt) {
+					t.Errorf("unexpected mount option %q, expected %q", opt, opts)
+				}
+				delete(wantedOpts, opt)
+			}
+		}
+		for opt := range wantedOpts {
+			t.Errorf("missing mount option %q, found %q", opt, mi.Opts)
+		}
+
+		// Validate Optional
+		if mi.Optional != "" {
+			for _, field := range strings.Split(mi.Optional, ",") {
+				field = clean(field)
+				if !has(wantedOptional, field) && !has(pOptional, field) {
+					t.Errorf("unexpected optional field %q, expected %q", field, optional)
+				}
+				delete(wantedOptional, field)
+			}
+		}
+		for field := range wantedOptional {
+			t.Errorf("missing optional field %q, found %q", field, mi.Optional)
+		}
+
+		return
+	}
+
+	t.Errorf("failed to find mount %q", mnt)
+}
+
+// clean strips off any value param after the colon
+func clean(v string) string {
+	return strings.SplitN(v, ":", 2)[0]
+}
+
+// has returns true if key is a member of m
+func has(m map[string]struct{}, key string) bool {
+	_, ok := m[key]
+	return ok
+}
diff --git a/pkg/mount/mounter_solaris.go b/pkg/mount/mounter_solaris.go
new file mode 100644
index 0000000..c684aa8
--- /dev/null
+++ b/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+	"unsafe"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+//	return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	spec := C.CString(device)
+	dir := C.CString(target)
+	fstype := C.CString(mType)
+	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+	C.free(unsafe.Pointer(spec))
+	C.free(unsafe.Pointer(dir))
+	C.free(unsafe.Pointer(fstype))
+	return err
+}
+
+func unmount(target string, flag int) error {
+	err := unix.Unmount(target, flag)
+	return err
+}
diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go
new file mode 100644
index 0000000..a2a3bb4
--- /dev/null
+++ b/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+	panic("Not implemented")
+}
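The mountinfo files below define the Info record that GetMounts returns on each
platform. A minimal sketch of scanning that table for one mountpoint (the path
is illustrative only):

	entries, err := GetMounts()
	if err != nil {
		return err
	}
	for _, e := range entries {
		if e.Mountpoint == "/mnt/dst" {
			// e.Opts carries per-mount options such as "rw" or "ro";
			// e.Optional carries propagation tags such as "shared:N".
		}
	}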
diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go
new file mode 100644
index 0000000..ff4cc1d
--- /dev/null
+++ b/pkg/mount/mountinfo.go
@@ -0,0 +1,54 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+	// ID is a unique identifier of the mount (may be reused after umount).
+	ID int
+
+	// Parent indicates the ID of the mount parent (or of self for the top of the
+	// mount tree).
+	Parent int
+
+	// Major indicates one half of the device ID which identifies the device class.
+	Major int
+
+	// Minor indicates one half of the device ID which identifies a specific
+	// instance of device.
+	Minor int
+
+	// Root of the mount within the filesystem.
+	Root string
+
+	// Mountpoint indicates the mount point relative to the process's root.
+	Mountpoint string
+
+	// Opts represents mount-specific options.
+	Opts string
+
+	// Optional represents optional fields.
+	Optional string
+
+	// Fstype indicates the type of filesystem, such as EXT3.
+	Fstype string
+
+	// Source indicates filesystem specific information or "none".
+	Source string
+
+	// VfsOpts represents per super block options.
+	VfsOpts string
+}
+
+type byMountpoint []*Info
+
+func (by byMountpoint) Len() int {
+	return len(by)
+}
+
+func (by byMountpoint) Less(i, j int) bool {
+	return by[i].Mountpoint < by[j].Mountpoint
+}
+
+func (by byMountpoint) Swap(i, j int) {
+	by[i], by[j] = by[j], by[i]
+}
diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000..4f32edc
--- /dev/null
+++ b/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+// Parse the mount table via getmntinfo(3) because comparing Dev and ino does
+// not work from bind mounts.
+func parseMountTable() ([]*Info, error) {
+	var rawEntries *C.struct_statfs
+
+	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+	if count == 0 {
+		return nil, fmt.Errorf("Failed to call getmntinfo")
+	}
+
+	var entries []C.struct_statfs
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+	header.Cap = count
+	header.Len = count
+	header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+	var out []*Info
+	for _, entry := range entries {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+		out = append(out, &mountinfo)
+	}
+	return out, nil
+}
diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go
new file mode 100644
index 0000000..be69fee
--- /dev/null
+++ b/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+const (
+	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+	   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
+
+	   (1) mount ID: unique identifier of the mount (may be reused after umount)
+	   (2) parent ID: ID of parent (or of self for the top of the mount tree)
+	   (3) major:minor: value of st_dev for files on filesystem
+	   (4) root: root of the mount within the filesystem
+	   (5) mount point: mount point relative to the process's root
+	   (6) mount options: per mount options
+	   (7) optional fields: zero or more fields of the form "tag[:value]"
+	   (8) separator: marks the end of the optional fields
+	   (9) filesystem type: name of filesystem of the form "type[.subtype]"
+	   (10) mount source: filesystem specific information or "none"
+	   (11) super options: per super block options*/
+	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+	var (
+		s   = bufio.NewScanner(r)
+		out = []*Info{}
+	)
+
+	for s.Scan() {
+		if err := 
s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/pkg/mount/mountinfo_linux_test.go b/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 0000000..bd100e1 --- /dev/null +++ b/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,476 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event 
rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 
rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / 
/var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / 
/var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / 
/var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / 
/var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / 
/var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / 
/var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - 
rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / 
/var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / 
/var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / 
/var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / 
/var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / 
/var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / 
/var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / 
/var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / 
/var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := Info{ + ID: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + 
Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/pkg/mount/mountinfo_solaris.go b/pkg/mount/mountinfo_solaris.go new file mode 100644 index 0000000..ad9ab57 --- /dev/null +++ b/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 0000000..7fbcf19 --- /dev/null +++ b/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/pkg/mount/mountinfo_windows.go b/pkg/mount/mountinfo_windows.go new file mode 100644 index 0000000..dab8a37 --- /dev/null +++ b/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000..8ceec84 --- /dev/null +++ b/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. 
+func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000..c183794 --- /dev/null +++ b/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propagated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } 
+ defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory 
to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/pkg/mount/sharedsubtree_solaris.go b/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 0000000..09f6b03 --- /dev/null +++ b/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. 
See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/pkg/namesgenerator/cmd/names-generator/main.go b/pkg/namesgenerator/cmd/names-generator/main.go new file mode 100644 index 0000000..18a939b --- /dev/null +++ b/pkg/namesgenerator/cmd/names-generator/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/namesgenerator" +) + +func main() { + fmt.Println(namesgenerator.GetRandomName(0)) +} diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go new file mode 100644 index 0000000..2f869ed --- /dev/null +++ b/pkg/namesgenerator/names-generator.go @@ -0,0 +1,606 @@ +package namesgenerator + +import ( + "fmt" + "math/rand" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "vigorous", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. 
+ right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz + "benz", + + // Homi J Bhabha - an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme" - https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth is credited with writing the first assembly language. 
https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American women to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist; her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on the stages of evolution and structure of stars. He won the Nobel Prize in Physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + // Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs), serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein developed the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann + "hermann", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) + "jackson", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Katherine Coleman Goble Johnson - American physicist and mathematician who contributed to NASA. https://en.wikipedia.org/wiki/Katherine_Johnson + "johnson", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - an Indian scientist known as the Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science, in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler + "kepler", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - She was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + // Daniel Lewin - Mathematician, Akamai co-founder, soldier, and 9/11 victim who developed optimization techniques for routing traffic on the internet. He died attempting to stop the 9/11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Award in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // John von Neumann - today's computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture + "neumann", + + // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in Physics in 1930 for his discovery of the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - won the Nobel Prize in Physiology or Medicine jointly with her colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - a notable Indian engineer and recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist who won the Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 9 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { +begin: + name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) + } + return name +} diff --git a/pkg/namesgenerator/names-generator_test.go b/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 0000000..d1a9497 --- /dev/null +++ b/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,27 @@ +package namesgenerator + +import ( + "strings" + "testing" +) + +func TestNameFormat(t *testing.T) { + name := GetRandomName(0) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name contains numbers!") + } +} + +func TestNameRetries(t *testing.T) { + name := GetRandomName(1) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if !strings.ContainsAny(name, "0123456789") { + t.Fatalf("Generated name doesn't contain a number") + } + +} diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go new file mode 100644 index 0000000..7738fc7 --- /dev/null +++ b/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. 
+// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/pkg/parsers/kernel/kernel_darwin.go b/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 0000000..71f205b --- /dev/null +++ b/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get the OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has a format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/pkg/parsers/kernel/kernel_unix.go b/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 0000000..bd137df --- /dev/null +++ b/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris openbsd + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/Sirupsen/logrus" +) +
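A usage sketch for ParseRelease and CompareKernelVersion above (hypothetical caller; the release string is just an example):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/parsers/kernel"
    )

    func main() {
        v, err := kernel.ParseRelease("4.9.0-3-amd64")
        if err != nil {
            panic(err)
        }
        // Kernel=4, Major=9, Minor=0, Flavor="-3-amd64"; String() round-trips it.
        fmt.Println(v)
        // Three-way comparison: prints 1 because 4.9.0 > 4.4.0.
        fmt.Println(kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 4, Minor: 0}))
    }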
+// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for ParseRelease to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if the current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/pkg/parsers/kernel/kernel_unix_test.go b/pkg/parsers/kernel/kernel_unix_test.go new file mode 100644 index 0000000..dc8c0e3 --- /dev/null +++ b/pkg/parsers/kernel/kernel_unix_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +// TestParseRelease tests the ParseRelease() function +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + t.Fatalf("Expected error %q, got %v", expectedMessage, err) + } + } +} + +func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result.
Found %d, expected %d", r, result) + } +} + +// TestCompareKernelVersion tests the CompareKernelVersion() function +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 0000000..80fab8f --- /dev/null +++ b/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Full version string of the kernel, from the registry BuildLabEx value + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 6) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 1) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 7601) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information.
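+ // Illustrative decode (values assumed for Windows 7 SP1, version 6.1 build 7601):
+ // GetVersion packs the version into a DWORD as 0xBBBBMMmm, here 0x1DB10106,
+ // so major = 0x06 = 6, minor = 0x01 = 1, build = 0x1DB1 = 7601.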
+ if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/pkg/parsers/kernel/uname_linux.go b/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 0000000..bb9b326 --- /dev/null +++ b/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/pkg/parsers/kernel/uname_solaris.go b/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000..49370bd --- /dev/null +++ b/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/pkg/parsers/kernel/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 0000000..1da3f23 --- /dev/null +++ b/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_linux.go b/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 0000000..e04a349 --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. 
+func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/pkg/parsers/operatingsystem/operatingsystem_solaris.go new file mode 100644 index 0000000..d08ad14 --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package operatingsystem + +/* +#include +*/ +import "C" + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var etcOsRelease = "/etc/release" + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("\n")); i >= 0 { + b = bytes.Trim(b[:i], " ") + return string(b), nil + } + return "", errors.New("release not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + if C.getzoneid() != 0 { + return true, nil + } + return false, nil +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_unix.go b/pkg/parsers/operatingsystem/operatingsystem_unix.go new file mode 100644 index 0000000..bc91c3c --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_unix.go @@ -0,0 +1,25 @@ +// +build freebsd darwin + +package operatingsystem + +import ( + "errors" + "os/exec" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + cmd := exec.Command("uname", "-s") + osName, err := cmd.Output() + if err != nil { + return "", err + } + return string(osName), nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD and Darwin, always returns false. 
+func IsContainerized() (bool, error) { + // TODO: Implement jail detection for freeBSD + return false, errors.New("Cannot detect if we are in container") +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/pkg/parsers/operatingsystem/operatingsystem_unix_test.go new file mode 100644 index 0000000..e7120c6 --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -0,0 +1,247 @@ +// +build linux freebsd + +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var backup = etcOsRelease + + invalids := []struct { + content string + errorExpected string + }{ + { + `PRETTY_NAME=Source Mage GNU/Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", + }, + { + `PRETTY_NAME="Ubuntu Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME=Ubuntu' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", + }, + } + + valids := []struct { + content string + expected string + }{ + { + `NAME="Ubuntu" +PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`, + "Gentoo/Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Ubuntu 14.04 LTS", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME='Ubuntu 14.04 LTS'`, + "Ubuntu 14.04 LTS", + }, + { + `PRETTY_NAME=Source +NAME="Source Mage"`, + "Source", + }, + { + `PRETTY_NAME=Source +PRETTY_NAME="Source Mage"`, + "Source Mage", + }, + } + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for _, elt := range invalids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err == nil || err.Error() != elt.errorExpected { + t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) + } + } + + for _, elt := range valids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != elt.expected { + t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = 
proc1Cgroup + nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope +8:net_cls,net_prio:/ +7:cpuset:/ +6:freezer:/ +5:devices:/init.scope +4:blkio:/init.scope +3:cpu,cpuacct:/init.scope +2:perf_event:/ +1:name=systemd:/init.scope +`) + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} + +func TestOsReleaseFallback(t *testing.T) { + var backup = etcOsRelease + var altBackup = altOsRelease + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + altOsRelease = filepath.Join(dir, "altOsRelease") + + defer func() { + os.Remove(dir) + etcOsRelease = backup + altOsRelease = altBackup + }() + content := `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +` + if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != "Gentoo/Linux" { + t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) + } +} diff --git a/pkg/parsers/operatingsystem/operatingsystem_windows.go b/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 0000000..3c86b6a --- /dev/null +++ b/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,49 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets 
the name of the current operating system. +func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. +func IsContainerized() (bool, error) { + return false, nil +} diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go new file mode 100644 index 0000000..acc8971 --- /dev/null +++ b/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. 
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go new file mode 100644 index 0000000..7f19e90 --- /dev/null +++ b/pkg/parsers/parsers_test.go @@ -0,0 +1,70 @@ +package parsers + +import ( + "reflect" + "testing" +) + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParseUintList(t *testing.T) { + valids := map[string]map[int]bool{ + "": {}, + "7": {7: true}, + "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, + "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, + "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, + "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, + "03,1-3": {1: true, 2: true, 3: true}, + "3,2,1": {1: true, 2: true, 3: true}, + "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, + } + for k, v := range valids { + out, err := ParseUintList(k) + if err != nil { + t.Fatalf("Expected not to fail, got %v", err) + } + if !reflect.DeepEqual(out, v) { + t.Fatalf("Expected %v, got %v", v, out) + } + } + + invalids := []string{ + "this", + "1--", + "1-10,,10", + "10-1", + "-1", + "-1,0", + } + for _, v := range invalids { + if out, err := ParseUintList(v); err == nil { + t.Fatalf("Expected failure with %s but got %v", v, out) + } + } +} diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go new file mode 100644 index 0000000..0fc3997 --- /dev/null +++ b/pkg/pidfile/pidfile.go @@ -0,0 +1,53 @@ +// Package pidfile provides structure and helper functions to create and remove +// PID file. A PID file is usually a file used to store the process ID of a +// running process. 
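Usage of the two parsers above, mirroring the tests just shown (the option string and list are examples):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/parsers"
    )

    func main() {
        k, v, _ := parsers.ParseKeyValueOpt("storage-driver=overlay2")
        fmt.Println(k, v) // storage-driver overlay2

        set, _ := parsers.ParseUintList("0,3-4,7")
        fmt.Println(set) // map[0:true 3:true 4:true 7:true]
    }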
+package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if processExists(pid) { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDfile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + // Note MkdirAll returns nil if a directory already exists + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755), ""); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + return os.Remove(file.path) +} diff --git a/pkg/pidfile/pidfile_darwin.go b/pkg/pidfile/pidfile_darwin.go new file mode 100644 index 0000000..f490f53 --- /dev/null +++ b/pkg/pidfile/pidfile_darwin.go @@ -0,0 +1,14 @@ +// +build darwin + +package pidfile + +import ( + "syscall" +) + +func processExists(pid int) bool { + // OS X does not have a proc filesystem. + // Use kill -0 pid to judge if the process exists. + err := syscall.Kill(pid, 0) + return err == nil +} diff --git a/pkg/pidfile/pidfile_test.go b/pkg/pidfile/pidfile_test.go new file mode 100644 index 0000000..73e8af7 --- /dev/null +++ b/pkg/pidfile/pidfile_test.go @@ -0,0 +1,38 @@ +package pidfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + path := filepath.Join(dir, "testfile") + file, err := New(path) + if err != nil { + t.Fatal("Could not create test file", err) + } + + _, err = New(path) + if err == nil { + t.Fatal("Test file creation not blocked") + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := PIDFile{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/pkg/pidfile/pidfile_unix.go b/pkg/pidfile/pidfile_unix.go new file mode 100644 index 0000000..1bf5221 --- /dev/null +++ b/pkg/pidfile/pidfile_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!darwin + +package pidfile + +import ( + "os" + "path/filepath" + "strconv" +) + +func processExists(pid int) bool { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return true + } + return false +} diff --git a/pkg/pidfile/pidfile_windows.go b/pkg/pidfile/pidfile_windows.go new file mode 100644 index 0000000..ae489c6 --- /dev/null +++ b/pkg/pidfile/pidfile_windows.go @@ -0,0 +1,23 @@ +package pidfile + +import "syscall" + +const ( + processQueryLimitedInformation = 0x1000 + + stillActive = 259 +) + +func processExists(pid int) bool { + h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + if err != nil { + return false + } + var c uint32 + err = 
syscall.GetExitCodeProcess(h, &c) + syscall.Close(h) + if err != nil { + return c == stillActive + } + return true +} diff --git a/pkg/platform/architecture_linux.go b/pkg/platform/architecture_linux.go new file mode 100644 index 0000000..2cdc2c5 --- /dev/null +++ b/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/pkg/platform/architecture_unix.go b/pkg/platform/architecture_unix.go new file mode 100644 index 0000000..45bbcf1 --- /dev/null +++ b/pkg/platform/architecture_unix.go @@ -0,0 +1,20 @@ +// +build freebsd solaris darwin + +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "os/exec" + "strings" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("/usr/bin/uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(machine)), nil +} diff --git a/pkg/platform/architecture_windows.go b/pkg/platform/architecture_windows.go new file mode 100644 index 0000000..c5f684d --- /dev/null +++ b/pkg/platform/architecture_windows.go @@ -0,0 +1,60 @@ +package platform + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") +) + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx +type systeminfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Constants +const ( + ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 + ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 + ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL + ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + switch sysinfo.wProcessorArchitecture { + case ProcessorArchitecture64, ProcessorArchitectureIA64: + return "x86_64", nil + case ProcessorArchitecture32: + return "i686", nil + case ProcessorArchitectureArm: + return "arm", nil + default: + return "", fmt.Errorf("Unknown processor architecture") + } +} + +// NumProcs returns the number of processors on the system +func NumProcs() uint32 { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + return sysinfo.dwNumberOfProcessors +} diff --git a/pkg/platform/platform.go b/pkg/platform/platform.go new file mode 100644 index 0000000..e4b0312 --- /dev/null +++ b/pkg/platform/platform.go 
@@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. + OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/pkg/platform/utsname_int8.go b/pkg/platform/utsname_int8.go new file mode 100644 index 0000000..5dcbadf --- /dev/null +++ b/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/pkg/platform/utsname_int8_test.go b/pkg/platform/utsname_int8_test.go new file mode 100644 index 0000000..9dada23 --- /dev/null +++ b/pkg/platform/utsname_int8_test.go @@ -0,0 +1,16 @@ +// +build linux,386 linux,amd64 linux,arm64 + +package platform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCharToString(t *testing.T) { + machineInBytes := [65]int8{120, 56, 54, 95, 54, 52} + machineInString := charsToString(machineInBytes) + assert.NotNil(t, machineInString, "Unable to convert char into string.") + assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.") +} diff --git a/pkg/platform/utsname_uint8.go b/pkg/platform/utsname_uint8.go new file mode 100644 index 0000000..c9875cf --- /dev/null +++ b/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x +// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/pkg/platform/utsname_uint8_test.go b/pkg/platform/utsname_uint8_test.go new file mode 100644 index 0000000..444b83b --- /dev/null +++ b/pkg/platform/utsname_uint8_test.go @@ -0,0 +1,16 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x + +package platform + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTestCharToString(t *testing.T) { + machineInBytes := [65]uint8{120, 56, 54, 95, 54, 52} + machineInString := charsToString(machineInBytes) + assert.NotNil(t, machineInString, "Unable to convert char into string.") + assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.") +} diff --git a/pkg/plugingetter/getter.go b/pkg/plugingetter/getter.go new file mode 100644 index 0000000..b04b7bc --- /dev/null +++ b/pkg/plugingetter/getter.go @@ -0,0 +1,35 @@ +package plugingetter + +import "github.com/docker/docker/pkg/plugins" + +const ( + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 +) + +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. 
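A sketch of the reference-counting contract around the PluginGetter interface that follows (hypothetical caller; `store` stands in for any PluginGetter implementation, such as the daemon's plugin store):

    // Look up a volume plugin and pin it while it is in use:
    p, err := store.Get("local", "VolumeDriver", plugingetter.Acquire) // RefCount+1
    if err != nil {
        return err
    }
    defer store.Get("local", "VolumeDriver", plugingetter.Release) // RefCount-1
    _ = p.Client() // HTTP client used to talk to the plugin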
+type CompatPlugin interface { + Client() *plugins.Client + Name() string + BasePath() string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. +type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go new file mode 100644 index 0000000..f221a46 --- /dev/null +++ b/pkg/plugins/client.go @@ -0,0 +1,205 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeout), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: timeout, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. 
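+// For example, a hypothetical call to a volume plugin (the method and socket
+// names are illustrative):
+//
+//	c, _ := NewClient("unix:///run/docker/plugins/sample.sock", nil)
+//	var resp struct{ Err string }
+//	err := c.Call("VolumeDriver.Create", map[string]string{"Name": "vol1"}, &resp)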
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + var retries int + start := time.Now() + + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
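+ // i.e. the body did not decode as {"Err": "..."}; fall back to surfacing
+ // the raw response body as the error text: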
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/pkg/plugins/client_test.go b/pkg/plugins/client_test.go new file mode 100644 index 0000000..7c519a2 --- /dev/null +++ b/pkg/plugins/client_test.go @@ -0,0 +1,234 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/stretchr/testify/assert" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) + _, err := c.callWithRetry("Service.Method", nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestFailOnce(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + failed := false + mux.HandleFunc("/Test.FailOnce", func(w http.ResponseWriter, r *http.Request) { + if !failed { + failed = true + panic("Plugin not ready") + } + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + b := strings.NewReader("body") + _, err := c.callWithRetry("Test.FailOnce", b, true) + if err != nil { + t.Fatal(err) + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, m, output) + err = c.Call("Test.Echo", nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + {time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := 
abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} + +func TestClientScheme(t *testing.T) { + cases := map[string]string{ + "tcp://127.0.0.1:8080": "http", + "unix:///usr/local/plugins/foo": "http", + "http://127.0.0.1:8080": "http", + "https://127.0.0.1:8080": "https", + } + + for addr, scheme := range cases { + u, err := url.Parse(addr) + if err != nil { + t.Fatal(err) + } + s := httpScheme(u) + + if s != scheme { + t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) + } + } +} + +func TestNewClientWithTimeout(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + time.Sleep(time.Duration(600) * time.Millisecond) + io.Copy(w, r.Body) + }) + + // setting timeout of 500ms + timeout := time.Duration(500) * time.Millisecond + c, _ := NewClientWithTimeout(addr, &tlsconfig.Options{InsecureSkipVerify: true}, timeout) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err == nil { + t.Fatal("Expected timeout error") + } +} + +func TestClientStream(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + body, err := c.Stream("Test.Echo", m) + if err != nil { + t.Fatal(err) + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&output); err != nil { + t.Fatalf("Test.Echo: error reading plugin resp: %v", err) + } + assert.Equal(t, m, output) +} + +func TestClientSendFile(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + if err := c.SendFile("Test.Echo", &buf, &output); err != nil { + t.Fatal(err) + } + assert.Equal(t, m, output) +} diff --git a/pkg/plugins/discovery.go b/pkg/plugins/discovery.go new file mode 100644 index 0000000..e99581c --- /dev/null +++ b/pkg/plugins/discovery.go @@ -0,0 +1,131 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). 
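A sketch of how discovery is driven from the filesystem (the plugin name and address are assumed; the paths come from socketsPath/specsPaths):

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/docker/pkg/plugins"
    )

    func main() {
        // One file per plugin under a well-known path; its body is the address.
        _ = ioutil.WriteFile("/etc/docker/plugins/sample.spec",
            []byte("tcp://localhost:8080"), 0644)

        names, _ := plugins.Scan() // now includes "sample"
        fmt.Println(names)
    }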
+type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) + } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/pkg/plugins/discovery_test.go b/pkg/plugins/discovery_test.go new file mode 100644 index 0000000..1a23faa --- /dev/null +++ b/pkg/plugins/discovery_test.go @@ -0,0 +1,152 @@ +package plugins + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []struct { + path string + name string + addr string + fail bool + }{ + // TODO Windows: Factor out the unix:// variants. 
+ {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if !p.TLSConfig.InsecureSkipVerify { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} + +func TestFileJSONSpecPluginWithoutTLSConfig(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig != nil { + t.Fatalf("Expected plugin TLSConfig nil, got %v\n", plugin.TLSConfig) + } +} diff --git a/pkg/plugins/discovery_unix.go b/pkg/plugins/discovery_unix.go new file mode 
100644 index 0000000..693a47e --- /dev/null +++ b/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/pkg/plugins/discovery_unix_test.go b/pkg/plugins/discovery_unix_test.go new file mode 100644 index 0000000..66f5035 --- /dev/null +++ b/pkg/plugins/discovery_unix_test.go @@ -0,0 +1,100 @@ +// +build !windows + +package plugins + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestLocalSocket(t *testing.T) { + // TODO Windows: Enable a similar version for Windows named pipes + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if !p.TLSConfig.InsecureSkipVerify { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} + +func TestScan(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + pluginNames, err := Scan() + if err != nil { + t.Fatal(err) + } + if pluginNames != nil { + t.Fatal("Plugin names should be empty.") + } + + path := filepath.Join(tmpdir, "echo.spec") + addr := "unix://var/lib/docker/plugins/echo.sock" + name := "echo" + + err = os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(path, []byte(addr), 0644) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(name) + + pluginNamesNotEmpty, err := Scan() + if err != nil { + t.Fatal(err) + } + if p.Name() != pluginNamesNotEmpty[0] { + t.Fatalf("Unable to scan plugin with name %s", p.name) + } +} diff --git a/pkg/plugins/discovery_windows.go b/pkg/plugins/discovery_windows.go new file mode 100644 index 0000000..d7c1fe4 --- /dev/null +++ b/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/pkg/plugins/errors.go b/pkg/plugins/errors.go new file mode 100644 index 0000000..7988471 --- /dev/null +++ b/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/pkg/plugins/plugin_test.go b/pkg/plugins/plugin_test.go new 
file mode 100644 index 0000000..00fcb85 --- /dev/null +++ b/pkg/plugins/plugin_test.go @@ -0,0 +1,156 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "path/filepath" + "runtime" + "sync" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/stretchr/testify/assert" +) + +const ( + fruitPlugin = "fruit" + fruitImplements = "apple" +) + +// regression test for deadlock in handlers +func TestPluginAddHandler(t *testing.T) { + // make a plugin which is pre-activated + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{"bananas"}} + storage.plugins["qwerty"] = p + + testActive(t, p) + Handle("bananas", func(_ string, _ *Client) {}) + testActive(t, p) +} + +func TestPluginWaitBadPlugin(t *testing.T) { + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + testActive(t, p) +} + +func testActive(t *testing.T, p *Plugin) { + done := make(chan struct{}) + go func() { + p.waitActive() + close(done) + }() + + select { + case <-time.After(100 * time.Millisecond): + _, f, l, _ := runtime.Caller(1) + t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) + case <-done: + } + +} + +func TestGet(t *testing.T) { + p := &Plugin{name: fruitPlugin, activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{fruitImplements}} + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } + if plugin.Client() != nil { + t.Fatal("expected nil Client but found one") + } + if !plugin.IsV1() { + t.Fatal("Expected true for V1 plugin") + } + + // check negative case where plugin fruit doesn't implement banana + _, err = Get("fruit", "banana") + assert.Equal(t, err, ErrNotImplements) + + // check negative case where plugin vegetable doesn't exist + _, err = Get("vegetable", "potato") + assert.Equal(t, err, ErrNotFound) + +} + +func TestPluginWithNoManifest(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{fruitImplements}} + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, &buf) + }) + + p := &Plugin{ + name: fruitPlugin, + activateWait: sync.NewCond(&sync.Mutex{}), + Addr: addr, + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + } + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } +} + +func TestGetAll(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + plugin.Manifest = 
&Manifest{Implements: []string{"apple"}} + storage.plugins["example"] = plugin + + fetchedPlugins, err := GetAll("apple") + if err != nil { + t.Fatal(err) + } + if fetchedPlugins[0].Name() != plugin.Name() { + t.Fatalf("Expected to get plugin with name %s", plugin.Name()) + } +} diff --git a/pkg/plugins/pluginrpc-gen/README.md b/pkg/plugins/pluginrpc-gen/README.md new file mode 100644 index 0000000..5f6a421 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/README.md @@ -0,0 +1,58 @@ +Plugin RPC Generator +==================== + +Generates go code from a Go interface definition for proxying between the plugin +API and the subsystem being extended. + +## Usage + +Given an interface definition: + +```go +type volumeDriver interface { + Create(name string, opts opts) (err error) + Remove(name string) (err error) + Path(name string) (mountpoint string, err error) + Mount(name string) (mountpoint string, err error) + Unmount(name string) (err error) +} +``` + +**Note**: All function options and return values must be named in the definition. + +Run the generator: + +```bash +$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go +``` + +Where: +- `--type` is the name of the interface to use +- `--name` is the subsystem that the plugin "Implements" +- `-i` is the input file containing the interface definition +- `-o` is the output file where the generated code should go + +**Note**: The generated code will use the same package name as the one defined in the input file + +Optionally, you can skip functions on the interface that should not be +implemented in the generated proxy code by passing in the function name to `--skip`. +This flag can be specified multiple times. + +You can also add build tags that should be prepended to the generated code by +supplying `--tag`. This flag can be specified multiple times. + +## Known issues + +## go-generate + +You can also use this with go-generate, which is pretty awesome. +To do so, place the code at the top of the file which contains the interface +definition (i.e., the input file): + +```go +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver +``` + +Then cd to the package dir and run `go generate` + +**Note**: the `pluginrpc-gen` binary must be within your `$PATH` diff --git a/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/pkg/plugins/pluginrpc-gen/fixtures/foo.go new file mode 100644 index 0000000..5695dcc --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -0,0 +1,89 @@ +package foo + +import ( + "fmt" + + aliasedio "io" + + "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" +) + +var ( + errFakeImport = fmt.Errorf("just to import fmt for imports tests") +) + +type wobble struct { + Some string + Val string + Inception *wobble +} + +// Fooer is an empty interface used for tests. +type Fooer interface{} + +// Fooer2 is an interface used for tests. +type Fooer2 interface { + Foo() +} + +// Fooer3 is an interface used for tests. 
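For orientation, the proxy emitted for the README's `volumeDriver.Path` method looks roughly like the following. This is a sketch reconstructed from `template.go` further down in this patch, not actual generator output; the `drivers` package name follows the README's example paths and is otherwise illustrative:

```go
// Sketch of generated output (real generated files must not be hand-edited).
package drivers

import "errors"

type client interface {
	Call(string, interface{}, interface{}) error
}

type volumeDriverProxy struct {
	client
}

type volumeDriverProxyPathRequest struct {
	Name string
}

type volumeDriverProxyPathResponse struct {
	Mountpoint string
	Err        string // error returns are flattened to strings for encoding
}

func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) {
	var (
		req volumeDriverProxyPathRequest
		ret volumeDriverProxyPathResponse
	)
	req.Name = name
	if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil {
		return
	}
	mountpoint = ret.Mountpoint
	if ret.Err != "" {
		err = errors.New(ret.Err)
	}
	return
}
```

The `errors` import and the string round-trip for `error` values correspond to the `buildImports` and `marshalType` helpers in the template later in this patch.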
+type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) + WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) +} + +// Fooer4 is an interface used for tests. +type Fooer4 interface { + Foo() error +} + +// Bar is an interface used for tests. +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +// Fooer5 is an interface used for tests. +type Fooer5 interface { + Foo() + Bar +} + +// Fooer6 is an interface used for tests. +type Fooer6 interface { + Foo(a otherfixture.Spaceship) +} + +// Fooer7 is an interface used for tests. +type Fooer7 interface { + Foo(a *otherfixture.Spaceship) +} + +// Fooer8 is an interface used for tests. +type Fooer8 interface { + Foo(a map[string]otherfixture.Spaceship) +} + +// Fooer9 is an interface used for tests. +type Fooer9 interface { + Foo(a map[string]*otherfixture.Spaceship) +} + +// Fooer10 is an interface used for tests. +type Fooer10 interface { + Foo(a []otherfixture.Spaceship) +} + +// Fooer11 is an interface used for tests. +type Fooer11 interface { + Foo(a []*otherfixture.Spaceship) +} + +// Fooer12 is an interface used for tests. +type Fooer12 interface { + Foo(a aliasedio.Reader) +} diff --git a/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go new file mode 100644 index 0000000..1937d17 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -0,0 +1,4 @@ +package otherfixture + +// Spaceship is a fixture for tests +type Spaceship struct{} diff --git a/pkg/plugins/pluginrpc-gen/main.go b/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 0000000..e77a7d4 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs map[string]struct{} + flSkipFuncs = stringSet{make(map[string]struct{})} + + flBuildTags = stringSet{make(map[string]struct{})} +) + +func errorOut(msg string, err error) { + if err == nil { + return + } + fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) + os.Exit(1) +} + +func checkFlags() error { + if *outputFile == "" { + return fmt.Errorf("missing required flag `-o`") + } + if *inputFile == "" { + return fmt.Errorf("missing required flag `-i`") + } + return nil +} + +func main() { + flag.Var(flSkipFuncs, "skip", "skip parsing for function") + flag.Var(flBuildTags, "tag", "build tags to add to generated files") + flag.Parse() + skipFuncs = flSkipFuncs.GetValues() + + errorOut("error", checkFlags()) + + pkg, err := 
Parse(*inputFile, *typeName)
+	errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err)
+
+	var analysis = struct {
+		InterfaceType string
+		RPCName       string
+		BuildTags     map[string]struct{}
+		*ParsedPkg
+	}{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg}
+	var buf bytes.Buffer
+
+	errorOut("template error", generatedTempl.Execute(&buf, analysis))
+	src, err := format.Source(buf.Bytes())
+	errorOut("error formatting generated source:\n"+buf.String(), err)
+	errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644))
+}
+
+func toLower(s string) string {
+	if s == "" {
+		return ""
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(unicode.ToLower(r)) + s[n:]
+}
diff --git a/pkg/plugins/pluginrpc-gen/parser.go b/pkg/plugins/pluginrpc-gen/parser.go
new file mode 100644
index 0000000..6c547e1
--- /dev/null
+++ b/pkg/plugins/pluginrpc-gen/parser.go
@@ -0,0 +1,263 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"path"
+	"reflect"
+	"strings"
+)
+
+var errBadReturn = errors.New("found return arg with no name: all args must be named")
+
+type errUnexpectedType struct {
+	expected string
+	actual   interface{}
+}
+
+func (e errUnexpectedType) Error() string {
+	return fmt.Sprintf("wrong type: expected %s, got %v", e.expected, reflect.TypeOf(e.actual))
+}
+
+// ParsedPkg holds information about a package that has been parsed,
+// its name and the list of functions.
+type ParsedPkg struct {
+	Name      string
+	Functions []function
+	Imports   []importSpec
+}
+
+type function struct {
+	Name    string
+	Args    []arg
+	Returns []arg
+	Doc     string
+}
+
+type arg struct {
+	Name            string
+	ArgType         string
+	PackageSelector string
+}
+
+func (a *arg) String() string {
+	return a.Name + " " + a.ArgType
+}
+
+type importSpec struct {
+	Name string
+	Path string
+}
+
+func (s *importSpec) String() string {
+	var ss string
+	if len(s.Name) != 0 {
+		ss += s.Name
+	}
+	ss += s.Path
+	return ss
+}
+
+// Parse parses the given file for an interface definition with the given name.
+func Parse(filePath string, objName string) (*ParsedPkg, error) {
+	fs := token.NewFileSet()
+	pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors)
+	if err != nil {
+		return nil, err
+	}
+	p := &ParsedPkg{}
+	p.Name = pkg.Name.Name
+	obj, exists := pkg.Scope.Objects[objName]
+	if !exists {
+		return nil, fmt.Errorf("could not find object %s in %s", objName, filePath)
+	}
+	if obj.Kind != ast.Typ {
+		return nil, fmt.Errorf("expected type, got %s", obj.Kind)
+	}
+	spec, ok := obj.Decl.(*ast.TypeSpec)
+	if !ok {
+		return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl}
+	}
+	iface, ok := spec.Type.(*ast.InterfaceType)
+	if !ok {
+		return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type}
+	}
+
+	p.Functions, err = parseInterface(iface)
+	if err != nil {
+		return nil, err
+	}
+
+	// figure out what imports will be needed
+	imports := make(map[string]importSpec)
+	for _, f := range p.Functions {
+		args := append(f.Args, f.Returns...)
+ for _, arg := range args { + if len(arg.PackageSelector) == 0 { + continue + } + + for _, i := range pkg.Imports { + if i.Name != nil { + if i.Name.Name != arg.PackageSelector { + continue + } + imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} + break + } + + _, name := path.Split(i.Path.Value) + splitName := strings.Split(name, "-") + if len(splitName) > 1 { + name = splitName[len(splitName)-1] + } + // import paths have quotes already added in, so need to remove them for name comparison + name = strings.TrimPrefix(name, `"`) + name = strings.TrimSuffix(name, `"`) + if name == arg.PackageSelector { + imports[i.Path.Value] = importSpec{Path: i.Path.Value} + break + } + } + } + } + + for _, spec := range imports { + p.Imports = append(p.Imports, spec) + } + + return p, nil +} + +func parseInterface(iface *ast.InterfaceType) ([]function, error) { + var functions []function + for _, field := range iface.Methods.List { + switch f := field.Type.(type) { + case *ast.FuncType: + method, err := parseFunc(field) + if err != nil { + return nil, err + } + if method == nil { + continue + } + functions = append(functions, *method) + case *ast.Ident: + spec, ok := f.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} + } + funcs, err := parseInterface(iface) + if err != nil { + fmt.Println(err) + continue + } + functions = append(functions, funcs...) + default: + return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} + } + } + return functions, nil +} + +func parseFunc(field *ast.Field) (*function, error) { + f := field.Type.(*ast.FuncType) + method := &function{Name: field.Names[0].Name} + if _, exists := skipFuncs[method.Name]; exists { + fmt.Println("skipping:", method.Name) + return nil, nil + } + if f.Params != nil { + args, err := parseArgs(f.Params.List) + if err != nil { + return nil, err + } + method.Args = args + } + if f.Results != nil { + returns, err := parseArgs(f.Results.List) + if err != nil { + return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) + } + method.Returns = returns + } + return method, nil +} + +func parseArgs(fields []*ast.Field) ([]arg, error) { + var args []arg + for _, f := range fields { + if len(f.Names) == 0 { + return nil, errBadReturn + } + for _, name := range f.Names { + p, err := parseExpr(f.Type) + if err != nil { + return nil, err + } + args = append(args, arg{name.Name, p.value, p.pkg}) + } + } + return args, nil +} + +type parsedExpr struct { + value string + pkg string +} + +func parseExpr(e ast.Expr) (parsedExpr, error) { + var parsed parsedExpr + switch i := e.(type) { + case *ast.Ident: + parsed.value += i.Name + case *ast.StarExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.value += "*" + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.SelectorExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.pkg = p.value + parsed.value += p.value + "." 
+ parsed.value += i.Sel.Name + case *ast.MapType: + parsed.value += "map[" + p, err := parseExpr(i.Key) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.value += "]" + p, err = parseExpr(i.Value) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.ArrayType: + parsed.value += "[]" + p, err := parseExpr(i.Elt) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + default: + return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} + } + return parsed, nil +} diff --git a/pkg/plugins/pluginrpc-gen/parser_test.go b/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 0000000..a1b1ac9 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(errUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 7, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "wobble", arg.ArgType) + + f = pkg.Functions[6] + assertName(t, "WiggleWobble", f.Name) + assertNum(t, 6, len(f.Args)) 
+ assertNum(t, 6, len(f.Returns)) + expectedArgs := [][]string{ + {"a", "[]*wobble"}, + {"b", "[]wobble"}, + {"c", "map[string]*wobble"}, + {"d", "map[*wobble]wobble"}, + {"e", "map[string][]wobble"}, + {"f", "[]*otherfixture.Spaceship"}, + } + for i, arg := range f.Args { + assertName(t, expectedArgs[i][0], arg.Name) + assertName(t, expectedArgs[i][1], arg.ArgType) + } + expectedReturns := [][]string{ + {"g", "map[*wobble]wobble"}, + {"h", "[][]*wobble"}, + {"i", "otherfixture.Spaceship"}, + {"j", "*otherfixture.Spaceship"}, + {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, + {"l", "[]otherfixture.Spaceship"}, + } + for i, ret := range f.Returns { + assertName(t, expectedReturns[i][0], ret.Name) + assertName(t, expectedReturns[i][1], ret.ArgType) + } +} + +func TestParseWithUnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func TestParsedImports(t *testing.T) { + cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} + for _, testCase := range cases { + pkg, err := Parse(testFixture, testCase) + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + importPath := strings.Split(pkg.Imports[0].Path, "/") + assertName(t, "otherfixture\"", importPath[len(importPath)-1]) + assertName(t, "", pkg.Imports[0].Name) + } +} + +func TestAliasedImports(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer12") + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + assertName(t, "aliasedio", pkg.Imports[0].Name) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/pkg/plugins/pluginrpc-gen/template.go b/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 0000000..50ed929 --- /dev/null +++ b/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,118 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range args { + argStr = append(argStr, arg.String()) + } + return strings.Join(argStr, ", ") +} + +func buildImports(specs []importSpec) string { + if len(specs) == 0 { + return `import "errors"` + } + imports := "import(\n" + imports += "\t\"errors\"\n" + for 
_, i := range specs {
+		imports += "\t" + i.String() + "\n"
+	}
+	imports += ")"
+	return imports
+}
+
+func marshalType(t string) string {
+	switch t {
+	case "error":
+		// convert error types to plain strings to ensure the values are encoded/decoded properly
+		return "string"
+	default:
+		return t
+	}
+}
+
+func isErr(t string) bool {
+	switch t {
+	case "error":
+		return true
+	default:
+		return false
+	}
+}
+
+// Need to use this helper due to issues with go-vet
+func buildTag(s string) string {
+	return "+build " + s
+}
+
+var templFuncs = template.FuncMap{
+	"printArgs":   printArgs,
+	"marshalType": marshalType,
+	"isErr":       isErr,
+	"lower":       strings.ToLower,
+	"title":       title,
+	"tag":         buildTag,
+	"imports":     buildImports,
+}
+
+func title(s string) string {
+	if strings.ToLower(s) == "id" {
+		return "ID"
+	}
+	return strings.Title(s)
+}
+
+var generatedTempl = template.Must(template.New("rpc_client").Funcs(templFuncs).Parse(`
+// generated code - DO NOT EDIT
+{{ range $k, $v := .BuildTags }}
+	// {{ tag $k }} {{ end }}
+
+package {{ .Name }}
+
+{{ imports .Imports }}
+
+type client interface{
+	Call(string, interface{}, interface{}) error
+}
+
+type {{ .InterfaceType }}Proxy struct {
+	client
+}
+
+{{ range .Functions }}
+	type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{
+		{{ range .Args }}
+		{{ title .Name }} {{ .ArgType }} {{ end }}
+	}
+
+	type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{
+		{{ range .Returns }}
+		{{ title .Name }} {{ marshalType .ArgType }} {{ end }}
+	}
+
+	func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) {
+		var(
+			req {{ $.InterfaceType }}Proxy{{ .Name }}Request
+			ret {{ $.InterfaceType }}Proxy{{ .Name }}Response
+		)
+		{{ range .Args }}
+		req.{{ title .Name }} = {{ lower .Name }} {{ end }}
+		if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil {
+			return
+		}
+		{{ range $r := .Returns }}
+		{{ if isErr .ArgType }}
+		if ret.{{ title .Name }} != "" {
+			{{ lower .Name }} = errors.New(ret.{{ title .Name }})
+		} {{ end }}
+		{{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }}
+
+		return
+	}
+{{ end }}
+`))
diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go
new file mode 100644
index 0000000..c0059cb
--- /dev/null
+++ b/pkg/plugins/plugins.go
@@ -0,0 +1,329 @@
+// Package plugins provides structures and helper functions to manage Docker
+// plugins.
+//
+// Docker discovers plugins by looking for them in the plugin directory whenever
+// a user or container tries to use one by name. UNIX domain socket files must
+// be located under /run/docker/plugins, whereas spec files can be located
+// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled
+// by the Registry interface, which lets you list all plugins or get a plugin by
+// its name if it exists.
+//
+// The plugins need to implement an HTTP server and bind this to the UNIX socket
+// or the address specified in the spec files.
+// A handshake is sent at /Plugin.Activate, and plugins are expected to return
+// a Manifest with a list of the Docker subsystems which this plugin implements.
+//
+// In order to use a plugin, you can use ``Get`` with the name of the
+// plugin and the subsystem it implements.
+// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins + +import ( + "errors" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystem the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin. +func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. +func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. 
+// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
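A minimal sketch of how a subsystem consumes `Handle` (the `"VolumeDriver"` string is illustrative; any interface name a plugin advertises in its Manifest works). The registered callback fires for each plugin that activates with that interface:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	// Register a callback for every activated plugin implementing "VolumeDriver".
	plugins.Handle("VolumeDriver", func(name string, client *plugins.Client) {
		log.Printf("volume plugin %q is now active", name)
	})
}
```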
+func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/pkg/plugins/plugins_unix.go b/pkg/plugins/plugins_unix.go new file mode 100644 index 0000000..02f1da6 --- /dev/null +++ b/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For v1 plugins, this always returns the host's root directory. +func (p *Plugin) BasePath() string { + return "/" +} diff --git a/pkg/plugins/plugins_windows.go b/pkg/plugins/plugins_windows.go new file mode 100644 index 0000000..3c8d8fe --- /dev/null +++ b/pkg/plugins/plugins_windows.go @@ -0,0 +1,8 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Windows v1 plugins, this returns an empty string, since the plugin is already aware +// of the absolute path of the mount. +func (p *Plugin) BasePath() string { + return "" +} diff --git a/pkg/plugins/transport/http.go b/pkg/plugins/transport/http.go new file mode 100644 index 0000000..5be146a --- /dev/null +++ b/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. 
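Putting the transport pieces together: a request factory bound to a fixed scheme and address can be built as in this sketch (the address and path are illustrative):

```go
package main

import (
	"net/http"

	"github.com/docker/docker/pkg/plugins/transport"
)

func main() {
	t := transport.NewHTTPTransport(&http.Transport{}, "http", "127.0.0.1:8080")
	req, err := t.NewRequest("/VolumeDriver.Mount", nil)
	if err != nil {
		panic(err)
	}
	// req is a POST to http://127.0.0.1:8080/VolumeDriver.Mount with the
	// v1 plugin Accept header already set.
	_ = req
}
```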
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/pkg/plugins/transport/http_test.go b/pkg/plugins/transport/http_test.go new file mode 100644 index 0000000..b724fd0 --- /dev/null +++ b/pkg/plugins/transport/http_test.go @@ -0,0 +1,20 @@ +package transport + +import ( + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHTTPTransport(t *testing.T) { + var r io.Reader + roundTripper := &http.Transport{} + newTransport := NewHTTPTransport(roundTripper, "http", "0.0.0.0") + request, err := newTransport.NewRequest("", r) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, "POST", request.Method) +} diff --git a/pkg/plugins/transport/transport.go b/pkg/plugins/transport/transport.go new file mode 100644 index 0000000..d7f1e21 --- /dev/null +++ b/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/pkg/pools/pools.go b/pkg/pools/pools.go new file mode 100644 index 0000000..6a111a3 --- /dev/null +++ b/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. 
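The intended borrow/return pattern for these pools is short-lived: take a reader for the duration of one operation and put it straight back (a minimal sketch; the string source is illustrative):

```go
package main

import (
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src := strings.NewReader("some payload")

	buf := pools.BufioReader32KPool.Get(src) // borrow a 32K bufio.Reader
	defer pools.BufioReader32KPool.Put(buf)  // detach the source and return it

	word, _ := buf.ReadString(' ')
	_ = word
}
```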
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { return make([]byte, size) }, + }, + } +} + +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) +} + +func (bp *bufferPool) Put(b []byte) { + bp.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, buf) + buffer32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. 
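`Copy` above is the drop-in replacement for `io.Copy` on hot paths: it borrows a pooled 32K byte slice instead of letting `io.Copy` allocate a fresh one per call. A minimal usage sketch:

```go
package main

import (
	"bytes"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	var dst bytes.Buffer
	// Same semantics as io.Copy, but the intermediate buffer is pooled.
	if _, err := pools.Copy(&dst, strings.NewReader("payload")); err != nil {
		panic(err)
	}
}
```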
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/pkg/pools/pools_test.go b/pkg/pools/pools_test.go new file mode 100644 index 0000000..d71cb99 --- /dev/null +++ b/pkg/pools/pools_test.go @@ -0,0 +1,166 @@ +package pools + +import ( + "bufio" + "bytes" + "io" + "strings" + "testing" +) + +func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + reader := BufioReader32KPool.Get(nil) + if reader == nil { + t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") + } +} + +func TestBufioReaderPoolPutAndGet(t *testing.T) { + sr := bufio.NewReader(strings.NewReader("foobar")) + reader := BufioReader32KPool.Get(sr) + if reader == nil { + t.Fatalf("BufioReaderPool should not return a nil reader.") + } + // verify the first 3 byte + buf1 := make([]byte, 3) + _, err := reader.Read(buf1) + if err != nil { + t.Fatal(err) + } + if actual := string(buf1); actual != "foo" { + t.Fatalf("The first letter should have been 'foo' but was %v", actual) + } + BufioReader32KPool.Put(reader) + // Try to read the next 3 bytes + _, err = sr.Read(make([]byte, 3)) + if err == nil || err != io.EOF { + t.Fatalf("The buffer should have been empty, issue an EOF error.") + } +} + +type simpleReaderCloser struct { + io.Reader + closed bool +} + +func (r *simpleReaderCloser) Close() error { + r.closed = true + return nil +} + +func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { + br := bufio.NewReader(strings.NewReader("")) + sr := &simpleReaderCloser{ + Reader: strings.NewReader("foobar"), + closed: false, + } + reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) + if reader == nil { + t.Fatalf("NewReadCloserWrapper should not return a nil reader.") + } + // Verify the content of reader + buf := make([]byte, 3) + _, err := reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "foo" { + t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) + } + reader.Close() + // Read 3 more bytes "bar" + _, err = reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "bar" { + t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) + } + if !sr.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} + +func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + writer := BufioWriter32KPool.Get(nil) + if writer == nil { + t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") + } +} + +func TestBufioWriterPoolPutAndGet(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + writer := BufioWriter32KPool.Get(bw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + // Make sure we Flush all the way ? 
+	writer.Flush()
+	bw.Flush()
+	if len(buf.Bytes()) != 6 {
+		t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
+	}
+	// Reset the buffer
+	buf.Reset()
+	BufioWriter32KPool.Put(writer)
+	// Try to write something
+	if _, err = writer.Write([]byte("barfoo")); err != nil {
+		t.Fatal(err)
+	}
+	// If we now try to flush it, it should panic (Put reset the underlying
+	// writer to nil), so recover from the panic
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatal("Trying to flush the writer should have panicked, but did not.")
+		}
+	}()
+	writer.Flush()
+}
+
+type simpleWriterCloser struct {
+	io.Writer
+	closed bool
+}
+
+func (r *simpleWriterCloser) Close() error {
+	r.closed = true
+	return nil
+}
+
+func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
+	buf := new(bytes.Buffer)
+	bw := bufio.NewWriter(buf)
+	sw := &simpleWriterCloser{
+		Writer: new(bytes.Buffer),
+		closed: false,
+	}
+	bw.Flush()
+	writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
+	if writer == nil {
+		t.Fatalf("NewWriteCloserWrapper should not return a nil writer.")
+	}
+	written, err := writer.Write([]byte("foobar"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != 6 {
+		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+	}
+	writer.Close()
+	if !sw.closed {
+		t.Fatalf("The WriteCloser should have been closed, but it is not.")
+	}
+}
+
+func TestBufferPoolPutAndGet(t *testing.T) {
+	buf := buffer32KPool.Get()
+	buffer32KPool.Put(buf)
+}
diff --git a/pkg/progress/progress.go b/pkg/progress/progress.go
new file mode 100644
index 0000000..7c3d3a5
--- /dev/null
+++ b/pkg/progress/progress.go
@@ -0,0 +1,89 @@
+package progress
+
+import (
+	"fmt"
+)
+
+// Progress represents the progress of a transfer.
+type Progress struct {
+	ID string
+
+	// Progress contains a Message or...
+	Message string
+
+	// ...progress of an action
+	Action  string
+	Current int64
+	Total   int64
+
+	// If true, don't show xB/yB
+	HideCounts bool
+	// If not empty, use units instead of bytes for counts
+	Units string
+
+	// Aux contains extra information not presented to the user, such as
+	// digests for push signing.
+	Aux interface{}
+
+	LastUpdate bool
+}
+
+// Output is an interface for writing progress information. It's
+// like a writer for progress, but we don't call it Writer because
+// that would be confusing next to ProgressReader (also, because it
+// doesn't implement the io.Writer interface).
+type Output interface {
+	WriteProgress(Progress) error
+}
+
+type chanOutput chan<- Progress
+
+func (out chanOutput) WriteProgress(p Progress) error {
+	out <- p
+	return nil
+}
+
+// ChanOutput returns an Output that writes progress updates to the
+// supplied channel.
+func ChanOutput(progressChan chan<- Progress) Output {
+	return chanOutput(progressChan)
+}
+
+type discardOutput struct{}
+
+func (discardOutput) WriteProgress(Progress) error {
+	return nil
+}
+
+// DiscardOutput returns an Output that discards progress.
+func DiscardOutput() Output {
+	return discardOutput{}
+}
+
+// Update is a convenience function to write a progress update to the channel.
+func Update(out Output, id, action string) {
+	out.WriteProgress(Progress{ID: id, Action: action})
+}
+
+// Updatef is a convenience function to write a printf-formatted progress update
+// to the channel.
+func Updatef(out Output, id, format string, a ...interface{}) {
+	Update(out, id, fmt.Sprintf(format, a...))
+}
+
+// Message is a convenience function to write a progress message to the channel.
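Wiring an `Output` to a channel is the common pattern: a producer calls the convenience helpers while a consumer drains the channel and renders the bars. A minimal sketch (the IDs and messages are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/progress"
)

func main() {
	ch := make(chan progress.Progress, 10)
	out := progress.ChanOutput(ch)

	progress.Update(out, "layer-1", "Downloading")
	progress.Messagef(out, "layer-1", "retrying in %d seconds", 5)
	close(ch)

	// A real consumer would render these; here we just print them.
	for p := range ch {
		fmt.Println(p.ID, p.Action, p.Message)
	}
}
```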
+func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/pkg/progress/progressreader.go b/pkg/progress/progressreader.go new file mode 100644 index 0000000..6b3927e --- /dev/null +++ b/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. 
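Typical use wraps a stream whose length is known up front, such as an image layer download. A sketch, with an in-memory source standing in for a network body and a goroutine draining the channel so progress writes never block:

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/progress"
)

func main() {
	const size = 1 << 20
	body := ioutil.NopCloser(bytes.NewReader(make([]byte, size)))

	ch := make(chan progress.Progress, 100)
	go func() {
		// A real consumer would render these updates.
		for range ch {
		}
	}()

	pr := progress.NewProgressReader(body, progress.ChanOutput(ch), size, "layer-1", "Downloading")
	defer pr.Close()

	io.Copy(ioutil.Discard, pr)
}
```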
+func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/pkg/progress/progressreader_test.go b/pkg/progress/progressreader_test.go new file mode 100644 index 0000000..690e705 --- /dev/null +++ b/pkg/progress/progressreader_test.go @@ -0,0 +1,75 @@ +package progress + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestOutputOnPrematureClose(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + part := make([]byte, 4) + _, err := io.ReadFull(pr, part) + if err != nil { + pr.Close() + t.Fatal(err) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + default: + t.Fatalf("Expected some output when closing prematurely") + } +} + +func TestCompleteSilently(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + out, err := ioutil.ReadAll(pr) + if err != nil { + pr.Close() + t.Fatal(err) + } + if string(out) != "TESTING" { + pr.Close() + t.Fatalf("Unexpected output %q from reader", string(out)) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + t.Fatalf("Should have closed silently when read is complete") + default: + } +} diff --git a/pkg/progress/progresssink.go b/pkg/progress/progresssink.go new file mode 100644 index 0000000..999431d --- /dev/null +++ b/pkg/progress/progresssink.go @@ -0,0 +1,53 @@ +package progress + +import ( + "time" + "sync" + + "golang.org/x/time/rate" +) + +type Sink struct { + sync.Mutex + out Output // Where to send progress bar to + Size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressSink creates a new ProgressSink. 
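`Sink`, added in this patch, is the write-side counterpart of `Reader`: anything copied through it reports progress, which suits upload paths where you control the writer rather than the reader. A sketch using `io.TeeReader`:

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/progress"
)

func main() {
	payload := make([]byte, 1<<20)

	ch := make(chan progress.Progress, 100)
	go func() {
		// A real consumer would render these updates.
		for range ch {
		}
	}()

	sink := progress.NewProgressSink(progress.ChanOutput(ch), int64(len(payload)), "layer-1", "Uploading")
	io.Copy(ioutil.Discard, io.TeeReader(bytes.NewReader(payload), sink))
}
```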
+func NewProgressSink(out Output, size int64, id, action string) *Sink {
+	return &Sink{
+		out:         out,
+		Size:        size,
+		id:          id,
+		action:      action,
+		rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
+	}
+}
+
+func (p *Sink) Write(buf []byte) (int, error) {
+	p.Lock()
+	defer p.Unlock()
+
+	n := len(buf)
+	p.current += int64(n)
+	updateEvery := int64(1024 * 512) //512kB
+	if p.Size > 0 {
+		// Update progress for every 1% if 1% < 512kB
+		if increment := int64(0.01 * float64(p.Size)); increment < updateEvery {
+			updateEvery = increment
+		}
+	}
+	if p.current-p.lastUpdate > updateEvery || p.current == p.Size {
+		if p.current == p.Size || p.rateLimiter.Allow() {
+			p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.Size, LastUpdate: p.current == p.Size})
+		}
+		p.lastUpdate = p.current
+	}
+
+	return n, nil
+}
diff --git a/pkg/promise/promise.go b/pkg/promise/promise.go
new file mode 100644
index 0000000..dd52b90
--- /dev/null
+++ b/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a function call in a goroutine
+// and returns a channel which will later receive the function's return value.
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/pkg/promise/promise_test.go b/pkg/promise/promise_test.go
new file mode 100644
index 0000000..287213b
--- /dev/null
+++ b/pkg/promise/promise_test.go
@@ -0,0 +1,25 @@
+package promise
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestGo(t *testing.T) {
+	errCh := Go(functionWithError)
+	er := <-errCh
+	require.EqualValues(t, "Error Occurred", er.Error())
+
+	noErrCh := Go(functionWithNoError)
+	er = <-noErrCh
+	require.Nil(t, er)
+}
+
+func functionWithError() (err error) {
+	return errors.New("Error Occurred")
+}
+
+func functionWithNoError() (err error) {
+	return nil
+}
diff --git a/pkg/pubsub/publisher.go b/pkg/pubsub/publisher.go
new file mode 100644
index 0000000..0936461
--- /dev/null
+++ b/pkg/pubsub/publisher.go
@@ -0,0 +1,111 @@
+package pubsub
+
+import (
+	"sync"
+	"time"
+)
+
+var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }}
+
+// NewPublisher creates a new pub/sub publisher to broadcast messages.
+// The duration is used as the send timeout, so that one slow or unresponsive
+// subscriber does not block the publisher from delivering messages to the others.
+// The buffer is used when creating new channels for subscribers.
+func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher {
+	return &Publisher{
+		buffer:      buffer,
+		timeout:     publishTimeout,
+		subscribers: make(map[subscriber]topicFunc),
+	}
+}
+
+type subscriber chan interface{}
+type topicFunc func(v interface{}) bool
+
+// Publisher is a basic pub/sub structure. It allows sending events and
+// subscribing to them, and can be safely used from multiple goroutines.
+type Publisher struct {
+	m           sync.RWMutex
+	buffer      int
+	timeout     time.Duration
+	subscribers map[subscriber]topicFunc
+}
+
+// Len returns the number of subscribers for the publisher.
+func (p *Publisher) Len() int {
+	p.m.RLock()
+	i := len(p.subscribers)
+	p.m.RUnlock()
+	return i
+}
+
+// Subscribe adds a new subscriber to the publisher returning the channel.
+func (p *Publisher) Subscribe() chan interface{} {
+	return p.SubscribeTopic(nil)
+}
+
+// SubscribeTopic adds a new subscriber that filters messages sent by a topic.
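A topic function lets a single publisher fan out to filtered subscribers; only events the function accepts are delivered to that channel. A minimal sketch with string events (the event names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/docker/docker/pkg/pubsub"
)

func main() {
	p := pubsub.NewPublisher(100*time.Millisecond, 8)

	sub := p.SubscribeTopic(func(v interface{}) bool {
		s, ok := v.(string)
		return ok && strings.HasPrefix(s, "container.")
	})

	p.Publish("image.pull")      // filtered out for this subscriber
	p.Publish("container.start") // delivered

	fmt.Println(<-sub)
	p.Evict(sub)
}
```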
+func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. +func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select as to not block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/pkg/pubsub/publisher_test.go b/pkg/pubsub/publisher_test.go new file mode 100644 index 0000000..d6b0a1d --- /dev/null +++ b/pkg/pubsub/publisher_test.go @@ -0,0 +1,142 @@ +package pubsub + +import ( + "fmt" + "testing" + "time" +) + +func TestSendToOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + c := p.Subscribe() + + p.Publish("hi") + + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestSendToMultipleSubs(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + + p.Publish("hi") + + for _, c := range subs { + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } + } +} + +func TestEvictOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + s1 := p.Subscribe() + s2 := p.Subscribe() + + p.Evict(s1) + p.Publish("hi") + if _, ok := <-s1; ok { + t.Fatal("expected s1 to not receive the published message") + } + + msg := <-s2 + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestClosePublisher(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + p.Close() + + for _, c := range subs { + if _, ok := <-c; ok { + t.Fatal("expected all subscriber channels to be closed") + } + } +} + +const sampleText = "test" + +type testSubscriber struct { + dataCh chan interface{} + ch chan error +} + +func (s *testSubscriber) Wait() error { + return <-s.ch +} + +func newTestSubscriber(p *Publisher) *testSubscriber { + ts := &testSubscriber{ + dataCh: p.Subscribe(), + ch: make(chan error), + } + go func() { + for data := range ts.dataCh { + s, ok := data.(string) + if !ok { + ts.ch <- fmt.Errorf("Unexpected type %T", data) + break + } + if s != sampleText { + ts.ch <- fmt.Errorf("Unexpected text %s", s) + break + } + } + close(ts.ch) + }() + return ts +} + +// for testing with -race +func TestPubSubRace(t *testing.T) { + p := 
NewPublisher(0, 1024)
+	var subs [](*testSubscriber)
+	for j := 0; j < 50; j++ {
+		subs = append(subs, newTestSubscriber(p))
+	}
+	for j := 0; j < 1000; j++ {
+		p.Publish(sampleText)
+	}
+	time.AfterFunc(1*time.Second, func() {
+		for _, s := range subs {
+			p.Evict(s.dataCh)
+		}
+	})
+	for _, s := range subs {
+		s.Wait()
+	}
+}
+
+func BenchmarkPubSub(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		p := NewPublisher(0, 1024)
+		var subs [](*testSubscriber)
+		for j := 0; j < 50; j++ {
+			subs = append(subs, newTestSubscriber(p))
+		}
+		b.StartTimer()
+		for j := 0; j < 1000; j++ {
+			p.Publish(sampleText)
+		}
+		time.AfterFunc(1*time.Second, func() {
+			for _, s := range subs {
+				p.Evict(s.dataCh)
+			}
+		})
+		for _, s := range subs {
+			if err := s.Wait(); err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
}
diff --git a/pkg/reexec/README.md b/pkg/reexec/README.md
new file mode 100644
index 0000000..6658f69
--- /dev/null
+++ b/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+# reexec
+
+The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
+because of the forking limitations of Go. Handlers can be registered under a name, and argv[0]
+of the re-exec'd binary is used to find and execute the matching custom init path.
diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go
new file mode 100644
index 0000000..34ae2a9
--- /dev/null
+++ b/pkg/reexec/command_linux.go
@@ -0,0 +1,28 @@
+// +build linux
+
+package reexec
+
+import (
+	"os/exec"
+	"syscall"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd which has Path set to the current binary. It also
+// sets SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary;
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+		SysProcAttr: &syscall.SysProcAttr{
+			Pdeathsig: syscall.SIGTERM,
+		},
+	}
+}
diff --git a/pkg/reexec/command_unix.go b/pkg/reexec/command_unix.go
new file mode 100644
index 0000000..778a720
--- /dev/null
+++ b/pkg/reexec/command_unix.go
@@ -0,0 +1,23 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go
new file mode 100644
index 0000000..76edd82
--- /dev/null
+++ b/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd {
+	return nil
+}
diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go
new file mode 100644
index 0000000..ca871c4
--- /dev/null
+++ b/pkg/reexec/command_windows.go
@@ -0,0 +1,23 @@
+// +build windows
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/pkg/reexec/reexec.go b/pkg/reexec/reexec.go new file mode 100644 index 0000000..c56671d --- /dev/null +++ b/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/pkg/reexec/reexec_test.go b/pkg/reexec/reexec_test.go new file mode 100644 index 0000000..39e87a4 --- /dev/null +++ b/pkg/reexec/reexec_test.go @@ -0,0 +1,53 @@ +package reexec + +import ( + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + Register("reexec", func() { + panic("Return Error") + }) + Init() +} + +func TestRegister(t *testing.T) { + defer func() { + if r := recover(); r != nil { + require.Equal(t, `reexec func already registered under name "reexec"`, r) + } + }() + Register("reexec", func() {}) +} + +func TestCommand(t *testing.T) { + cmd := Command("reexec") + w, err := cmd.StdinPipe() + require.NoError(t, err, "Error on pipe creation: %v", err) + defer w.Close() + + err = cmd.Start() + require.NoError(t, err, "Error on re-exec cmd: %v", err) + err = cmd.Wait() + require.EqualError(t, err, "exit status 2") +} + +func TestNaiveSelf(t *testing.T) { + if os.Getenv("TEST_CHECK") == "1" { + os.Exit(2) + } + cmd := exec.Command(naiveSelf(), "-test.run=TestNaiveSelf") + cmd.Env = append(os.Environ(), "TEST_CHECK=1") + err := cmd.Start() + require.NoError(t, err, "Unable to start command") + err = cmd.Wait() + require.EqualError(t, err, "exit status 2") + + os.Args[0] = "mkdir" + assert.NotEqual(t, naiveSelf(), os.Args[0]) +} diff --git a/pkg/registrar/registrar.go b/pkg/registrar/registrar.go new file mode 100644 index 0000000..df12db7 --- /dev/null +++ b/pkg/registrar/registrar.go @@ -0,0 +1,130 @@ +// Package registrar provides name registration. It reserves a name to a given key. 
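+//
+// A typical round trip (the names and keys are illustrative):
+//
+//	r := registrar.NewRegistrar()
+//	_ = r.Reserve("web", "container-1") // nil: "web" is now reserved
+//	_ = r.Reserve("web", "container-2") // ErrNameReserved: taken by another key
+//	r.Release("web")                    // "web" can be reserved again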
+package registrar
+
+import (
+	"errors"
+	"sync"
+)
+
+var (
+	// ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved
+	ErrNameReserved = errors.New("name is reserved")
+	// ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved
+	ErrNameNotReserved = errors.New("name is not reserved")
+	// ErrNoSuchKey is returned when trying to find the names for a key which is not known
+	ErrNoSuchKey = errors.New("provided key does not exist")
+)
+
+// Registrar stores and indexes a list of keys and their registered names, as well
+// as an index from each name back to the key it is registered to.
+// Names must be unique.
+// Registrar is safe for concurrent access.
type Registrar struct {
+	idx   map[string][]string
+	names map[string]string
+	mu    sync.Mutex
+}
+
+// NewRegistrar creates a new Registrar with an empty index
+func NewRegistrar() *Registrar {
+	return &Registrar{
+		idx:   make(map[string][]string),
+		names: make(map[string]string),
+	}
+}
+
+// Reserve registers a key to a name
+// Reserve is idempotent
+// Attempting to reserve a name that is already reserved to a different key results in `ErrNameReserved`
+// A name reservation is globally unique
+func (r *Registrar) Reserve(name, key string) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if k, exists := r.names[name]; exists {
+		if k != key {
+			return ErrNameReserved
+		}
+		return nil
+	}
+
+	r.idx[key] = append(r.idx[key], name)
+	r.names[name] = key
+	return nil
+}
+
+// Release releases the reserved name
+// Once released, a name can be reserved again
+func (r *Registrar) Release(name string) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	key, exists := r.names[name]
+	if !exists {
+		return
+	}
+
+	for i, n := range r.idx[key] {
+		if n != name {
+			continue
+		}
+		r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...)
+		break
+	}
+
+	delete(r.names, name)
+
+	if len(r.idx[key]) == 0 {
+		delete(r.idx, key)
+	}
+}
+
+// Delete removes all reservations for the passed in key.
+// All names reserved to this key are released.
+func (r *Registrar) Delete(key string) {
+	r.mu.Lock()
+	for _, name := range r.idx[key] {
+		delete(r.names, name)
+	}
+	delete(r.idx, key)
+	r.mu.Unlock()
+}
+
+// GetNames lists all the reserved names for the given key
+func (r *Registrar) GetNames(key string) ([]string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	names, exists := r.idx[key]
+	if !exists {
+		return nil, ErrNoSuchKey
+	}
+
+	// return a copy so callers cannot mutate the registrar's internal slice
+	ls := make([]string, 0, len(names))
+	ls = append(ls, names...)
+	return ls, nil
+}
+
+// Get returns the key that the passed in name is reserved to
+func (r *Registrar) Get(name string) (string, error) {
+	r.mu.Lock()
+	key, exists := r.names[name]
+	r.mu.Unlock()
+
+	if !exists {
+		return "", ErrNameNotReserved
+	}
+	return key, nil
+}
+
+// GetAll returns all registered names
+func (r *Registrar) GetAll() map[string][]string {
+	out := make(map[string][]string)
+
+	r.mu.Lock()
+	// copy index into out (note: the name slices themselves are shared with the index)
+	for id, names := range r.idx {
+		out[id] = names
+	}
+	r.mu.Unlock()
+	return out
+}
diff --git a/pkg/registrar/registrar_test.go b/pkg/registrar/registrar_test.go
new file mode 100644
index 0000000..0c1ef31
--- /dev/null
+++ b/pkg/registrar/registrar_test.go
@@ -0,0 +1,119 @@
+package registrar
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestReserve(t *testing.T) {
+	r := NewRegistrar()
+
+	obj := "test1"
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+
+	obj2 := "test2"
+	err := r.Reserve("test", obj2)
+	if err == nil {
+		t.Fatalf("expected error when reserving an already reserved name to another object")
+	}
+	if err != ErrNameReserved {
+		t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name")
+	}
+}
+
+func TestRelease(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+	r.Release("test")
+	r.Release("test") // Ensure there is no panic here
+
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestGetNames(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+	names := []string{"test1", "test2"}
+
+	for _, name := range names {
+		if err := r.Reserve(name, obj); err != nil {
+			t.Fatal(err)
+		}
+	}
+	r.Reserve("test3", "other")
+
+	names2, err := r.GetNames(obj)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(names, names2) {
+		t.Fatalf("Expected: %v, Got: %v", names, names2)
+	}
+}
+
+func TestDelete(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+	names := []string{"test1", "test2"}
+	for _, name := range names {
+		if err := r.Reserve(name, obj); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	r.Reserve("test3", "other")
+	r.Delete(obj)
+
+	_, err := r.GetNames(obj)
+	if err == nil {
+		t.Fatal("expected error getting names for deleted key")
+	}
+
+	if err != ErrNoSuchKey {
+		t.Fatal("expected `ErrNoSuchKey`")
+	}
+}
+
+func TestGet(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+	name := "test"
+
+	_, err := r.Get(name)
+	if err == nil {
+		t.Fatal("expected error when key does not exist")
+	}
+	if err != ErrNameNotReserved {
+		t.Fatal(err)
+	}
+
+	if err := r.Reserve(name, obj); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = r.Get(name); err != nil {
+		t.Fatal(err)
+	}
+
+	r.Delete(obj)
+	_, err = r.Get(name)
+	if err == nil {
+		t.Fatal("expected error when key does not exist")
+	}
+	if err != ErrNameNotReserved {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/signal/README.md b/pkg/signal/README.md
new file mode 100644
index 0000000..2b237a5
--- /dev/null
+++ b/pkg/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems
\ No newline at end of file
diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go
new file mode 100644
index 0000000..68bb77c
--- /dev/null
+++ b/pkg/signal/signal.go
@@ -0,0 +1,54 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
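+//
+// For example (a sketch; both spellings resolve to the same signal):
+//
+//	sig, err := signal.ParseSignal("SIGTERM") // name looked up in SignalMap
+//	sig, err = signal.ParseSignal("15")       // numeric form, parsed directly (15 is SIGTERM on Linux)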
+package signal
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+	handledSigs := []os.Signal{}
+	for _, s := range SignalMap {
+		handledSigs = append(handledSigs, s)
+	}
+	signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+	signal.Stop(sigc)
+	close(sigc)
+}
+
+// ParseSignal translates a string to a valid syscall signal.
+// It returns an error if the signal map doesn't include the given signal.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+	s, err := strconv.Atoi(rawSignal)
+	if err == nil {
+		if s == 0 {
+			return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+		}
+		return syscall.Signal(s), nil
+	}
+	signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+	if !ok {
+		return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+	}
+	return signal, nil
+}
+
+// ValidSignalForPlatform returns true if a signal is valid on the current platform.
+func ValidSignalForPlatform(sig syscall.Signal) bool {
+	for _, v := range SignalMap {
+		if v == sig {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go
new file mode 100644
index 0000000..946de87
--- /dev/null
+++ b/pkg/signal/signal_darwin.go
@@ -0,0 +1,41 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of Darwin signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go
new file mode 100644
index 0000000..6b9569b
--- /dev/null
+++ b/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
new file mode 100644
index 0000000..d418cbe
--- /dev/null
+++ b/pkg/signal/signal_linux.go
@@ -0,0 +1,80 @@
+package signal
+
+import (
+	"syscall"
+)
+
+const (
+	sigrtmin = 34
+	sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":     syscall.SIGABRT,
+	"ALRM":     syscall.SIGALRM,
+	"BUS":      syscall.SIGBUS,
+	"CHLD":     syscall.SIGCHLD,
+	"CLD":      syscall.SIGCLD,
+	"CONT":     syscall.SIGCONT,
+	"FPE":      syscall.SIGFPE,
+	"HUP":      syscall.SIGHUP,
+	"ILL":      syscall.SIGILL,
+	"INT":      syscall.SIGINT,
+	"IO":       syscall.SIGIO,
+	"IOT":      syscall.SIGIOT,
+	"KILL":     syscall.SIGKILL,
+	"PIPE":     syscall.SIGPIPE,
+	"POLL":     syscall.SIGPOLL,
+	"PROF":     syscall.SIGPROF,
+	"PWR":      syscall.SIGPWR,
+	"QUIT":     syscall.SIGQUIT,
+	"SEGV":     syscall.SIGSEGV,
+	"STKFLT":   syscall.SIGSTKFLT,
+	"STOP":     syscall.SIGSTOP,
+	"SYS":      syscall.SIGSYS,
+	"TERM":     syscall.SIGTERM,
+	"TRAP":     syscall.SIGTRAP,
+	"TSTP":     syscall.SIGTSTP,
+	"TTIN":     syscall.SIGTTIN,
+	"TTOU":     syscall.SIGTTOU,
+	"UNUSED":   syscall.SIGUNUSED,
+	"URG":      syscall.SIGURG,
+	"USR1":     syscall.SIGUSR1,
+	"USR2":     syscall.SIGUSR2,
+	"VTALRM":   syscall.SIGVTALRM,
+	"WINCH":    syscall.SIGWINCH,
+	"XCPU":     syscall.SIGXCPU,
+	"XFSZ":     syscall.SIGXFSZ,
+	"RTMIN":    sigrtmin,
+	"RTMIN+1":  sigrtmin + 1,
+	"RTMIN+2":  sigrtmin + 2,
+	"RTMIN+3":  sigrtmin + 3,
+	"RTMIN+4":  sigrtmin + 4,
+	"RTMIN+5":  sigrtmin + 5,
+	"RTMIN+6":  sigrtmin + 6,
+	"RTMIN+7":  sigrtmin + 7,
+	"RTMIN+8":  sigrtmin + 8,
+	"RTMIN+9":  sigrtmin + 9,
+	"RTMIN+10": sigrtmin + 10,
+	"RTMIN+11": sigrtmin + 11,
+	"RTMIN+12": sigrtmin + 12,
+	"RTMIN+13": sigrtmin + 13,
+	"RTMIN+14": sigrtmin + 14,
+	"RTMIN+15": sigrtmin + 15,
+	"RTMAX-14": sigrtmax - 14,
+	"RTMAX-13": sigrtmax - 13,
+	"RTMAX-12": sigrtmax - 12,
+	"RTMAX-11": sigrtmax - 11,
+	"RTMAX-10": sigrtmax - 10,
+	"RTMAX-9":  sigrtmax - 9,
+	"RTMAX-8":  sigrtmax - 8,
+	"RTMAX-7":  sigrtmax - 7,
+	"RTMAX-6":  sigrtmax - 6,
+	"RTMAX-5":  sigrtmax - 5,
+	"RTMAX-4":  sigrtmax - 4,
+	"RTMAX-3":  sigrtmax - 3,
+	"RTMAX-2":  sigrtmax - 2,
+	"RTMAX-1":  sigrtmax - 1,
+	"RTMAX":    sigrtmax,
+}
diff --git a/pkg/signal/signal_linux_test.go b/pkg/signal/signal_linux_test.go
new file mode 100644
index 0000000..32c056f
--- /dev/null
+++ b/pkg/signal/signal_linux_test.go
@@ -0,0 +1,58 @@
+// +build darwin linux solaris
+
+package signal
+
+import (
+	"os"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCatchAll(t *testing.T) {
+	sigs := make(chan os.Signal, 1)
+	CatchAll(sigs)
+	defer StopCatch(sigs)
+
+	listOfSignals := 
map[string]string{
+		"CONT": syscall.SIGCONT.String(),
+		"HUP":  syscall.SIGHUP.String(),
+		"CHLD": syscall.SIGCHLD.String(),
+		"ILL":  syscall.SIGILL.String(),
+		"FPE":  syscall.SIGFPE.String(),
+		"CLD":  syscall.SIGCLD.String(),
+	}
+
+	for sigStr := range listOfSignals {
+		signal, ok := SignalMap[sigStr]
+		if ok {
+			go func() {
+				time.Sleep(1 * time.Millisecond)
+				syscall.Kill(syscall.Getpid(), signal)
+			}()
+
+			s := <-sigs
+			assert.EqualValues(t, s.String(), signal.String())
+		}
+	}
+}
+
+func TestStopCatch(t *testing.T) {
+	signal := SignalMap["HUP"]
+	channel := make(chan os.Signal, 1)
+	CatchAll(channel)
+	go func() {
+		time.Sleep(1 * time.Millisecond)
+		syscall.Kill(syscall.Getpid(), signal)
+	}()
+	signalString := <-channel
+	assert.EqualValues(t, signalString.String(), signal.String())
+
+	StopCatch(channel)
+	_, ok := <-channel
+	assert.EqualValues(t, ok, false)
+}
diff --git a/pkg/signal/signal_solaris.go b/pkg/signal/signal_solaris.go
new file mode 100644
index 0000000..89576b9
--- /dev/null
+++ b/pkg/signal/signal_solaris.go
@@ -0,0 +1,42 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of Solaris signals.
+// SIGINFO and SIGTHR are not defined for Solaris.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_test.go b/pkg/signal/signal_test.go
new file mode 100644
index 0000000..df02f5b
--- /dev/null
+++ b/pkg/signal/signal_test.go
@@ -0,0 +1,33 @@
+package signal
+
+import (
+	"syscall"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParseSignal(t *testing.T) {
+	_, checkAtoiError := ParseSignal("0")
+	assert.EqualError(t, checkAtoiError, "Invalid signal: 0")
+
+	_, err := ParseSignal("SIG")
+	assert.EqualError(t, err, "Invalid signal: SIG")
+
+	for sigStr := range SignalMap {
+		responseSignal, err := ParseSignal(sigStr)
+		assert.NoError(t, err)
+		signal := SignalMap[sigStr]
+		assert.EqualValues(t, signal, responseSignal)
+	}
+}
+
+func TestValidSignalForPlatform(t *testing.T) {
+	isValidSignal := ValidSignalForPlatform(syscall.Signal(0))
+	assert.EqualValues(t, false, isValidSignal)
+
+	for _, sigN := range SignalMap {
+		isValidSignal = ValidSignalForPlatform(syscall.Signal(sigN))
+		assert.EqualValues(t, true, isValidSignal)
+	}
+}
diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go
new file mode 100644
index 0000000..5d058fd
--- /dev/null
+++ b/pkg/signal/signal_unix.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package signal
+
+import (
+	"syscall"
+)
+
+// Signals used in cli/command.
+
+const (
+	// SIGCHLD is a signal sent to a process when a child process terminates, is
interrupted, or resumes after being interrupted. + SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go new file mode 100644 index 0000000..c592d37 --- /dev/null +++ b/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package signal + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. +var SignalMap = map[string]syscall.Signal{} diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go new file mode 100644 index 0000000..440f270 --- /dev/null +++ b/pkg/signal/signal_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. +var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go new file mode 100644 index 0000000..638a1ab --- /dev/null +++ b/pkg/signal/trap.go @@ -0,0 +1,103 @@ +package signal + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) 
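+	// Handle the registered signals on a separate goroutine so that Trap
+	// returns immediately and the caller can continue its own startup logic.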
+	go func() {
+		interruptCount := uint32(0)
+		for sig := range c {
+			if sig == syscall.SIGPIPE {
+				continue
+			}
+
+			go func(sig os.Signal) {
+				logrus.Infof("Processing signal '%v'", sig)
+				switch sig {
+				case os.Interrupt, syscall.SIGTERM:
+					if atomic.LoadUint32(&interruptCount) < 3 {
+						// Initiate the cleanup only once
+						if atomic.AddUint32(&interruptCount, 1) == 1 {
+							// Call the provided cleanup handler
+							cleanup()
+							os.Exit(0)
+						} else {
+							return
+						}
+					} else {
+						// 3 SIGTERM/INT signals received; force exit without cleanup
+						logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+					}
+				case syscall.SIGQUIT:
+					DumpStacks("")
+					logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+				}
+				// for the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal #
+				os.Exit(128 + int(sig.(syscall.Signal)))
+			}(sig)
+		}
+	}()
+}
+
+const stacksLogNameTemplate = "goroutine-stacks-%s.log"
+
+// DumpStacks writes the runtime stack to a file in dir and returns the full
+// path to that file.
+func DumpStacks(dir string) (string, error) {
+	var (
+		buf       []byte
+		stackSize int
+	)
+	bufferLen := 16384
+	for stackSize == len(buf) {
+		buf = make([]byte, bufferLen)
+		stackSize = runtime.Stack(buf, true)
+		bufferLen *= 2
+	}
+	buf = buf[:stackSize]
+	var f *os.File
+	if dir != "" {
+		path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1)))
+		var err error
+		f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
+		if err != nil {
+			return "", errors.Wrap(err, "failed to open file to write the goroutine stacks")
+		}
+		defer f.Close()
+		defer f.Sync()
+	} else {
+		f = os.Stderr
+	}
+	if _, err := f.Write(buf); err != nil {
+		return "", errors.Wrap(err, "failed to write goroutine stacks")
+	}
+	return f.Name(), nil
+}
diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go
new file mode 100644
index 0000000..a018a20
--- /dev/null
+++ b/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,190 @@
+package stdcopy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
type StdType byte
+
+const (
+	// Stdin represents standard input stream type.
+	Stdin StdType = iota
+	// Stdout represents standard output stream type.
+	Stdout
+	// Stderr represents standard error stream type.
+	Stderr
+	// Systemerr represents errors originating from the system that make it
+	// into the multiplexed stream.
+	Systemerr
+
+	stdWriterPrefixLen = 8
+	stdWriterFdIndex   = 0
+	stdWriterSizeIndex = 4
+
+	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is wrapper of io.Writer with extra customized info.
type stdWriter struct {
+	io.Writer
+	prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows where to multiplex the output.
+// This makes stdWriter implement io.Writer.
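+//
+// The 8-byte header written before each frame is laid out as:
+//
+//	[0]   stream byte (Stdin, Stdout, Stderr or Systemerr)
+//	[1-3] unused padding
+//	[4-7] big-endian uint32 payload length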
+func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + stream := StdType(buf[stdWriterFdIndex]) + // Check the first byte to know where to write + switch stream { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. 
+		if stream == Systemerr {
+			return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+		}
+
+		// Write the retrieved frame (without header)
+		nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+		if ew != nil {
+			return 0, ew
+		}
+
+		// If the frame has not been fully written: error
+		if nw != frameSize {
+			return 0, io.ErrShortWrite
+		}
+		written += int64(nw)
+
+		// Move the rest of the buffer to the beginning
+		copy(buf, buf[frameSize+stdWriterPrefixLen:])
+		// Move the index
+		nr -= frameSize + stdWriterPrefixLen
+	}
+}
diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go
new file mode 100644
index 0000000..b3e2c4d
--- /dev/null
+++ b/pkg/stdcopy/stdcopy_test.go
@@ -0,0 +1,289 @@
+package stdcopy
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestNewStdWriter(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	if writer == nil {
+		t.Fatalf("NewStdWriter should not return nil.")
+	}
+}
+
+func TestWriteWithUninitializedStdWriter(t *testing.T) {
+	writer := stdWriter{
+		Writer: nil,
+		prefix: byte(Stdout),
+	}
+	n, err := writer.Write([]byte("Something here"))
+	if n != 0 || err == nil {
+		t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter")
+	}
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	n, err := writer.Write(nil)
+	if err != nil {
+		t.Fatalf("Shouldn't have failed when given no data")
+	}
+	if n > 0 {
+		t.Fatalf("Write should have written 0 bytes, but has written %d", n)
+	}
+}
+
+func TestWrite(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test StdWrite.Write")
+	n, err := writer.Write(data)
+	if err != nil {
+		t.Fatalf("Error while writing with StdWrite")
+	}
+	if n != len(data) {
+		t.Fatalf("Write should have written %d bytes but wrote %d.", len(data), n)
+	}
+}
+
+type errWriter struct {
+	n   int
+	err error
+}
+
+func (f *errWriter) Write(buf []byte) (int, error) {
+	return f.n, f.err
+}
+
+func TestWriteWithWriterError(t *testing.T) {
+	expectedError := errors.New("expected")
+	expectedReturnedBytes := 10
+	writer := NewStdWriter(&errWriter{
+		n:   stdWriterPrefixLen + expectedReturnedBytes,
+		err: expectedError}, Stdout)
+	data := []byte("This won't get written, sigh")
+	n, err := writer.Write(data)
+	if err != expectedError {
+		t.Fatalf("Didn't get expected error.")
+	}
+	if n != expectedReturnedBytes {
+		t.Fatalf("Didn't get expected written bytes %d, got %d.",
+			expectedReturnedBytes, n)
+	}
+}
+
+func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) {
+	writer := NewStdWriter(&errWriter{n: -1}, Stdout)
+	data := []byte("This won't get written, sigh")
+	actual, _ := writer.Write(data)
+	if actual != 0 {
+		t.Fatalf("Expected returned written bytes equal to 0, got %d", actual)
+	}
+}
+
+func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) {
+	buffer = new(bytes.Buffer)
+	dstOut := NewStdWriter(buffer, Stdout)
+	_, err = dstOut.Write(stdOutBytes)
+	if err != nil {
+		return
+	}
+	dstErr := NewStdWriter(buffer, Stderr)
+	_, err = dstErr.Write(stdErrBytes)
+	return
+}
+
+func TestStdCopyWriteAndRead(t *testing.T) {
+	stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+	stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+	buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	written, err := 
StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil { + t.Fatal(err) + } + expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) + } + if err != nil { + t.Fatal("Didn't get nil error") + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", err) + } +} + +func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { + stdOutBytes := 
[]byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + dstOut := &errWriter{n: startingBufLen - 10} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) + } + if err != io.ErrShortWrite { + t.Fatalf("Didn't get expected io.ErrShortWrite error") + } +} + +// TestStdCopyReturnsErrorFromSystem tests that StdCopy correctly returns an +// error, when that error is muxed into the Systemerr stream. +func TestStdCopyReturnsErrorFromSystem(t *testing.T) { + // write in the basic messages, just so there's some fluff in there + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + // add in an error message on the Systemerr stream + systemErrBytes := []byte(strings.Repeat("S", startingBufLen)) + systemWriter := NewStdWriter(buffer, Systemerr) + _, err = systemWriter.Write(systemErrBytes) + if err != nil { + t.Fatal(err) + } + + // now copy and demux. we should expect an error containing the string we + // wrote out + _, err = StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err == nil { + t.Fatal("expected error, got none") + } + if !strings.Contains(err.Error(), string(systemErrBytes)) { + t.Fatal("expected error to contain message") + } +} + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go new file mode 100644 index 0000000..c4f5575 --- /dev/null +++ b/pkg/streamformatter/streamformatter.go @@ -0,0 +1,159 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +const streamNewline = "\r\n" + +type jsonProgressFormatter struct{} + +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// FormatError formats the error as a JSON object +func FormatError(err error) []byte { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) +} + +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return FormatStatus(id, format, a...) +} + +// formatProgress formats the progress information for a specified action. 
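+// The result is a serialized jsonmessage.JSONMessage, e.g. (abridged, field
+// values illustrative):
+//
+//	{"status":"Downloading","progressDetail":{"current":1,"total":2},"id":"layer"}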
+func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	var auxJSON *json.RawMessage
+	if aux != nil {
+		auxJSONBytes, err := json.Marshal(aux)
+		if err != nil {
+			return nil
+		}
+		auxJSON = new(json.RawMessage)
+		*auxJSON = auxJSONBytes
+	}
+	b, err := json.Marshal(&jsonmessage.JSONMessage{
+		Status:          action,
+		ProgressMessage: progress.String(),
+		Progress:        progress,
+		ID:              id,
+		Aux:             auxJSON,
+	})
+	if err != nil {
+		return nil
+	}
+	return appendNewline(b)
+}
+
+type rawProgressFormatter struct{}
+
+func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte {
+	return []byte(fmt.Sprintf(format, a...) + streamNewline)
+}
+
+func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	endl := "\r"
+	if progress.String() == "" {
+		endl += "\n"
+	}
+	return []byte(action + " " + progress.String() + endl)
+}
+
+// NewProgressOutput returns a progress.Output object that can be passed to
+// progress.NewProgressReader.
+func NewProgressOutput(out io.Writer) progress.Output {
+	return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true}
+}
+
+// NewJSONProgressOutput returns a progress.Output that formats output
+// using JSON objects
+func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output {
+	return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines}
+}
+
+type formatProgress interface {
+	formatStatus(id, format string, a ...interface{}) []byte
+	formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte
+}
+
+type progressOutput struct {
+	sf       formatProgress
+	out      io.Writer
+	newLines bool
+}
+
+// WriteProgress formats progress information from a ProgressReader.
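+//
+// A short wiring sketch (the writer and field values are illustrative):
+//
+//	out := NewJSONProgressOutput(os.Stdout, false)
+//	err := out.WriteProgress(progress.Progress{ID: "id", Action: "Extracting", Current: 10, Total: 100})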
+func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.formatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.formatStatus("", "")) + return err + } + + return nil +} + +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { + io.Writer +} + +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err + } + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON}) + if err != nil { + return err + } + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite + } + return err +} diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go new file mode 100644 index 0000000..c5c70d7 --- /dev/null +++ b/pkg/streamformatter/streamformatter_test.go @@ -0,0 +1,109 @@ +package streamformatter + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRawProgressFormatterFormatStatus(t *testing.T) { + sf := rawProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Equal(t, "a1\r\n", string(res)) +} + +func TestRawProgressFormatterFormatProgress(t *testing.T) { + sf := rawProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.formatProgress("id", "action", jsonProgress, nil) + out := string(res) + assert.True(t, strings.HasPrefix(out, "action [====")) + assert.Contains(t, out, "15B/30B") + assert.True(t, strings.HasSuffix(out, "\r")) +} + +func TestFormatStatus(t *testing.T) { + res := FormatStatus("ID", "%s%d", "a", 1) + expected := `{"status":"a1","id":"ID"}` + streamNewline + assert.Equal(t, expected, string(res)) +} + +func TestFormatError(t *testing.T) { + res := FormatError(errors.New("Error for formatter")) + expected := `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}` + "\r\n" + assert.Equal(t, expected, string(res)) +} + +func TestFormatJSONError(t *testing.T) { + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := FormatError(err) + expected := `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}` + streamNewline + assert.Equal(t, expected, string(res)) +} + +func TestJsonProgressFormatterFormatProgress(t *testing.T) { + sf := &jsonProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.formatProgress("id", "action", jsonProgress, &AuxFormatter{Writer: &bytes.Buffer{}}) + msg := &jsonmessage.JSONMessage{} + + require.NoError(t, json.Unmarshal(res, msg)) + assert.Equal(t, "id", msg.ID) + assert.Equal(t, "action", msg.Status) + + // jsonProgress will always be in the format of: + // [=========================> ] 15B/30B 412910h51m30s + // 
The last entry '404933h7m11s' is the timeLeftBox. + // However, the timeLeftBox field may change as jsonProgress.String() depends on time.Now(). + // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. + + // Compare the jsonProgress strings before the timeLeftBox + expectedProgress := "[=========================> ] 15B/30B" + // if terminal column is <= 110, expectedProgressShort is expected. + expectedProgressShort := " 15B/30B" + if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || + strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { + t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", + expectedProgress, expectedProgressShort, msg.ProgressMessage) + } + + assert.Equal(t, jsonProgress, msg.Progress) +} + +func TestJsonProgressFormatterFormatStatus(t *testing.T) { + sf := jsonProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Equal(t, `{"status":"a1","id":"ID"}`+streamNewline, string(res)) +} + +func TestNewJSONProgressOutput(t *testing.T) { + b := bytes.Buffer{} + b.Write(FormatStatus("id", "Downloading")) + _ = NewJSONProgressOutput(&b, false) + assert.Equal(t, `{"status":"Downloading","id":"id"}`+streamNewline, b.String()) +} + +func TestAuxFormatterEmit(t *testing.T) { + b := bytes.Buffer{} + aux := &AuxFormatter{Writer: &b} + sampleAux := &struct { + Data string + }{"Additional data"} + err := aux.Emit(sampleAux) + require.NoError(t, err) + assert.Equal(t, `{"aux":{"Data":"Additional data"}}`+streamNewline, b.String()) +} diff --git a/pkg/streamformatter/streamwriter.go b/pkg/streamformatter/streamwriter.go new file mode 100644 index 0000000..141d12e --- /dev/null +++ b/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw *streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/pkg/streamformatter/streamwriter_test.go b/pkg/streamformatter/streamwriter_test.go new file mode 100644 index 0000000..4935cc5 --- /dev/null +++ b/pkg/streamformatter/streamwriter_test.go @@ -0,0 +1,35 @@ +package streamformatter + +import ( + "testing" + + "bytes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStreamWriterStdout(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStdoutWriter(buffer) + size, err := sw.Write([]byte(content)) + + require.NoError(t, err) + assert.Equal(t, len(content), size) + 
+ expected := `{"stream":"content"}` + streamNewline + assert.Equal(t, expected, buffer.String()) +} + +func TestStreamWriterStderr(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStderrWriter(buffer) + size, err := sw.Write([]byte(content)) + + require.NoError(t, err) + assert.Equal(t, len(content), size) + + expected := `{"stream":"\u001b[91mcontent\u001b[0m"}` + streamNewline + assert.Equal(t, expected, buffer.String()) +} diff --git a/pkg/stringid/README.md b/pkg/stringid/README.md new file mode 100644 index 0000000..37a5098 --- /dev/null +++ b/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go new file mode 100644 index 0000000..a0c7c42 --- /dev/null +++ b/pkg/stringid/stringid.go @@ -0,0 +1,99 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "regexp" + "strconv" + "strings" + "time" +) + +const shortLen = 12 + +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(r io.Reader) string { + b := make([]byte, 32) + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(cryptorand.Reader) +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(readerFunc(rand.Read)) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func init() { + // safely set the seed globally so we generate random ids. Tries to use a + // crypto seed before falling back to time. + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. 
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) +} diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go new file mode 100644 index 0000000..8ff6b43 --- /dev/null +++ b/pkg/stringid/stringid_test.go @@ -0,0 +1,72 @@ +package stringid + +import ( + "strings" + "testing" +) + +func TestGenerateRandomID(t *testing.T) { + id := GenerateRandomID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestGenerateNonCryptoID(t *testing.T) { + id := GenerateNonCryptoID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestShortenId(t *testing.T) { + id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2" + truncID := TruncateID(id) + if truncID != "90435eec5c4e" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenSha256Id(t *testing.T) { + id := "sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba" + truncID := TruncateID(id) + if truncID != "4e38e38c8ce0" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdEmpty(t *testing.T) { + id := "" + truncID := TruncateID(id) + if len(truncID) > len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdInvalid(t *testing.T) { + id := "1234" + truncID := TruncateID(id) + if len(truncID) != len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestIsShortIDNonHex(t *testing.T) { + id := "some non-hex value" + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} + +func TestIsShortIDNotCorrectSize(t *testing.T) { + id := strings.Repeat("a", shortLen+1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } + id = strings.Repeat("a", shortLen-1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} diff --git a/pkg/stringutils/README.md b/pkg/stringutils/README.md new file mode 100644 index 0000000..b3e4545 --- /dev/null +++ b/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go new file mode 100644 index 0000000..8c4c398 --- /dev/null +++ b/pkg/stringutils/stringutils.go @@ -0,0 +1,99 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. 
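+//
+// For example:
+//   Ellipsis("docker", 10) == "docker"
+//   Ellipsis("docker", 4)  == "d..."
+//   Ellipsis("docker", 3)  == "doc"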
+func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/pkg/stringutils/stringutils_test.go b/pkg/stringutils/stringutils_test.go new file mode 100644 index 0000000..8af2bdc --- /dev/null +++ b/pkg/stringutils/stringutils_test.go @@ -0,0 +1,121 @@ +package stringutils + +import "testing" + +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + if len(s) != expectedLength { + t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) + } +} + +func testUniquenessHelper(generator func(int) string, t *testing.T) { + repeats := 25 + set := make(map[string]struct{}, repeats) + for i := 0; i < repeats; i = i + 1 { + str := generator(64) + if len(str) != 64 { + t.Fatalf("Id returned is incorrect: %s", str) + } + if _, ok := set[str]; ok { + t.Fatalf("Random number is repeated") + } + set[str] = struct{}{} + } +} + +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAsciiStringLength(t *testing.T) { + testLengthHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { + str := GenerateRandomASCIIString(64) + if !isASCII(str) { + t.Fatalf("%s contained non-ascii characters", str) + } +} + +func TestEllipsis(t *testing.T) { + str := "t🐳ststring" + newstr := Ellipsis(str, 3) + if newstr != "t🐳s" { + t.Fatalf("Expected t🐳s, got %s", newstr) + } + newstr = Ellipsis(str, 8) + if newstr != "t🐳sts..." 
{ + t.Fatalf("Expected tests..., got %s", newstr) + } + newstr = Ellipsis(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestTruncate(t *testing.T) { + str := "t🐳ststring" + newstr := Truncate(str, 4) + if newstr != "t🐳st" { + t.Fatalf("Expected t🐳st, got %s", newstr) + } + newstr = Truncate(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestInSlice(t *testing.T) { + slice := []string{"t🐳st", "in", "slice"} + + test := InSlice(slice, "t🐳st") + if !test { + t.Fatalf("Expected string t🐳st to be in slice") + } + test = InSlice(slice, "SLICE") + if !test { + t.Fatalf("Expected string SLICE to be in slice") + } + test = InSlice(slice, "notinslice") + if test { + t.Fatalf("Expected string notinslice not to be in slice") + } +} + +func TestShellQuoteArgumentsEmpty(t *testing.T) { + actual := ShellQuoteArguments([]string{}) + expected := "" + if actual != expected { + t.Fatalf("Expected an empty string") + } +} + +func TestShellQuoteArguments(t *testing.T) { + simpleString := "simpleString" + complexString := "This is a 'more' complex $tring with some special char *" + actual := ShellQuoteArguments([]string{simpleString, complexString}) + expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" + if actual != expected { + t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) + } +} diff --git a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE new file mode 100644 index 0000000..b9fbf3c --- /dev/null +++ b/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD new file mode 100644 index 0000000..4c056c5 --- /dev/null +++ b/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md
new file mode 100644
index 0000000..8dba54f
--- /dev/null
+++ b/pkg/symlink/README.md
@@ -0,0 +1,6 @@
+Package symlink implements EvalSymlinksInScope, an extension of filepath.EvalSymlinks,
+as well as a Windows long-path aware version of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go
new file mode 100644
index 0000000..f6bc223
--- /dev/null
+++ b/pkg/symlink/fs.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+ path, err := filepath.Abs(filepath.FromSlash(path))
+ if err != nil {
+ return "", err
+ }
+ root, err = filepath.Abs(filepath.FromSlash(root))
+ if err != nil {
+ return "", err
+ }
+ return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root` at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one then makes /foo/bar a symlink to /baz, "/foo/bar" should
+// no longer be considered safely contained in "/foo".
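+//
+// A short usage sketch (the paths are hypothetical):
+//   // Resolve a path under /foo; the result is guaranteed to stay in /foo:
+//   resolved, err := FollowSymlinkInScope("/foo/bar", "/foo")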
+func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if isDriveOrRoot(cleanP) { + // never Lstat "/" itself, or drive letters on Windows + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if system.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link. +// This version has been updated to support long paths prepended with `\\?\`. 
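+//
+// For example (a hypothetical Windows path):
+//   p, err := EvalSymlinks(`\\?\C:\some\long\path`)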
+func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/pkg/symlink/fs_unix.go b/pkg/symlink/fs_unix.go new file mode 100644 index 0000000..2270827 --- /dev/null +++ b/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/pkg/symlink/fs_unix_test.go b/pkg/symlink/fs_unix_test.go new file mode 100644 index 0000000..7085c0b --- /dev/null +++ b/pkg/symlink/fs_unix_test.go @@ -0,0 +1,407 @@ +// +build !windows + +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TODO Windows: This needs some serious work to port to Windows. For now, +// turning off testing in this package. + +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: 
"testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if 
err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: 
"root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/symlink/fs_windows.go b/pkg/symlink/fs_windows.go new file mode 100644 index 0000000..241e531 --- /dev/null +++ b/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/longpath" +) + +func toShort(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return syscall.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // syscall.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. 
+ if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. + if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/pkg/sysinfo/README.md b/pkg/sysinfo/README.md new file mode 100644 index 0000000..c1530ce --- /dev/null +++ b/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/pkg/sysinfo/numcpu.go b/pkg/sysinfo/numcpu.go new file mode 100644 index 0000000..aeb1a3a --- /dev/null +++ b/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/pkg/sysinfo/numcpu_linux.go b/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 0000000..5eacd35 --- /dev/null +++ b/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,43 @@ +// +build linux + +package sysinfo + +import ( + "runtime" + "syscall" + "unsafe" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. 
+ pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := syscall.RawSyscall(syscall.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. + ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/pkg/sysinfo/numcpu_windows.go b/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 0000000..1d89dd5 --- /dev/null +++ b/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package sysinfo + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000..f046de4 --- /dev/null +++ b/pkg/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. 
+type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool + + // Whether CPU real-time period is supported or not + CPURealtimePeriod bool + + // Whether CPU real-time runtime is supported or not + CPURealtimeRuntime bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. 
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 0000000..7ad84a8 --- /dev/null +++ b/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,259 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. 
+func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. 
+func checkCgroupPids(quiet bool) cgroupPids {
+ _, err := cgroups.FindCgroupMountpoint("pids")
+ if err != nil {
+ if !quiet {
+ logrus.Warn(err)
+ }
+ return cgroupPids{}
+ }
+
+ return cgroupPids{
+ PidsLimit: true,
+ }
+}
+
+func cgroupEnabled(mountPoint, name string) bool {
+ _, err := os.Stat(path.Join(mountPoint, name))
+ return err == nil
+}
+
+func readProcBool(path string) bool {
+ val, err := ioutil.ReadFile(path)
+ if err != nil {
+ return false
+ }
+ return strings.TrimSpace(string(val)) == "1"
+}
diff --git a/pkg/sysinfo/sysinfo_linux_test.go b/pkg/sysinfo/sysinfo_linux_test.go
new file mode 100644
index 0000000..77c54f2
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_linux_test.go
@@ -0,0 +1,104 @@
+package sysinfo
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "syscall"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestReadProcBool(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc")
+ require.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ procFile := filepath.Join(tmpDir, "read-proc-bool")
+ err = ioutil.WriteFile(procFile, []byte("1"), 0644)
+ require.NoError(t, err)
+
+ if !readProcBool(procFile) {
+ t.Fatal("expected proc bool to be true, got false")
+ }
+
+ if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if readProcBool(procFile) {
+ t.Fatal("expected proc bool to be false, got true")
+ }
+
+ if readProcBool(path.Join(tmpDir, "no-exist")) {
+ t.Fatal("should be false for non-existent entry")
+ }
+}
+
+func TestCgroupEnabled(t *testing.T) {
+ cgroupDir, err := ioutil.TempDir("", "cgroup-test")
+ require.NoError(t, err)
+ defer os.RemoveAll(cgroupDir)
+
+ if cgroupEnabled(cgroupDir, "test") {
+ t.Fatal("cgroupEnabled should be false")
+ }
+
+ err = ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644)
+ require.NoError(t, err)
+
+ if !cgroupEnabled(cgroupDir, "test") {
+ t.Fatal("cgroupEnabled should be true")
+ }
+}
+
+func TestNew(t *testing.T) {
+ sysInfo := New(false)
+ require.NotNil(t, sysInfo)
+ checkSysInfo(t, sysInfo)
+
+ sysInfo = New(true)
+ require.NotNil(t, sysInfo)
+ checkSysInfo(t, sysInfo)
+}
+
+func checkSysInfo(t *testing.T, sysInfo *SysInfo) {
+ // If the kernel supports Seccomp via CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER,
+ // sysInfo.Seccomp must be true; otherwise it must be false.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
+ require.True(t, sysInfo.Seccomp)
+ }
+ } else {
+ require.False(t, sysInfo.Seccomp)
+ }
+}
+
+func TestNewAppArmorEnabled(t *testing.T) {
+ // If AppArmor is supported on this host, sysInfo.AppArmor must be true.
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); err != nil {
+ t.Skip("AppArmor must be enabled on the host")
+ }
+
+ sysInfo := New(true)
+ require.True(t, sysInfo.AppArmor)
+}
+
+func TestNewAppArmorDisabled(t *testing.T) {
+ // If AppArmor is not supported on this host,
+
+func TestNewAppArmorDisabled(t *testing.T) {
+	// Check if AppArmor is supported; if not, sysInfo.AppArmor must be false.
+	if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) {
+		t.Skip("AppArmor must be disabled")
+	}
+
+	sysInfo := New(true)
+	require.False(t, sysInfo.AppArmor)
+}
+
+func TestNumCPU(t *testing.T) {
+	cpuNumbers := NumCPU()
+	if cpuNumbers <= 0 {
+		t.Fatal("CPU returned must be greater than zero")
+	}
+}
diff --git a/pkg/sysinfo/sysinfo_solaris.go b/pkg/sysinfo/sysinfo_solaris.go
new file mode 100644
index 0000000..c858d57
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_solaris.go
@@ -0,0 +1,121 @@
+// +build solaris,cgo
+
+package sysinfo
+
+import (
+	"bytes"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+/*
+#cgo LDFLAGS: -llgrp
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/lgrp_user.h>
+int getLgrpCount() {
+	lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE;
+	uint_t nlgrps;
+
+	if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) {
+		return -1;
+	}
+	nlgrps = lgrp_nlgrps(lgrpcookie);
+	return nlgrps;
+}
+*/
+import "C"
+
+// IsCPUSharesAvailable returns whether the CPUShares setting is supported.
+// We need FSS to be set as the default scheduling class to support CPU shares.
+func IsCPUSharesAvailable() bool {
+	cmd := exec.Command("/usr/sbin/dispadmin", "-d")
+	outBuf := new(bytes.Buffer)
+	errBuf := new(bytes.Buffer)
+	cmd.Stderr = errBuf
+	cmd.Stdout = outBuf
+
+	if err := cmd.Run(); err != nil {
+		return false
+	}
+	return strings.Contains(outBuf.String(), "FSS")
+}
+
+// New returns a new SysInfo, using the filesystem to detect which features
+// the kernel supports.
+// NOTE Solaris: if we change the capabilities below, be sure
+// to update verifyPlatformContainerSettings() in daemon_solaris.go.
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	sysInfo.cgroupMemInfo = setCgroupMem(quiet)
+	sysInfo.cgroupCPUInfo = setCgroupCPU(quiet)
+	sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet)
+	sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet)
+
+	sysInfo.IPv4ForwardingDisabled = false
+
+	sysInfo.AppArmor = false
+
+	return sysInfo
+}
+
+// setCgroupMem returns the memory settings for Solaris; they are fixed at
+// compile time rather than read from the filesystem.
+func setCgroupMem(quiet bool) cgroupMemInfo {
+	return cgroupMemInfo{
+		MemoryLimit:       true,
+		SwapLimit:         true,
+		MemoryReservation: false,
+		OomKillDisable:    false,
+		MemorySwappiness:  false,
+		KernelMemory:      false,
+	}
+}
+
+// setCgroupCPU returns the cpu settings for Solaris; they are likewise fixed.
+func setCgroupCPU(quiet bool) cgroupCPUInfo {
+	return cgroupCPUInfo{
+		CPUShares:          true,
+		CPUCfsPeriod:       false,
+		CPUCfsQuota:        true,
+		CPURealtimePeriod:  false,
+		CPURealtimeRuntime: false,
+	}
+}
+
+// setCgroupBlkioInfo returns the blkio settings; the blkio switches are not
+// supported on Solaris.
+func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo {
+	return cgroupBlkioInfo{
+		BlkioWeight:       false,
+		BlkioWeightDevice: false,
+	}
+}
+
+// setCgroupCPUsetInfo reads the cpuset information for Solaris.
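+//
+// Unlike the Linux variant, which copies the cpuset.cpus/cpuset.mems lists,
+// this reports plain counts: Cpus is the number of online processors and
+// Mems the number of locality groups, both as decimal strings, e.g.
+//
+//	info := setCgroupCPUsetInfo(false)
+//	// info.Cpus == "8", info.Mems == "2" (values depend on the host)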
+func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo {
+	return cgroupCpusetInfo{
+		Cpuset: true,
+		Cpus:   getCPUCount(),
+		Mems:   getLgrpCount(),
+	}
+}
+
+// getCPUCount returns the number of online processors as a decimal string.
+func getCPUCount() string {
+	ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN)
+	if ncpus <= 0 {
+		return ""
+	}
+	return strconv.FormatInt(int64(ncpus), 10)
+}
+
+// getLgrpCount returns the number of locality groups as a decimal string.
+func getLgrpCount() string {
+	nlgrps := C.getLgrpCount()
+	if nlgrps <= 0 {
+		return ""
+	}
+	return strconv.FormatInt(int64(nlgrps), 10)
+}
diff --git a/pkg/sysinfo/sysinfo_test.go b/pkg/sysinfo/sysinfo_test.go
new file mode 100644
index 0000000..b61fbcf
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_test.go
@@ -0,0 +1,26 @@
+package sysinfo
+
+import "testing"
+
+func TestIsCpusetListAvailable(t *testing.T) {
+	cases := []struct {
+		provided  string
+		available string
+		res       bool
+		err       bool
+	}{
+		{"1", "0-4", true, false},
+		{"01,3", "0-4", true, false},
+		{"", "0-7", true, false},
+		{"1--42", "0-7", false, true},
+		{"1-42", "00-1,8,,9", false, true},
+		{"1,41-42", "43,45", false, false},
+		{"0-3", "", false, false},
+	}
+	for _, c := range cases {
+		r, err := isCpusetListAvailable(c.provided, c.available)
+		if (c.err && err == nil) && r != c.res {
+			t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r)
+		}
+	}
+}
diff --git a/pkg/sysinfo/sysinfo_unix.go b/pkg/sysinfo/sysinfo_unix.go
new file mode 100644
index 0000000..45f3ef1
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_unix.go
@@ -0,0 +1,9 @@
+// +build !linux,!solaris,!windows
+
+package sysinfo
+
+// New returns an empty SysInfo on platforms other than Linux and Solaris, for now.
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	return sysInfo
+}
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
new file mode 100644
index 0000000..4e6255b
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package sysinfo
+
+// New returns an empty SysInfo on Windows, for now.
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	return sysInfo
+}
diff --git a/pkg/system/chtimes.go b/pkg/system/chtimes.go
new file mode 100644
index 0000000..056d199
--- /dev/null
+++ b/pkg/system/chtimes.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+	"os"
+	"time"
+)
+
+// Chtimes changes the access time and modified time of a file at the given path.
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+	unixMinTime := time.Unix(0, 0)
+	unixMaxTime := maxTime
+
+	// If the modified time is prior to the Unix Epoch, or after the
+	// end of Unix Time, os.Chtimes has undefined behavior. Default to
+	// the Unix Epoch in those cases, just to be safe.
+	if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+		atime = unixMinTime
+	}
+
+	if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+		mtime = unixMinTime
+	}
+
+	if err := os.Chtimes(name, atime, mtime); err != nil {
+		return err
+	}
+
+	// Take platform specific action for setting create time.
+	if err := setCTime(name, mtime); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/system/chtimes_test.go b/pkg/system/chtimes_test.go
new file mode 100644
index 0000000..5c87df3
--- /dev/null
+++ b/pkg/system/chtimes_test.go
@@ -0,0 +1,94 @@
+package system
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+)
+
+// prepareTempFile creates a temporary file in a temporary directory.
+func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) + } +} diff --git a/pkg/system/chtimes_unix.go b/pkg/system/chtimes_unix.go new file mode 100644 index 0000000..09d58bc --- /dev/null +++ b/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. 
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/pkg/system/chtimes_unix_test.go b/pkg/system/chtimes_unix_test.go new file mode 100644 index 0000000..6ec9a71 --- /dev/null +++ b/pkg/system/chtimes_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesLinux tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/pkg/system/chtimes_windows.go b/pkg/system/chtimes_windows.go new file mode 100644 index 0000000..2945868 --- /dev/null +++ b/pkg/system/chtimes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package system + +import ( + "syscall" + "time" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. 
+func setCTime(path string, ctime time.Time) error { + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) +} diff --git a/pkg/system/chtimes_windows_test.go b/pkg/system/chtimes_windows_test.go new file mode 100644 index 0000000..72d8a10 --- /dev/null +++ b/pkg/system/chtimes_windows_test.go @@ -0,0 +1,86 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesWindows tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/pkg/system/errors.go b/pkg/system/errors.go new file mode 100644 index 0000000..2883189 --- /dev/null +++ b/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. 
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/pkg/system/events_windows.go b/pkg/system/events_windows.go new file mode 100644 index 0000000..3ec6d22 --- /dev/null +++ b/pkg/system/events_windows.go @@ -0,0 +1,85 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/pkg/system/exitcode.go b/pkg/system/exitcode.go new file mode 100644 index 0000000..60f0514 --- /dev/null +++ b/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. 
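+//
+// A short usage sketch (the command is illustrative):
+//
+//	cmd := exec.Command("sh", "-c", "exit 3")
+//	code := ProcessExitCode(cmd.Run()) // 3; falls back to 127 when the
+//	                                   // exit code cannot be determined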
+func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/pkg/system/filesys.go b/pkg/system/filesys.go new file mode 100644 index 0000000..102565f --- /dev/null +++ b/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. 
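+//
+// Usage mirrors ioutil.TempFile; a brief sketch (the prefix is arbitrary):
+//
+//	f, err := TempFileSequential("", "docker-test")
+//	if err == nil {
+//		defer os.Remove(f.Name())
+//		f.Close()
+//	}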
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+	return ioutil.TempFile(dir, prefix)
+}
diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go
new file mode 100644
index 0000000..20117db
--- /dev/null
+++ b/pkg/system/filesys_windows.go
@@ -0,0 +1,298 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+
+	winio "github.com/Microsoft/go-winio"
+)
+
+const (
+	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+	// SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+	SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+	return mkdirall(path, true, sddl)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+	return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is largely copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false, sddl)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and syscall.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
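+//
+// The usual entry point is MkdirAllWithACL with one of the SDDL constants
+// defined at the top of this file; the path below is illustrative:
+//
+//	err := MkdirAllWithACL(`C:\ProgramData\docker`, 0, SddlAdministratorsLocalSystem)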
+func mkdirWithACL(name string, sddl string) error {
+	sa := syscall.SecurityAttributes{Length: 0}
+
+	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+	namep, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := syscall.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+	if !filepath.IsAbs(path) {
+		if !strings.HasPrefix(path, string(os.PathSeparator)) {
+			return false
+		}
+	}
+	return true
+}
+
+// The origin of the functions below here are the golang OS and syscall packages,
+// slightly modified to only cope with files, not directories, due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particularly for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
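+//
+// For example (illustrative path), docker load reads large layer tarballs
+// through this call so Windows does not evict the standby list to cache them:
+//
+//	f, err := OpenFileSequential(`C:\layer.tar`, os.O_RDONLY, 0)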
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := syscallOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/pkg/system/init.go b/pkg/system/init.go new file mode 100644 index 0000000..1793508 --- /dev/null +++ b/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/pkg/system/init_windows.go b/pkg/system/init_windows.go new file mode 100644 index 0000000..019c664 --- /dev/null +++ b/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/pkg/system/lcow_unix.go b/pkg/system/lcow_unix.go new file mode 100644 index 0000000..cff33bb --- /dev/null +++ b/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/pkg/system/lcow_windows.go b/pkg/system/lcow_windows.go new file mode 100644 index 0000000..e54d01e --- /dev/null +++ b/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/pkg/system/lstat_unix.go b/pkg/system/lstat_unix.go new file mode 100644 index 0000000..62fda5b --- /dev/null +++ b/pkg/system/lstat_unix.go @@ -0,0 +1,17 @@ +// +build !windows + +package system + +import "syscall" + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/pkg/system/lstat_unix_test.go b/pkg/system/lstat_unix_test.go new file mode 100644 index 0000000..062cf53 --- /dev/null +++ b/pkg/system/lstat_unix_test.go @@ -0,0 +1,30 @@ +// +build linux freebsd + +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go new file mode 100644 index 0000000..e51df0d --- /dev/null +++ b/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/pkg/system/meminfo.go b/pkg/system/meminfo.go new file mode 100644 index 0000000..3b6e947 --- /dev/null +++ b/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/pkg/system/meminfo_linux.go b/pkg/system/meminfo_linux.go new file mode 100644 index 0000000..385f1d5 --- /dev/null +++ b/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return meminfo, nil
+}
diff --git a/pkg/system/meminfo_solaris.go b/pkg/system/meminfo_solaris.go
new file mode 100644
index 0000000..925776e
--- /dev/null
+++ b/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+//	struct swaptable *st;
+//	struct swapent *swapent;
+//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+//	swapent = st->swt_ent;
+//	for (int i = 0; i < num; i++,swapent++) {
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+//	}
+//	st->swt_n = num;
+//	return st;
+// }
+// void freeSwaptable (struct swaptable *st) {
+//	struct swapent *swapent = st->swt_ent;
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
+//		free(swapent->ste_path);
+//	}
+//	free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+//	return ent[i];
+// }
+// int64_t getPpKernel() {
+//	int64_t pp_kernel = 0;
+//	kstat_ctl_t *ksc;
+//	kstat_t *ks;
+//	kstat_named_t *knp;
+//	kid_t kid;
+//
+//	if ((ksc = kstat_open()) == NULL) {
+//		return -1;
+//	}
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+//		return -1;
+//	}
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+//		return -1;
+//	}
+//	switch (knp->data_type) {
+//	case KSTAT_DATA_UINT64:
+//		pp_kernel = knp->value.ui64;
+//		break;
+//	case KSTAT_DATA_UINT32:
+//		pp_kernel = knp->value.ui32;
+//		break;
+//	}
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
+//	return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the total system memory using sysconf, the same way
+// prtconf does.
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// getFreeMem gets the currently available memory, also via sysconf.
+func getFreeMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
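+//
+// A minimal usage sketch, identical on every platform that implements
+// ReadMemInfo:
+//
+//	info, err := ReadMemInfo()
+//	if err == nil {
+//		fmt.Printf("RAM: %d free of %d bytes\n", info.MemFree, info.MemTotal)
+//	}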
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/pkg/system/meminfo_unix_test.go b/pkg/system/meminfo_unix_test.go new file mode 100644 index 0000000..44f5562 --- /dev/null +++ b/pkg/system/meminfo_unix_test.go @@ -0,0 +1,40 @@ +// +build linux freebsd + +package system + +import ( + "strings" + "testing" + + "github.com/docker/go-units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000..3ce019d --- /dev/null +++ b/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. 
+func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/pkg/system/meminfo_windows.go b/pkg/system/meminfo_windows.go new file mode 100644 index 0000000..883944a --- /dev/null +++ b/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go new file mode 100644 index 0000000..7395818 --- /dev/null +++ b/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go new file mode 100644 index 0000000..2e863c0 --- /dev/null +++ b/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/pkg/system/path.go b/pkg/system/path.go new file mode 100644 index 0000000..f634a6b --- /dev/null +++ b/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . 
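+//
+// Expected results, in brief:
+//
+//	DefaultPathEnv("linux")   // the unix default above (always on unix daemons;
+//	                          // on a Windows daemon only when LCOW is enabled)
+//	DefaultPathEnv("windows") // "" on a Windows daemon: the image supplies PATH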
+func DefaultPathEnv(platform string) string {
+	if runtime.GOOS == "windows" {
+		if platform != runtime.GOOS && LCOWSupported() {
+			return defaultUnixPathEnv
+		}
+		// Deliberately empty on Windows containers on Windows as the default path will be set by
+		// the container. Docker has no context of what the default path should be.
+		return ""
+	}
+	return defaultUnixPathEnv
+}
diff --git a/pkg/system/path_unix.go b/pkg/system/path_unix.go
new file mode 100644
index 0000000..f3762e6
--- /dev/null
+++ b/pkg/system/path_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package system
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
diff --git a/pkg/system/path_windows.go b/pkg/system/path_windows.go
new file mode 100644
index 0000000..3fc4744
--- /dev/null
+++ b/pkg/system/path_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package system
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. It also translates the path to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/pkg/system/path_windows_test.go b/pkg/system/path_windows_test.go
new file mode 100644
index 0000000..eccb26a
--- /dev/null
+++ b/pkg/system/path_windows_test.go
@@ -0,0 +1,78 @@
+// +build windows
+
+package system
+
+import "testing"
+
+// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
+func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
+	// Fails if the drive letter is not C.
+	path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`)
+	if err == nil || err.Error() != "The specified path is not on the system drive (C:)" {
+		t.Fatalf("Expected error for d:")
+	}
+
+	// Single character is unchanged
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil {
+		t.Fatalf("Single character should pass")
+	}
+	if path != "z" {
+		t.Fatalf("Single character should be unchanged")
+	}
+
+	// Two characters without colon is unchanged
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil {
+		t.Fatalf("2 characters without colon should pass")
+	}
+	if path != "AB" {
+		t.Fatalf("2 characters without colon should be unchanged")
+	}
+
+	// Abs path without drive letter
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil {
+		t.Fatalf("abs path no drive letter should pass")
+	}
+	if path != `\l` {
+		t.Fatalf("abs path without drive letter should be unchanged")
+	}
+
+	// Abs path without drive letter, linux style
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil {
+		t.Fatalf("abs path no drive letter linux style should pass")
+	}
+	if path != `\l` {
+		t.Fatalf("abs path without drive letter linux failed %s", path)
+	}
+
+	// Drive-colon should be stripped
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil {
+		t.Fatalf("An absolute path should pass")
+	}
+	if path != `\` {
+		t.Fatalf(`An absolute path should have been shortened to \ %s`, path)
+	}
+
+	// Verify with a linux-style path
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil {
+		t.Fatalf("An absolute path should pass")
+	}
+	if path != `\` {
+		t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path)
+	}
+
+	// Failure on c:
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil {
+		t.Fatalf("c: should fail")
+	}
+	if err.Error() != `No relative path specified in "c:"` {
+		t.Fatalf("%s %v", path, err)
+	}
+
+	// Failure on d:
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil {
+		t.Fatalf("d: should fail")
+	}
+	if err.Error() != `No relative path specified in "d:"` {
+		t.Fatalf("%s %v", path, err)
+	}
+}
diff --git a/pkg/system/process_unix.go b/pkg/system/process_unix.go
new file mode 100644
index 0000000..c99d796
--- /dev/null
+++ b/pkg/system/process_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd solaris darwin
+
+package system
+
+import (
+	"syscall"
+)
+
+// IsProcessAlive returns true if process with a given pid is running.
+func IsProcessAlive(pid int) bool {
+	err := syscall.Kill(pid, syscall.Signal(0))
+	if err == nil || err == syscall.EPERM {
+		return true
+	}
+
+	return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+	syscall.Kill(pid, syscall.SIGKILL)
+}
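+
+// A hedged usage sketch of the two helpers above:
+//
+//	if IsProcessAlive(pid) {
+//		KillProcess(pid) // sends SIGKILL; errors are deliberately ignored
+//	}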
diff --git a/pkg/system/rm.go b/pkg/system/rm.go
new file mode 100644
index 0000000..ca0621f
--- /dev/null
+++ b/pkg/system/rm.go
@@ -0,0 +1,80 @@
+package system
+
+import (
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/mount"
+	"github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's OK for the dir to
+// be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances.
+func EnsureRemoveAll(dir string) error {
+	notExistErr := make(map[string]bool)
+
+	// track retries
+	exitOnErr := make(map[string]int)
+	maxRetry := 5
+
+	// Attempt to unmount anything beneath this dir first.
+	mount.RecursiveUnmount(dir)
+
+	for {
+		err := os.RemoveAll(dir)
+		if err == nil {
+			return nil
+		}
+
+		pe, ok := err.(*os.PathError)
+		if !ok {
+			return err
+		}
+
+		if os.IsNotExist(err) {
+			if notExistErr[pe.Path] {
+				return err
+			}
+			notExistErr[pe.Path] = true
+
+			// There is a race where some subdir can be removed after the parent
+			// dir entries have been read.
+			// So the path could be from `os.Remove(subdir)`.
+			// If the reported non-existent path is not the passed in `dir` we
+			// should just retry, but otherwise return with no error.
+			if pe.Path == dir {
+				return nil
+			}
+			continue
+		}
+
+		if pe.Err != syscall.EBUSY {
+			return err
+		}
+
+		if mounted, _ := mount.Mounted(pe.Path); mounted {
+			if e := mount.Unmount(pe.Path); e != nil {
+				if mounted, _ := mount.Mounted(pe.Path); mounted {
+					return errors.Wrapf(e, "error while removing %s", dir)
+				}
+			}
+		}
+
+		if exitOnErr[pe.Path] == maxRetry {
+			return err
+		}
+		exitOnErr[pe.Path]++
+		time.Sleep(100 * time.Millisecond)
+	}
+}
diff --git a/pkg/system/rm_test.go b/pkg/system/rm_test.go
new file mode 100644
index 0000000..fc2821f
--- /dev/null
+++ b/pkg/system/rm_test.go
@@ -0,0 +1,84 @@
+package system
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/mount"
+)
+
+func TestEnsureRemoveAllNotExist(t *testing.T) {
+	// should never return an error for a non-existent path
+	if err := EnsureRemoveAll("/non/existent/path"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestEnsureRemoveAllWithDir(t *testing.T) {
+	dir, err := ioutil.TempDir("", "test-ensure-removeall-with-dir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := EnsureRemoveAll(dir); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestEnsureRemoveAllWithFile(t *testing.T) {
+	tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp.Close()
+	if err := EnsureRemoveAll(tmp.Name()); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestEnsureRemoveAllWithMount(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("mount not supported on Windows")
+	}
+
+	dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir2)
+
+	bindDir := filepath.Join(dir1, "bind")
+	if err := os.MkdirAll(bindDir, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := mount.Mount(dir2, bindDir, "none", "bind"); err != nil {
+		t.Fatal(err)
+	}
+
+	done := make(chan struct{})
+	go func() {
+		err = EnsureRemoveAll(dir1)
+		close(done)
+	}()
+
+	select {
+	case <-done:
+		if err != nil {
+			t.Fatal(err)
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout waiting for EnsureRemoveAll to finish")
+	}
+
+	if _, err := os.Stat(dir1); !os.IsNotExist(err) {
+		t.Fatalf("expected %q to not exist", dir1)
+	}
+}
diff --git a/pkg/system/stat_darwin.go b/pkg/system/stat_darwin.go
new file mode 100644
index 0000000..715f05b
--- /dev/null
+++ b/pkg/system/stat_darwin.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/pkg/system/stat_freebsd.go b/pkg/system/stat_freebsd.go new file mode 100644 index 0000000..715f05b --- /dev/null +++ b/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go new file mode 100644 index 0000000..66bf6e2 --- /dev/null +++ b/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/pkg/system/stat_openbsd.go b/pkg/system/stat_openbsd.go new file mode 100644 index 0000000..b607dea --- /dev/null +++ b/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/pkg/system/stat_solaris.go b/pkg/system/stat_solaris.go new file mode 100644 index 0000000..b607dea --- /dev/null +++ b/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/pkg/system/stat_unix.go b/pkg/system/stat_unix.go new file mode 100644 index 0000000..6208e74 --- /dev/null +++ b/pkg/system/stat_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package system + +import "syscall" + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/pkg/system/stat_unix_test.go b/pkg/system/stat_unix_test.go
new file mode 100644
index 0000000..dee8d30
--- /dev/null
+++ b/pkg/system/stat_unix_test.go
@@ -0,0 +1,39 @@
+// +build linux freebsd
+
+package system
+
+import (
+	"os"
+	"syscall"
+	"testing"
+)
+
+// TestFromStatT tests fromStatT for a tempfile
+func TestFromStatT(t *testing.T) {
+	file, _, _, dir := prepareFiles(t)
+	defer os.RemoveAll(dir)
+
+	stat := &syscall.Stat_t{}
+	if err := syscall.Lstat(file, stat); err != nil {
+		t.Fatal(err)
+	}
+
+	s, err := fromStatT(stat)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if stat.Mode != s.Mode() {
+		t.Fatal("got invalid mode")
+	}
+	if stat.Uid != s.UID() {
+		t.Fatal("got invalid uid")
+	}
+	if stat.Gid != s.GID() {
+		t.Fatal("got invalid gid")
+	}
+	if stat.Rdev != s.Rdev() {
+		t.Fatal("got invalid rdev")
+	}
+	if stat.Mtim != s.Mtim() {
+		t.Fatal("got invalid mtim")
+	}
+}
diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go
new file mode 100644
index 0000000..6c63972
--- /dev/null
+++ b/pkg/system/stat_windows.go
@@ -0,0 +1,49 @@
+package system
+
+import (
+	"os"
+	"time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, size, etc about a file.
+type StatT struct {
+	mode os.FileMode
+	size int64
+	mtim time.Time
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+	return os.FileMode(s.mode)
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() time.Time {
+	return time.Time(s.mtim)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		return nil, err
+	}
+	return fromStatT(&fi)
+}
+
+// fromStatT converts an os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+	return &StatT{
+		size: (*fi).Size(),
+		mode: (*fi).Mode(),
+		mtim: (*fi).ModTime()}, nil
+}
diff --git a/pkg/system/syscall_unix.go b/pkg/system/syscall_unix.go
new file mode 100644
index 0000000..3ae9128
--- /dev/null
+++ b/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+	return syscall.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element in the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/pkg/system/syscall_windows.go b/pkg/system/syscall_windows.go new file mode 100644 index 0000000..c328a6f --- /dev/null +++ b/pkg/system/syscall_windows.go @@ -0,0 +1,122 @@ +package system + +import ( + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +var ( + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. 
+func CommandLineToArgv(commandLine string) ([]string, error) {
+	var argc int32
+
+	argsPtr, err := syscall.UTF16PtrFromString(commandLine)
+	if err != nil {
+		return nil, err
+	}
+
+	argv, err := syscall.CommandLineToArgv(argsPtr, &argc)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
+
+	newArgs := make([]string, argc)
+	for i, v := range (*argv)[:argc] {
+		newArgs[i] = string(syscall.UTF16ToString((*v)[:]))
+	}
+
+	return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+	// For now, check for ntuser API support on the host. In the future, a host
+	// may support win32k in containers even if the host does not support ntuser
+	// APIs.
+	return ntuserApiset.Load() == nil
+}
diff --git a/pkg/system/syscall_windows_test.go b/pkg/system/syscall_windows_test.go
new file mode 100644
index 0000000..4886b2b
--- /dev/null
+++ b/pkg/system/syscall_windows_test.go
@@ -0,0 +1,9 @@
+package system
+
+import "testing"
+
+func TestHasWin32KSupport(t *testing.T) {
+	s := HasWin32KSupport() // make sure this doesn't panic
+
+	t.Logf("win32k: %v", s) // will be different on different platforms -- informative only
+}
diff --git a/pkg/system/umask.go b/pkg/system/umask.go
new file mode 100644
index 0000000..3d0146b
--- /dev/null
+++ b/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+	return syscall.Umask(newmask), nil
+}
diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go
new file mode 100644
index 0000000..13f1de1
--- /dev/null
+++ b/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+	// should not be called on cli code path
+	return 0, ErrNotSupportedPlatform
+}
diff --git a/pkg/system/utimes_freebsd.go b/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..e2eac3b
--- /dev/null
+++ b/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	var _path *byte
+	_path, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/system/utimes_linux.go b/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..fc8a1ab
--- /dev/null
+++ b/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
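+//
+// A minimal sketch of a call (the path is illustrative; ts holds the
+// {access, modification} times, mirroring TestLUtimesNano):
+//
+//	ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}}
+//	if err := LUtimesNano("/path/to/symlink", ts); err != nil {
+//		// handle error
+//	}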
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	// These are not currently available in syscall
+	atFdCwd := -100
+	atSymLinkNoFollow := 0x100
+
+	var _path *byte
+	_path, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/system/utimes_unix_test.go b/pkg/system/utimes_unix_test.go
new file mode 100644
index 0000000..a73ed11
--- /dev/null
+++ b/pkg/system/utimes_unix_test.go
@@ -0,0 +1,68 @@
+// +build linux freebsd
+
+package system
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+	"testing"
+)
+
+// prepareFiles creates files for testing in the temp directory
+func prepareFiles(t *testing.T) (string, string, string, string) {
+	dir, err := ioutil.TempDir("", "docker-system-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	file := filepath.Join(dir, "exist")
+	if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	invalid := filepath.Join(dir, "doesnt-exist")
+
+	symlink := filepath.Join(dir, "symlink")
+	if err := os.Symlink(file, symlink); err != nil {
+		t.Fatal(err)
+	}
+
+	return file, invalid, symlink, dir
+}
+
+func TestLUtimesNano(t *testing.T) {
+	file, invalid, symlink, dir := prepareFiles(t)
+	defer os.RemoveAll(dir)
+
+	before, err := os.Stat(file)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}}
+	if err := LUtimesNano(symlink, ts); err != nil {
+		t.Fatal(err)
+	}
+
+	symlinkInfo, err := os.Lstat(symlink)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
+		t.Fatal("The modification time of the symlink should be different")
+	}
+
+	fileInfo, err := os.Stat(file)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+		t.Fatal("The modification time of the file should be the same")
+	}
+
+	if err := LUtimesNano(invalid, ts); err == nil {
+		t.Fatal("Expected an error on a non-existent file")
+	}
+}
diff --git a/pkg/system/utimes_unsupported.go b/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..1397145
--- /dev/null
+++ b/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/pkg/system/xattrs_linux.go b/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..d2e2c05
--- /dev/null
+++ b/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
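+//
+// For example (path and attribute name are illustrative):
+//
+//	cap, err := Lgetxattr("/usr/bin/ping", "security.capability")
+//	// cap == nil && err == nil when the attribute is not set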
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return nil, err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return nil, err
+	}
+
+	dest := make([]byte, 128)
+	destBytes := unsafe.Pointer(&dest[0])
+	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	if errno == syscall.ENODATA {
+		return nil, nil
+	}
+	if errno == syscall.ERANGE {
+		dest = make([]byte, sz)
+		destBytes := unsafe.Pointer(&dest[0])
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	}
+	if errno != 0 {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return err
+	}
+	var dataBytes unsafe.Pointer
+	if len(data) > 0 {
+		dataBytes = unsafe.Pointer(&data[0])
+	} else {
+		dataBytes = unsafe.Pointer(&_zero)
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
diff --git a/pkg/system/xattrs_unsupported.go b/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000..0114f22
--- /dev/null
+++ b/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/pkg/tailfile/tailfile.go b/pkg/tailfile/tailfile.go
new file mode 100644
index 0000000..09eb393
--- /dev/null
+++ b/pkg/tailfile/tailfile.go
@@ -0,0 +1,66 @@
+// Package tailfile provides helper functions to read the last n lines of any
+// ReadSeeker.
+package tailfile
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+)
+
+const blockSize = 1024
+
+var eol = []byte("\n")
+
+// ErrNonPositiveLinesNumber is the error returned if the requested number of lines is not positive.
+var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
+
+// TailFile returns the last n lines of reader f (which could be a file).
+func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
+	if n <= 0 {
+		return nil, ErrNonPositiveLinesNumber
+	}
+	size, err := f.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+	block := -1
+	var data []byte
+	var cnt int
+	for {
+		var b []byte
+		step := int64(block * blockSize)
+		left := size + step // how many bytes to beginning
+		if left < 0 {
+			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+				return nil, err
+			}
+			b = make([]byte, blockSize+left)
+			if _, err := f.Read(b); err != nil {
+				return nil, err
+			}
+			data = append(b, data...)
+ break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(left, os.SEEK_SET); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/pkg/tailfile/tailfile_test.go b/pkg/tailfile/tailfile_test.go new file mode 100644 index 0000000..31217c0 --- /dev/null +++ b/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := 
f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := TailFile(f, 1000); err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/tarsplitutils/tarstream.go b/pkg/tarsplitutils/tarstream.go new file mode 100644 index 0000000..217eae6 --- /dev/null +++ b/pkg/tarsplitutils/tarstream.go @@ -0,0 +1,127 @@ +package tarsplitutils + +import ( + "fmt" + "io" + "io/ioutil" + "sort" + + "github.com/vbatts/tar-split/tar/storage" +) + +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func max(x, y int) int { + if x < y { + return y + } + return x +} + +type randomAccessTarStream struct { + entries storage.Entries + entryOffsets []int64 + fg storage.FileGetter +} + +func (self randomAccessTarStream) ReadAt(p []byte, off int64) (int, error) { + // Find the first entry that we're interested in + firstEntry := sort.Search(len(self.entryOffsets), func(i int) bool { return self.entryOffsets[i] > off }) - 1 + + // The cursor will most likely be negative the first time. This signifies + // that we need to read some data first before starting to fill the buffer + n := self.entryOffsets[firstEntry] - off + + for _, entry := range self.entries[firstEntry:] { + if n >= int64(len(p)) { + break + } + + switch entry.Type { + case storage.SegmentType: + payload := entry.Payload + if n < 0 { + payload = payload[-n:] + n = 0 + } + + n += int64(copy(p[n:], payload)) + case storage.FileType: + if entry.Size == 0 { + continue + } + + fh, err := self.fg.Get(entry.GetName()) + if err != nil { + return 0, err + } + + end := min(n+entry.Size, int64(len(p))) + + if n < 0 { + if seeker, ok := fh.(io.Seeker); ok { + if _, err := seeker.Seek(-n, io.SeekStart); err != nil { + return 0, err + } + } else { + if _, err := io.CopyN(ioutil.Discard, fh, -n); err != nil { + return 0, err + } + } + n = 0 + } + + _, err = io.ReadFull(fh, p[n:end]) + + n += end - n + if err != nil { + return 0, err + } + + fh.Close() + default: + return 0, fmt.Errorf("Unknown tar-split entry type: %v", entry.Type) + } + } + + return len(p), nil +} + +func NewRandomAccessTarStream(fg storage.FileGetter, up storage.Unpacker) (io.ReadSeeker, error) { + stream := randomAccessTarStream{ + fg: fg, + } + + size := int64(0) + for { + entry, err := up.Next() + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + + stream.entryOffsets = append(stream.entryOffsets, size) + stream.entries = append(stream.entries, *entry) + + switch entry.Type { + case storage.SegmentType: + size += int64(len(entry.Payload)) + case storage.FileType: + size += entry.Size + } + } + + // Push ending offset. This is because when we binary search we search for + // the offset that is above the target and then we move one step back. + // See implementation of ReadAt() + stream.entryOffsets = append(stream.entryOffsets, size) + + return io.NewSectionReader(stream, 0, size), nil +} diff --git a/pkg/tarsum/builder_context.go b/pkg/tarsum/builder_context.go new file mode 100644 index 0000000..b42983e --- /dev/null +++ b/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. 
+// In general there was concern about adding this method to TarSum itself,
+// so instead it is added just to "BuilderContext", which is then
+// only used during the .dockerignore file processing
+// - see builder/evaluator.go
+type BuilderContext interface {
+	TarSum
+	Remove(string)
+}
+
+func (bc *tarSum) Remove(filename string) {
+	for i, fis := range bc.sums {
+		if fis.Name() == filename {
+			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+			// Note, we don't just return because there could be
+			// more than one with this name
+		}
+	}
+}
diff --git a/pkg/tarsum/builder_context_test.go b/pkg/tarsum/builder_context_test.go
new file mode 100644
index 0000000..f54bf3a
--- /dev/null
+++ b/pkg/tarsum/builder_context_test.go
@@ -0,0 +1,67 @@
+package tarsum
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// Try to remove a tarsum (in the BuilderContext) that does not exist; this won't change a thing
+func TestTarSumRemoveNonExistent(t *testing.T) {
+	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+	reader, err := os.Open(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer reader.Close()
+
+	ts, err := NewTarSum(reader, false, Version0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read and discard bytes so that it populates sums
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err != nil {
+		t.Errorf("failed to read from %s: %s", filename, err)
+	}
+
+	expected := len(ts.GetSums())
+
+	ts.(BuilderContext).Remove("")
+	ts.(BuilderContext).Remove("Anything")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
+	}
+}
+
+// Remove a tarsum (in the BuilderContext)
+func TestTarSumRemove(t *testing.T) {
+	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+	reader, err := os.Open(filename)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer reader.Close()
+
+	ts, err := NewTarSum(reader, false, Version0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read and discard bytes so that it populates sums
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err != nil {
+		t.Errorf("failed to read from %s: %s", filename, err)
+	}
+
+	expected := len(ts.GetSums()) - 1
+
+	ts.(BuilderContext).Remove("etc/sudoers")
+
+	if len(ts.GetSums()) != expected {
+		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
+	}
+}
diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go
new file mode 100644
index 0000000..5abf5e7
--- /dev/null
+++ b/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,126 @@
+package tarsum
+
+import "sort"
+
+// FileInfoSumInterface provides an interface for accessing file checksum
+// information within a tar file. This info is accessed through an interface
+// so the actual name and sum cannot be meddled with.
+type FileInfoSumInterface interface {
+	// File name
+	Name() string
+	// Checksum of this particular file and its headers
+	Sum() string
+	// Position of file in the tar
+	Pos() int64
+}
+
+type fileInfoSum struct {
+	name string
+	sum  string
+	pos  int64
+}
+
+func (fis fileInfoSum) Name() string {
+	return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+	return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+	return fis.pos
+}
+
+// FileInfoSums provides a list of FileInfoSumInterfaces.
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name.
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+	for i := range fis {
+		if fis[i].Name() == name {
+			return fis[i]
+		}
+	}
+	return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names.
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+	f := FileInfoSums{}
+	for i := range fis {
+		if fis[i].Name() == name {
+			f = append(f, fis[i])
+		}
+	}
+	return f
+}
+
+// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
+	for i := range fis {
+		f := fis[i]
+		if _, ok := seen[f.Name()]; ok {
+			dups = append(dups, f)
+		} else {
+			seen[f.Name()] = 0
+		}
+	}
+	return dups
+}
+
+// Len returns the size of the FileInfoSums.
+func (fis FileInfoSums) Len() int { return len(fis) }
+
+// Swap swaps two FileInfoSum values in a FileInfoSums list.
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+// SortByPos sorts FileInfoSums content by position.
+func (fis FileInfoSums) SortByPos() {
+	sort.Sort(byPos{fis})
+}
+
+// SortByNames sorts FileInfoSums content by name.
+func (fis FileInfoSums) SortByNames() {
+	sort.Sort(byName{fis})
+}
+
+// SortBySums sorts FileInfoSums content by sums.
+func (fis FileInfoSums) SortBySums() {
+	dups := fis.GetDuplicatePaths()
+	if len(dups) > 0 {
+		sort.Sort(bySum{fis, dups})
+	} else {
+		sort.Sort(bySum{fis, nil})
+	}
+}
+
+// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive
+type byName struct{ FileInfoSums }
+
+func (bn byName) Less(i, j int) bool {
+	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
+		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
+	}
+	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
+}
+
+// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
+type bySum struct {
+	FileInfoSums
+	dups FileInfoSums
+}
+
+func (bs bySum) Less(i, j int) bool {
+	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
+		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
+	}
+	return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
+}
+
+// byPos is a sort.Sort helper for sorting the fileinfos by their original order
+type byPos struct{ FileInfoSums }
+
+func (bp byPos) Less(i, j int) bool {
+	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
+}
diff --git a/pkg/tarsum/fileinfosums_test.go b/pkg/tarsum/fileinfosums_test.go
new file mode 100644
index 0000000..2e243d5
--- /dev/null
+++ b/pkg/tarsum/fileinfosums_test.go
@@ -0,0 +1,62 @@
+package tarsum
+
+import "testing"
+
+func newFileInfoSums() FileInfoSums {
+	return FileInfoSums{
+		fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2},
+		fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5},
+		fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0},
+		fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3},
+		fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4},
+		fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1},
+	}
+}
+
+func TestSortFileInfoSums(t *testing.T) {
+	dups := newFileInfoSums().GetAllFile("dup1")
+	if len(dups) != 2 {
+		t.Errorf("expected length 2, got %d", len(dups))
+	}
+	dups.SortByNames()
+	if dups[0].Pos() != 4 {
+		t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos())
+	}
+
+	fis := newFileInfoSums()
+	expected := "0abcdef1234567890"
+	fis.SortBySums()
+	got := fis[0].Sum()
+	if got != expected {
+		t.Errorf("Expected %q, got %q", expected, got)
+	}
+
+	fis = newFileInfoSums()
+	expected = "dup1"
+	fis.SortByNames()
+	gotFis := fis[0]
+	if gotFis.Name() != expected {
+		t.Errorf("Expected %q, got %q", expected, gotFis.Name())
+	}
+	// since a duplicate is first, ensure it is ordered first by position too
+	if gotFis.Pos() != 4 {
+		t.Errorf("Expected %d, got %d", 4, gotFis.Pos())
+	}
+
+	fis = newFileInfoSums()
+	fis.SortByPos()
+	if fis[0].Pos() != 0 {
+		t.Error("sorted fileInfoSums by Pos should order them by position.")
+	}
+
+	fis = newFileInfoSums()
+	expected = "deadbeef1"
+	gotFileInfoSum := fis.GetFile("dup1")
+	if gotFileInfoSum.Sum() != expected {
+		t.Errorf("Expected %q, got %q", expected, gotFileInfoSum.Sum())
+	}
+	if fis.GetFile("noPresent") != nil {
+		t.Error("Should have returned nil if the name is not found.")
+	}
+
+}
diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go
new file mode 100644
index 0000000..154788d
--- /dev/null
+++ b/pkg/tarsum/tarsum.go
@@ -0,0 +1,295 @@
+// Package tarsum provides algorithms to perform checksum calculation on
+// filesystem layers.
+//
+// The transportation of filesystems, regarding Docker, is done with tar(1)
+// archives. There are a variety of tar serialization formats [2], and a key
+// concern here is ensuring a repeatable checksum given a set of inputs from a
+// generic tar archive. Types of transportation include distribution to and from a
+// registry endpoint, saving and loading through commands or Docker daemon APIs,
+// transferring the build context from client to Docker daemon, and committing the
+// filesystem of a container to become an image.
+//
+// As tar archives are used for transit, but not preserved in many situations, the
+// focus of the algorithm is to ensure the integrity of the preserved filesystem,
+// while maintaining a deterministic accountability. This includes neither
+// constraining the ordering or manipulation of the files during the creation or
+// unpacking of the archive, nor including additional metadata state about the file
+// system attributes.
+package tarsum
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"path"
+	"strings"
+)
+
+const (
+	buf8K  = 8 * 1024
+	buf16K = 16 * 1024
+	buf32K = 32 * 1024
+)
+
+// NewTarSum creates a new interface for calculating a fixed time checksum of a
+// tar archive.
+//
+// This is used for calculating checksums of layers of an image, in some cases
+// including the byte payload of the image's json metadata as well, and for
+// calculating the checksums for buildcache.
+func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
+	return NewTarSumHash(r, dc, v, DefaultTHash)
+}
+
+// NewTarSumHash creates a new TarSum, providing a THash to use rather than
+// the DefaultTHash.
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+	headerSelector, err := getTarHeaderSelector(v)
+	if err != nil {
+		return nil, err
+	}
+	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
+	err = ts.initTarSum()
+	return ts, err
+}
+
+// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
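+//
+// For example (r being any tar stream reader; label values follow the
+// {tarsum_version}+{hash_name} form validated below):
+//
+//	ts, err := NewTarSumForLabel(r, true, "tarsum.v1+sha256")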
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
+	parts := strings.SplitN(label, "+", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
+	}
+
+	versionName, hashName := parts[0], parts[1]
+
+	version, ok := tarSumVersionsByName[versionName]
+	if !ok {
+		return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
+	}
+
+	hashConfig, ok := standardHashConfigs[hashName]
+	if !ok {
+		return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
+	}
+
+	tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
+
+	return NewTarSumHash(r, disableCompression, version, tHash)
+}
+
+// TarSum is the generic interface for calculating fixed time
+// checksums of a tar archive.
+type TarSum interface {
+	io.Reader
+	GetSums() FileInfoSums
+	Sum([]byte) string
+	Version() Version
+	Hash() THash
+}
+
+// tarSum struct is the structure for a Version0 checksum calculation.
+type tarSum struct {
+	io.Reader
+	tarR               *tar.Reader
+	tarW               *tar.Writer
+	writer             writeCloseFlusher
+	bufTar             *bytes.Buffer
+	bufWriter          *bytes.Buffer
+	bufData            []byte
+	h                  hash.Hash
+	tHash              THash
+	sums               FileInfoSums
+	fileCounter        int64
+	currentFile        string
+	finished           bool
+	first              bool
+	DisableCompression bool              // false by default. When false, the output is gzip compressed.
+	tarSumVersion      Version           // this field is not exported so it cannot be mutated during use
+	headerSelector     tarHeaderSelector // handles selecting and ordering headers for files in the archive
+}
+
+func (ts tarSum) Hash() THash {
+	return ts.tHash
+}
+
+func (ts tarSum) Version() Version {
+	return ts.tarSumVersion
+}
+
+// THash provides a hash.Hash type generator and its name.
+type THash interface {
+	Hash() hash.Hash
+	Name() string
+}
+
+// NewTHash is a convenience method for creating a THash.
+func NewTHash(name string, h func() hash.Hash) THash {
+	return simpleTHash{n: name, h: h}
+}
+
+type tHashConfig struct {
+	name string
+	hash crypto.Hash
+}
+
+var (
+	// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
+	standardHashConfigs = map[string]tHashConfig{
+		"sha256": {name: "sha256", hash: crypto.SHA256},
+		"sha512": {name: "sha512", hash: crypto.SHA512},
+	}
+)
+
+// DefaultTHash is the default TarSum hashing algorithm - "sha256".
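+//
+// A different hash can be supplied through NewTarSumHash; a minimal sketch,
+// mirroring the custom-hash helpers used in the tests (r is any io.Reader):
+//
+//	ts, err := NewTarSumHash(r, true, Version0, NewTHash("sha512", sha512.New))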
+var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = path.Clean(currentHeader.Name) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md new file mode 100644 index 0000000..89b2e49 --- /dev/null +++ b/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation 
for algorithms used in the TarSum checksum calculation
+page_keywords: docker, checksum, validation, tarsum
+
+# TarSum Checksum Specification
+
+## Abstract
+
+This document describes the algorithms used in performing the TarSum checksum
+calculation on filesystem layers, the need for this method over existing
+methods, and the versioning of this calculation.
+
+## Warning
+
+This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
+
+This is _not_ a cryptographic attestation, and should not be considered secure.
+
+## Introduction
+
+The transportation of filesystems, regarding Docker, is done with tar(1)
+archives. There are a variety of tar serialization formats [2], and a key
+concern here is ensuring a repeatable checksum given a set of inputs from a
+generic tar archive. Types of transportation include distribution to and from a
+registry endpoint, saving and loading through commands or Docker daemon APIs,
+transferring the build context from client to Docker daemon, and committing the
+filesystem of a container to become an image.
+
+As tar archives are used for transit, but not preserved in many situations, the
+focus of the algorithm is to ensure the integrity of the preserved filesystem,
+while maintaining a deterministic accountability. This includes neither
+constraining the ordering or manipulation of the files during the creation or
+unpacking of the archive, nor including additional metadata state about the file
+system attributes.
+
+## Intended Audience
+
+This document outlines the methods used for consistent checksum calculation
+for filesystems transported via tar archives.
+
+Auditing these methodologies is an open and iterative process. This document
+should accommodate the review of source code. Ultimately, this document should
+be the starting point of further refinements to the algorithm and its future
+versions.
+
+## Concept
+
+The checksum mechanism must ensure the integrity and assurance of the
+filesystem payload.
+
+## Checksum Algorithm Profile
+
+A checksum mechanism must define the following operations and attributes:
+
+* Associated hashing cipher - used to checksum each file payload and attribute
+  information.
+* Checksum list - each file of the filesystem archive has its checksum
+  calculated from the payload and attributes of the file. The final checksum is
+  calculated from this list, with specific ordering.
+* Version - as the algorithm adapts to requirements, there are behaviors of the
+  algorithm to manage by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+|         |      \                                                               |
+|         |       \                                                              |
+|_version_|_cipher__|__                                                          |
+|                      \                                                         |
+|_calculation_mechanics_|______________________expected_sum_____________________|
+```
+
+## Versioning
+
+Versioning was introduced [0] to accommodate differences in the calculation
+needed, and the ability to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
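+
+Since the version is carried as the leading element of the sum string, it can
+be recovered by splitting on the two delimiters described above. A sketch of
+that parsing (this mirrors the label handling in the Go implementation's
+`NewTarSumForLabel`; variable names are illustrative):
+
+```
+sum := "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+mechanics := strings.SplitN(sum, ":", 2)[0] // "tarsum.v1+sha256"
+parts := strings.SplitN(mechanics, "+", 2)
+version, cipher := parts[0], parts[1]       // "tarsum.v1", "sha256"
+```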
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+  checksum calculation
+* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax
+  tar file info headers) keys and values in each file checksum calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation mechanic
+is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for an alternate cipher could include future-proofing the TarSum
+checksum format and using faster cipher hashes for tar filesystem checksums.
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation is such that it takes into consideration
+the lifecycle of the tar archive, in that the tar archive is not an immutable,
+permanent artifact. Otherwise, options like relying on a known hashing cipher
+checksum of the archive itself would be reliable enough. The tar archive of the
+filesystem is used as a transportation medium for Docker images, and the
+archive is discarded once its contents are extracted. Therefore, for consistent
+validation, items such as the order of files in the tar archive and time stamps
+are subject to change once an image is received.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then the
+payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as reference for special ordering.
+
+#### Headers
+
+The following headers are read, in this
+order (with the corresponding representation of each value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
+headers) are included after the above list. These xattr key/value pairs are
+first sorted by key.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+    "{.key}{.value}"
+
+with no newline.
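+
+For example, a hypothetical Version0 regular file entry named `file.txt` with
+mode `0644` (420 in base10), uid `0`, gid `0`, size `4`, mtime `1`, typeflag
+`0`, and empty linkname/uname/gname would contribute the following bytes to its
+file checksum (illustrative only; the exact header set depends on the version,
+as listed above):
+
+```
+namefile.txtmode420uid0gid0size4mtime1typeflag0linknameunamegnamedevmajor0devminor0
+```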
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum,
+including the TarSum version, the associated hash cipher and the hexadecimal
+encoded checksum digest.
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending of files
+with same names as prior files in the archive. The latter file will clobber the
+prior file of the same path. Due to this, the algorithm now accounts for files
+with matching paths, and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgments
+
+Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work of the
+TarSum calculation.
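+
+For reference, a minimal sketch of driving this calculation with the Go
+implementation in `pkg/tarsum` (the `layer.tar` path is illustrative; the
+stream must be fully consumed before calling `Sum`):
+
+```
+f, err := os.Open("layer.tar")
+if err != nil {
+	log.Fatal(err)
+}
+defer f.Close()
+
+ts, err := tarsum.NewTarSum(f, true, tarsum.Version0)
+if err != nil {
+	log.Fatal(err)
+}
+if _, err := io.Copy(ioutil.Discard, ts); err != nil { // populates the file sums
+	log.Fatal(err)
+}
+fmt.Println(ts.Sum(nil)) // e.g. "tarsum+sha256:<hex digest>"
+```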
+ diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go new file mode 100644 index 0000000..86df0e2 --- /dev/null +++ b/pkg/tarsum/tarsum_test.go @@ -0,0 +1,664 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. 
ensuring it has a different hash than above
+		filename: "testdata/collision/collision-1.tar",
+		tarsum:   "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"},
+	{
+		// this tar has a newer version of collision-0.tar, ensuring it has a different hash
+		filename: "testdata/collision/collision-2.tar",
+		tarsum:   "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"},
+	{
+		// this tar has a newer version of collision-1.tar, ensuring it has a different hash
+		filename: "testdata/collision/collision-3.tar",
+		tarsum:   "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+md5:0d7529ec7a8360155b48134b8e599f53",
+		hash:    md5THash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df",
+		hash:    sha1Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c",
+		hash:    sha224Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636",
+		hash:    sha384Hash,
+	},
+	{
+		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+		tarsum:  "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855",
+		hash:    sha512Hash,
+	},
+}
+
+type sizedOptions struct {
+	num      int64
+	size     int64
+	isRand   bool
+	realFile bool
+}
+
+// make a tar:
+// * num is the number of files the tar should have
+// * size is the bytes per file
+// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
+// * realFile will write to a TempFile, instead of an in memory buffer
+func sizedTar(opts sizedOptions) io.Reader {
+	var (
+		fh  io.ReadWriter
+		err error
+	)
+	if opts.realFile {
+		fh, err = ioutil.TempFile("", "tarsum")
+		if err != nil {
+			return nil
+		}
+	} else {
+		fh = bytes.NewBuffer([]byte{})
+	}
+	tarW := tar.NewWriter(fh)
+	defer tarW.Close()
+	for i := int64(0); i < opts.num; i++ {
+		err := tarW.WriteHeader(&tar.Header{
+			Name: fmt.Sprintf("/testdata%d", i),
+			Mode: 0755,
+			Uid:  0,
+			Gid:  0,
+			Size: opts.size,
+		})
+		if err != nil {
+			return nil
+		}
+		var rBuf []byte
+		if opts.isRand {
+			rBuf = make([]byte, 8)
+			_, err = rand.Read(rBuf)
+			if err != nil {
+				return nil
+			}
+		} else {
+			rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+		}
+
+		for i := int64(0); i < opts.size/int64(8); i++ {
+			tarW.Write(rBuf)
+		}
+	}
+	return fh
+}
+
+func emptyTarSum(gzip bool) (TarSum, error) {
+	reader, writer := io.Pipe()
+	tarWriter := tar.NewWriter(writer)
+
+	// Immediately close tarWriter and write-end of the
+	// Pipe in a separate goroutine so we don't block.
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. 
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + defer jfh.Close() + + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", 
+ Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
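+			// (Draining the archive through tr is what drives the
+			// underlying TarSum reads; the checksum is only complete once
+			// io.EOF has been reached, which is why ts.Sum is only called
+			// after this loop.)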
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000..48e2af3 --- /dev/null +++ b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/pkg/tarsum/testdata/collision/collision-2.tar b/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go new file mode 100644 index 0000000..a62cc3e --- /dev/null +++ b/pkg/tarsum/versioning.go @@ -0,0 +1,158 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "io" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. 
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// WriteV1Header writes a tar header to a writer in V1 tarsum format. +func WriteV1Header(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. +type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). 
+ v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go new file mode 100644 index 0000000..88e0a57 --- /dev/null +++ b/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/pkg/tarsum/writercloser.go b/pkg/tarsum/writercloser.go new file mode 100644 index 0000000..9727ecd --- /dev/null +++ 
b/pkg/tarsum/writercloser.go
@@ -0,0 +1,22 @@
+package tarsum
+
+import (
+	"io"
+)
+
+type writeCloseFlusher interface {
+	io.WriteCloser
+	Flush() error
+}
+
+type nopCloseFlusher struct {
+	io.Writer
+}
+
+func (n *nopCloseFlusher) Close() error {
+	return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+	return nil
+}
diff --git a/pkg/templates/templates.go b/pkg/templates/templates.go
new file mode 100644
index 0000000..75a3dd9
--- /dev/null
+++ b/pkg/templates/templates.go
@@ -0,0 +1,78 @@
+package templates
+
+import (
+	"bytes"
+	"encoding/json"
+	"strings"
+	"text/template"
+)
+
+// basicFunctions are the set of initial
+// functions provided to every template.
+var basicFunctions = template.FuncMap{
+	"json": func(v interface{}) string {
+		buf := &bytes.Buffer{}
+		enc := json.NewEncoder(buf)
+		enc.SetEscapeHTML(false)
+		enc.Encode(v)
+		// Remove the trailing newline added by the encoder
+		return strings.TrimSpace(buf.String())
+	},
+	"split":    strings.Split,
+	"join":     strings.Join,
+	"title":    strings.Title,
+	"lower":    strings.ToLower,
+	"upper":    strings.ToUpper,
+	"pad":      padWithSpace,
+	"truncate": truncateWithLength,
+}
+
+// HeaderFunctions are used to create the headers of a table.
+// This is a replacement for basicFunctions for header generation
+// because we want the header to remain intact.
+// Some functions like `split` are irrelevant here, so they are not added.
+var HeaderFunctions = template.FuncMap{
+	"json": func(v string) string {
+		return v
+	},
+	"title": func(v string) string {
+		return v
+	},
+	"lower": func(v string) string {
+		return v
+	},
+	"upper": func(v string) string {
+		return v
+	},
+	"truncate": func(v string, l int) string {
+		return v
+	},
+}
+
+// Parse creates a new anonymous template with the basic functions
+// and parses the given format.
+func Parse(format string) (*template.Template, error) {
+	return NewParse("", format)
+}
+
+// NewParse creates a new tagged template with the basic functions
+// and parses the given format.
+func NewParse(tag, format string) (*template.Template, error) {
+	return template.New(tag).Funcs(basicFunctions).Parse(format)
+}
+
+// padWithSpace adds whitespace to the input if the input is non-empty.
+func padWithSpace(source string, prefix, suffix int) string {
+	if source == "" {
+		return source
+	}
+	return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix)
+}
+
+// truncateWithLength truncates the source string to at most the given length.
+func truncateWithLength(source string, length int) string {
+	if len(source) < length {
+		return source
+	}
+	return source[:length]
+}
diff --git a/pkg/templates/templates_test.go b/pkg/templates/templates_test.go
new file mode 100644
index 0000000..296bcb7
--- /dev/null
+++ b/pkg/templates/templates_test.go
@@ -0,0 +1,88 @@
+package templates
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// GitHub #32120
+func TestParseJSONFunctions(t *testing.T) {
+	tm, err := Parse(`{{json .Ports}}`)
+	assert.NoError(t, err)
+
+	var b bytes.Buffer
+	assert.NoError(t, tm.Execute(&b, map[string]string{"Ports": "0.0.0.0:2->8/udp"}))
+	want := "\"0.0.0.0:2->8/udp\""
+	assert.Equal(t, want, b.String())
+}
+
+func TestParseStringFunctions(t *testing.T) {
+	tm, err := Parse(`{{join (split . ":") "/"}}`)
":") "/"}}`) + assert.NoError(t, err) + + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, "text:with:colon")) + want := "text/with/colon" + assert.Equal(t, want, b.String()) +} + +func TestNewParse(t *testing.T) { + tm, err := NewParse("foo", "this is a {{ . }}") + assert.NoError(t, err) + + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, "string")) + want := "this is a string" + assert.Equal(t, want, b.String()) +} + +func TestParseTruncateFunction(t *testing.T) { + source := "tupx5xzf6hvsrhnruz5cr8gwp" + + testCases := []struct { + template string + expected string + }{ + { + template: `{{truncate . 5}}`, + expected: "tupx5", + }, + { + template: `{{truncate . 25}}`, + expected: "tupx5xzf6hvsrhnruz5cr8gwp", + }, + { + template: `{{truncate . 30}}`, + expected: "tupx5xzf6hvsrhnruz5cr8gwp", + }, + { + template: `{{pad . 3 3}}`, + expected: " tupx5xzf6hvsrhnruz5cr8gwp ", + }, + } + + for _, testCase := range testCases { + tm, err := Parse(testCase.template) + assert.NoError(t, err) + + t.Run("Non Empty Source Test with template: "+testCase.template, func(t *testing.T) { + var b bytes.Buffer + assert.NoError(t, tm.Execute(&b, source)) + assert.Equal(t, testCase.expected, b.String()) + }) + + t.Run("Empty Source Test with template: "+testCase.template, func(t *testing.T) { + var c bytes.Buffer + assert.NoError(t, tm.Execute(&c, "")) + assert.Equal(t, "", c.String()) + }) + + t.Run("Nil Source Test with template: "+testCase.template, func(t *testing.T) { + var c bytes.Buffer + assert.Error(t, tm.Execute(&c, nil)) + assert.Equal(t, "", c.String()) + }) + } +} diff --git a/pkg/term/ascii.go b/pkg/term/ascii.go new file mode 100644 index 0000000..f5262bc --- /dev/null +++ b/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. 
+func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, byte(key[0])) + } + } + return codes, nil +} diff --git a/pkg/term/ascii_test.go b/pkg/term/ascii_test.go new file mode 100644 index 0000000..4a1e7f3 --- /dev/null +++ b/pkg/term/ascii_test.go @@ -0,0 +1,43 @@ +package term + +import "testing" + +func TestToBytes(t *testing.T) { + codes, err := ToBytes("ctrl-a,a") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 1 || codes[1] != 97 { + t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) + } + + codes, err = ToBytes("shift-z") + if err == nil { + t.Fatalf("Expected error, got none") + } + + codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") + if err != nil { + t.Fatal(err) + } + if len(codes) != 4 { + t.Fatalf("Expected 4 codes, got %d", len(codes)) + } + if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { + t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) + } + + codes, err = ToBytes("DEL,+") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 127 || codes[1] != 43 { + t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) + } +} diff --git a/pkg/term/proxy.go b/pkg/term/proxy.go new file mode 100644 index 0000000..e648eb8 --- /dev/null +++ b/pkg/term/proxy.go @@ -0,0 +1,74 @@ +package term + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. 
+ // Store the current escape key we found so we can look for the next one on + // the next read. + // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/pkg/term/proxy_test.go b/pkg/term/proxy_test.go new file mode 100644 index 0000000..baba193 --- /dev/null +++ b/pkg/term/proxy_test.go @@ -0,0 +1,92 @@ +package term + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEscapeProxyRead(t *testing.T) { + escapeKeys, _ := ToBytes("DEL") + keys, _ := ToBytes("a,b,c,+") + reader := NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf := make([]byte, len(keys)) + nr, err := reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + keys, _ = ToBytes("") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.Error(t, err, "Should throw error when no keys are to read") + require.EqualValues(t, nr, 0, "nr should be zero") + require.Condition(t, func() (success bool) { return len(keys) == 0 && len(buf) == 0 }, "keys & the read buffer size should be zero") + + escapeKeys, _ = ToBytes("ctrl-x,ctrl-@") + keys, _ = ToBytes("DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c") + keys, _ = ToBytes("ctrl-c") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.Condition(t, func() (success bool) { + return reflect.TypeOf(err).Name() == "EscapeError" + }, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,ctrl-z") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + nr, err = reader.Read(buf) + require.Condition(t, func() (success bool) { + return reflect.TypeOf(err).Name() == "EscapeError" + }, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[1:], buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,DEL,+") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") 
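+	// Final scenarios: the first byte matches the start of the escape
+	// sequence but the next byte does not, so the proxy must replay the
+	// held-back ctrl-c together with the input that follows it.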
+ keys, _ = ToBytes("ctrl-c,DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, 0, "nr should be equal to 0") + require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + require.NoError(t, err) + require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + require.Equal(t, keys, buf, "keys & the read buffer should be equal") +} diff --git a/pkg/term/tc.go b/pkg/term/tc.go new file mode 100644 index 0000000..6d2dfd3 --- /dev/null +++ b/pkg/term/tc.go @@ -0,0 +1,21 @@ +// +build !windows +// +build !solaris !cgo + +package term + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/pkg/term/tc_solaris_cgo.go b/pkg/term/tc_solaris_cgo.go new file mode 100644 index 0000000..50234af --- /dev/null +++ b/pkg/term/tc_solaris_cgo.go @@ -0,0 +1,65 @@ +// +build solaris,cgo + +package term + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for unix.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + + /* + VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned + Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It + needs to be explicitly set to 1. + */ + newState.Cc[C.VMIN] = 1 + newState.Cc[C.VTIME] = 0 + + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/pkg/term/term.go b/pkg/term/term.go new file mode 100644 index 0000000..4f59d8d --- /dev/null +++ b/pkg/term/term.go @@ -0,0 +1,124 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). 
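+//
+// A typical caller first checks whether a stream is attached to a TTY before
+// toggling modes, along these lines (sketch):
+//
+//	if fd, isTerm := term.GetFdInfo(os.Stdin); isTerm {
+//		if state, err := term.SetRawTerminal(fd); err == nil {
+//			defer term.RestoreTerminal(fd, state)
+//		}
+//	}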
+package term + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + + "golang.org/x/sys/unix" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. 
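+// Since the returned state is nil on UNIX, a caller restoring it would
+// typically guard the call (sketch):
+//
+//	if state, err := term.SetRawTerminalOutput(fd); err == nil && state != nil {
+//		defer term.RestoreTerminal(fd, state)
+//	}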
+func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/pkg/term/term_linux_test.go b/pkg/term/term_linux_test.go new file mode 100644 index 0000000..a1628c4 --- /dev/null +++ b/pkg/term/term_linux_test.go @@ -0,0 +1,120 @@ +//+build linux + +package term + +import ( + "flag" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var rootEnabled bool + +func init() { + flag.BoolVar(&rootEnabled, "test.root", false, "enable tests that require root") +} + +// RequiresRoot skips tests that require root, unless the test.root flag has +// been set +func RequiresRoot(t *testing.T) { + if !rootEnabled { + t.Skip("skipping test that requires root") + return + } + assert.Equal(t, 0, os.Getuid(), "This test must be run as root.") +} + +func newTtyForTest(t *testing.T) (*os.File, error) { + RequiresRoot(t) + return os.OpenFile("/dev/tty", os.O_RDWR, os.ModeDevice) +} + +func newTempFile() (*os.File, error) { + return ioutil.TempFile(os.TempDir(), "temp") +} + +func TestGetWinsize(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + winSize, err := GetWinsize(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, winSize) + require.NotNil(t, winSize.Height) + require.NotNil(t, winSize.Width) + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} + err = SetWinsize(tty.Fd(), &newSize) + require.NoError(t, err) + winSize, err = GetWinsize(tty.Fd()) + require.NoError(t, err) + require.Equal(t, *winSize, newSize) +} + +func TestSetWinsize(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + winSize, err := GetWinsize(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, winSize) + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} + err = SetWinsize(tty.Fd(), &newSize) + require.NoError(t, err) + winSize, err = GetWinsize(tty.Fd()) + require.NoError(t, err) + require.Equal(t, *winSize, newSize) +} + +func TestGetFdInfo(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + inFd, isTerminal := GetFdInfo(tty) + require.Equal(t, inFd, tty.Fd()) + require.Equal(t, isTerminal, true) + tmpFile, err := newTempFile() + defer tmpFile.Close() + inFd, isTerminal = GetFdInfo(tmpFile) + require.Equal(t, inFd, tmpFile.Fd()) + require.Equal(t, isTerminal, false) +} + +func TestIsTerminal(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + isTerminal := IsTerminal(tty.Fd()) + require.Equal(t, isTerminal, true) + tmpFile, err := newTempFile() + defer tmpFile.Close() + isTerminal = IsTerminal(tmpFile.Fd()) + require.Equal(t, isTerminal, false) +} + +func TestSaveState(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + state, err := SaveState(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, state) + tty, err = newTtyForTest(t) + defer tty.Close() + err = RestoreTerminal(tty.Fd(), state) + require.NoError(t, err) +} + +func TestDisableEcho(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + require.NoError(t, err) + state, 
err := SetRawTerminal(tty.Fd()) + require.NoError(t, err) + require.NotNil(t, state) + err = DisableEcho(tty.Fd(), state) + require.NoError(t, err) +} diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go new file mode 100644 index 0000000..fd023ba --- /dev/null +++ b/pkg/term/term_windows.go @@ -0,0 +1,233 @@ +// +build windows + +package term + +import ( + "io" + "os" + "os/signal" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" + "golang.org/x/sys/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + enableVirtualTerminalInput = 0x0200 + enableVirtualTerminalProcessing = 0x0004 + disableNewlineAutoReturn = 0x0008 +) + +// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that enableVirtualTerminalInput is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + if emulateStdin { + stdIn = windowsconsole.NewAnsiReader(windows.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windowsconsole.NewAnsiWriter(windows.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windowsconsole.NewAnsiWriter(windows.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. 
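+// The console reports its visible window as an inclusive SMALL_RECT, which
+// is why the conversion below adds 1 to Right-Left and Bottom-Top.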
+func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windowsconsole.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. 
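+// On Windows, raw mode is a console-mode bitmask rather than a set of
+// termios flags; an abbreviated sketch of the bit operations below:
+//
+//	mode &^= ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT // plus mouse/window input
+//	mode |= ENABLE_EXTENDED_FLAGS | ENABLE_INSERT_MODE | ENABLE_QUICK_EDIT_MODE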
+func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= enableVirtualTerminalInput + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/pkg/term/termios_bsd.go b/pkg/term/termios_bsd.go new file mode 100644 index 0000000..c47341e --- /dev/null +++ b/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd + +package term + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go new file mode 100644 index 0000000..31bfa84 --- /dev/null +++ b/pkg/term/termios_linux.go @@ -0,0 +1,38 @@ +package term + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
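+// The flag changes below mirror cfmakeraw(3): special input processing and
+// XON/XOFF flow control are disabled, output post-processing is turned off,
+// echo, canonical mode, signal generation and extended input are switched
+// off, and the character size is forced to 8 bits with parity disabled.
+// Unlike the BSD variant earlier in this patch, VMIN and VTIME are left at
+// their current values here.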
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/pkg/term/windows/ansi_reader.go b/pkg/term/windows/ansi_reader.go new file mode 100644 index 0000000..29d3963 --- /dev/null +++ b/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windowsconsole + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
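+// The flow is: serve any bytes cached by a previous call first; otherwise
+// read console INPUT_RECORDs, translate key-down events into ANSI byte
+// sequences, return what fits in p, and cache the excess for the next call.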
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. 
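+	// (The operand of unsafe.Sizeof is never evaluated, so the plainer
+	// spelling unsafe.Sizeof(winterm.INPUT_RECORD{}) should yield the same
+	// compile-time constant without the pointer cast.)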
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
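+// For example, VK_UP with no modifiers held renders as escapeSequence+"A"
+// ("\x1B[A" with the usual CSI prefix), while held modifier keys contribute
+// an ANSI parameter via getControlKeysModifier.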
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/pkg/term/windows/ansi_writer.go b/pkg/term/windows/ansi_writer.go new file mode 100644 index 0000000..256577e --- /dev/null +++ b/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windowsconsole + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/pkg/term/windows/console.go b/pkg/term/windows/console.go new file mode 100644 index 0000000..4bad32e --- /dev/null +++ b/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windowsconsole + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. 
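+// It is the Windows counterpart of term.GetFdInfo; e.g. (sketch):
+//
+//	fd, isConsole := windowsconsole.GetHandleInfo(os.Stdout)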
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+	switch t := in.(type) {
+	case *ansiReader:
+		return t.Fd(), true
+	case *ansiWriter:
+		return t.Fd(), true
+	}
+
+	var inFd uintptr
+	var isTerminal bool
+
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminal = IsConsole(inFd)
+	}
+	return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+	_, e := winterm.GetConsoleMode(fd)
+	return e == nil
+}
diff --git a/pkg/term/windows/windows.go b/pkg/term/windows/windows.go
new file mode 100644
index 0000000..d67021e
--- /dev/null
+++ b/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windowsconsole
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+var initOnce sync.Once
+
+func initLogger() {
+	initOnce.Do(func() {
+		logFile := ioutil.Discard
+
+		if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+			logFile, _ = os.Create("ansiReaderWriter.log")
+		}
+
+		logger = &logrus.Logger{
+			Out:       logFile,
+			Formatter: new(logrus.TextFormatter),
+			Level:     logrus.DebugLevel,
+		}
+	})
+}
diff --git a/pkg/term/windows/windows_test.go b/pkg/term/windows/windows_test.go
new file mode 100644
index 0000000..3c8084b
--- /dev/null
+++ b/pkg/term/windows/windows_test.go
@@ -0,0 +1,3 @@
+// This file is necessary to pass the Docker tests.
+
+package windowsconsole
diff --git a/pkg/term/winsize.go b/pkg/term/winsize.go
new file mode 100644
index 0000000..f58367f
--- /dev/null
+++ b/pkg/term/winsize.go
@@ -0,0 +1,30 @@
+// +build !solaris,!windows
+
+package term
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0 (the syscall succeeded)
+	if err == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0 (the syscall succeeded)
+	if err == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/pkg/term/winsize_solaris_cgo.go b/pkg/term/winsize_solaris_cgo.go
new file mode 100644
index 0000000..39c1d32
--- /dev/null
+++ b/pkg/term/winsize_solaris_cgo.go
@@ -0,0 +1,42 @@
+// +build solaris,cgo
+
+package term
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+/*
+#include <unistd.h>
+#include <stropts.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, int cmd, struct winsize *ws) {
+	return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
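A usage sketch for the GetWinsize/SetWinsize pair (the Unix variant above and the Solaris cgo variant below); it assumes the package's Winsize type exposes Width and Height fields, as elsewhere in pkg/term:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd := os.Stdin.Fd()
	ws, err := term.GetWinsize(fd) // ioctl(TIOCGWINSZ) under the hood
	if err != nil {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal:", err)
		return
	}
	fmt.Printf("terminal is %d columns x %d rows\n", ws.Width, ws.Height)
}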
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// Skip retval = 0 (the ioctl succeeded)
+	if ret == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// Skip retval = 0 (the ioctl succeeded)
+	if ret == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/pkg/testutil/cmd/command.go b/pkg/testutil/cmd/command.go
new file mode 100644
index 0000000..36aba59
--- /dev/null
+++ b/pkg/testutil/cmd/command.go
@@ -0,0 +1,307 @@
+package cmd
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/system"
+	"github.com/go-check/check"
+)
+
+type testingT interface {
+	Fatalf(string, ...interface{})
+}
+
+const (
+	// None is a token to inform Result.Assert that the output should be empty
+	None string = ""
+)
+
+type lockedBuffer struct {
+	m   sync.RWMutex
+	buf bytes.Buffer
+}
+
+func (buf *lockedBuffer) Write(b []byte) (int, error) {
+	buf.m.Lock()
+	defer buf.m.Unlock()
+	return buf.buf.Write(b)
+}
+
+func (buf *lockedBuffer) String() string {
+	buf.m.RLock()
+	defer buf.m.RUnlock()
+	return buf.buf.String()
+}
+
+// Result stores the result of running a command
+type Result struct {
+	Cmd      *exec.Cmd
+	ExitCode int
+	Error    error
+	// Timeout is true if the command was killed because it ran for too long
+	Timeout   bool
+	outBuffer *lockedBuffer
+	errBuffer *lockedBuffer
+}
+
+// Assert compares the Result against the Expected struct, and fails the test if
+// any of the expectations are not met.
+func (r *Result) Assert(t testingT, exp Expected) *Result {
+	err := r.Compare(exp)
+	if err == nil {
+		return r
+	}
+	_, file, line, ok := runtime.Caller(1)
+	if ok {
+		t.Fatalf("at %s:%d - %s", filepath.Base(file), line, err.Error())
+	} else {
+		t.Fatalf("(no file/line info) - %s", err.Error())
+	}
+	return nil
+}
+
+// Compare returns a formatted error with the command, stdout, stderr, exit
+// code, and any failed expectations
+func (r *Result) Compare(exp Expected) error {
+	errors := []string{}
+	add := func(format string, args ...interface{}) {
+		errors = append(errors, fmt.Sprintf(format, args...))
+	}
+
+	if exp.ExitCode != r.ExitCode {
+		add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode)
+	}
+	if exp.Timeout != r.Timeout {
+		if exp.Timeout {
+			add("Expected command to timeout")
+		} else {
+			add("Expected command to finish, but it hit the timeout")
+		}
+	}
+	if !matchOutput(exp.Out, r.Stdout()) {
+		add("Expected stdout to contain %q", exp.Out)
+	}
+	if !matchOutput(exp.Err, r.Stderr()) {
+		add("Expected stderr to contain %q", exp.Err)
+	}
+	switch {
+	// If a non-zero exit code is expected there is going to be an error.
+	// Don't require an error message as well as an exit code, because the
+	// error message is going to be "exit status <code>", which is not useful.
+	case exp.Error == "" && exp.ExitCode != 0:
+	case exp.Error == "" && r.Error != nil:
+		add("Expected no error")
+	case exp.Error != "" && r.Error == nil:
+		add("Expected error to contain %q, but there was no error", exp.Error)
+	case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error):
+		add("Expected error to contain %q", exp.Error)
+	}
+
+	if len(errors) == 0 {
+		return nil
+	}
+	return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n"))
+}
+
+func matchOutput(expected string, actual string) bool {
+	switch expected {
+	case None:
+		return actual == ""
+	default:
+		return strings.Contains(actual, expected)
+	}
+}
+
+func (r *Result) String() string {
+	var timeout string
+	if r.Timeout {
+		timeout = " (timeout)"
+	}
+
+	return fmt.Sprintf(`
+Command: %s
+ExitCode: %d%s
+Error: %v
+Stdout: %v
+Stderr: %v
+`,
+		strings.Join(r.Cmd.Args, " "),
+		r.ExitCode,
+		timeout,
+		r.Error,
+		r.Stdout(),
+		r.Stderr())
+}
+
+// Expected is the expected output from a Command. This struct is compared to a
+// Result struct by Result.Assert().
+type Expected struct {
+	ExitCode int
+	Timeout  bool
+	Error    string
+	Out      string
+	Err      string
+}
+
+// Success is the default expected result
+var Success = Expected{}
+
+// Stdout returns the stdout of the process as a string
+func (r *Result) Stdout() string {
+	return r.outBuffer.String()
+}
+
+// Stderr returns the stderr of the process as a string
+func (r *Result) Stderr() string {
+	return r.errBuffer.String()
+}
+
+// Combined returns the stdout and stderr combined into a single string
+func (r *Result) Combined() string {
+	return r.outBuffer.String() + r.errBuffer.String()
+}
+
+// SetExitError sets Error and ExitCode based on Error
+func (r *Result) SetExitError(err error) {
+	if err == nil {
+		return
+	}
+	r.Error = err
+	r.ExitCode = system.ProcessExitCode(err)
+}
+
+type matches struct{}
+
+// Info returns the CheckerInfo
+func (m *matches) Info() *check.CheckerInfo {
+	return &check.CheckerInfo{
+		Name:   "CommandMatches",
+		Params: []string{"result", "expected"},
+	}
+}
+
+// Check compares a result against the expected
+func (m *matches) Check(params []interface{}, names []string) (bool, string) {
+	result, ok := params[0].(*Result)
+	if !ok {
+		return false, fmt.Sprintf("result must be a *Result, not %T", params[0])
+	}
+	expected, ok := params[1].(Expected)
+	if !ok {
+		return false, fmt.Sprintf("expected must be an Expected, not %T", params[1])
+	}
+
+	err := result.Compare(expected)
+	if err == nil {
+		return true, ""
+	}
+	return false, err.Error()
+}
+
+// Matches is a gocheck.Checker for comparing a Result against an Expected
+var Matches = &matches{}
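A short sketch of how the Result/Expected pair is meant to be driven from a test, assuming a Unix-like environment where `echo` is available; the test name is illustrative only:

package cmd_test

import (
	"testing"
	"time"

	"github.com/docker/docker/pkg/testutil/cmd"
)

// Run a command with a timeout and assert on its exit code and output in one step.
func TestEchoSketch(t *testing.T) {
	result := cmd.RunCmd(cmd.Cmd{
		Command: []string{"echo", "hello"},
		Timeout: 2 * time.Second,
	})
	// Success (exit code 0, no timeout) plus a substring match on stdout.
	result.Assert(t, cmd.Expected{ExitCode: 0, Out: "hello"})
}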
+// Cmd contains the arguments and options for a process to run as part of a test
+// suite.
+type Cmd struct {
+	Command []string
+	Timeout time.Duration
+	Stdin   io.Reader
+	Stdout  io.Writer
+	Dir     string
+	Env     []string
+}
+
+// Command creates a simple Cmd with the specified command and arguments
+func Command(command string, args ...string) Cmd {
+	return Cmd{Command: append([]string{command}, args...)}
+}
+
+// RunCmd runs a command and returns a Result
+func RunCmd(cmd Cmd, cmdOperators ...func(*Cmd)) *Result {
+	for _, op := range cmdOperators {
+		op(&cmd)
+	}
+	result := StartCmd(cmd)
+	if result.Error != nil {
+		return result
+	}
+	return WaitOnCmd(cmd.Timeout, result)
+}
+
+// RunCommand parses a command line and runs it, returning a result
+func RunCommand(command string, args ...string) *Result {
+	return RunCmd(Command(command, args...))
+}
+
+// StartCmd starts a command, but doesn't wait for it to finish
+func StartCmd(cmd Cmd) *Result {
+	result := buildCmd(cmd)
+	if result.Error != nil {
+		return result
+	}
+	result.SetExitError(result.Cmd.Start())
+	return result
+}
+
+func buildCmd(cmd Cmd) *Result {
+	var execCmd *exec.Cmd
+	switch len(cmd.Command) {
+	case 1:
+		execCmd = exec.Command(cmd.Command[0])
+	default:
+		execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...)
+	}
+	outBuffer := new(lockedBuffer)
+	errBuffer := new(lockedBuffer)
+
+	execCmd.Stdin = cmd.Stdin
+	execCmd.Dir = cmd.Dir
+	execCmd.Env = cmd.Env
+	if cmd.Stdout != nil {
+		execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout)
+	} else {
+		execCmd.Stdout = outBuffer
+	}
+	execCmd.Stderr = errBuffer
+	return &Result{
+		Cmd:       execCmd,
+		outBuffer: outBuffer,
+		errBuffer: errBuffer,
+	}
+}
+
+// WaitOnCmd waits for a command to complete. If timeout is non-zero then
+// it only waits until the timeout expires.
+func WaitOnCmd(timeout time.Duration, result *Result) *Result {
+	if timeout == time.Duration(0) {
+		result.SetExitError(result.Cmd.Wait())
+		return result
+	}
+
+	done := make(chan error, 1)
+	// Wait for command to exit in a goroutine
+	go func() {
+		done <- result.Cmd.Wait()
+	}()
+
+	select {
+	case <-time.After(timeout):
+		killErr := result.Cmd.Process.Kill()
+		if killErr != nil {
+			fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr)
+		}
+		result.Timeout = true
+	case err := <-done:
+		result.SetExitError(err)
+	}
+	return result
+}
diff --git a/pkg/testutil/cmd/command_test.go b/pkg/testutil/cmd/command_test.go
new file mode 100644
index 0000000..d24b42b
--- /dev/null
+++ b/pkg/testutil/cmd/command_test.go
@@ -0,0 +1,118 @@
+package cmd
+
+import (
+	"runtime"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestRunCommand(t *testing.T) {
+	// TODO Windows: Port this test
+	if runtime.GOOS == "windows" {
+		t.Skip("Needs porting to Windows")
+	}
+
+	var cmd string
+	if runtime.GOOS == "solaris" {
+		cmd = "gls"
+	} else {
+		cmd = "ls"
+	}
+	result := RunCommand(cmd)
+	result.Assert(t, Expected{})
+
+	result = RunCommand("doesnotexists")
+	expectedError := `exec: "doesnotexists": executable file not found`
+	result.Assert(t, Expected{ExitCode: 127, Error: expectedError})
+
+	result = RunCommand(cmd, "-z")
+	result.Assert(t, Expected{
+		ExitCode: 2,
+		Error:    "exit status 2",
+		Err:      "invalid option",
+	})
+	assert.Contains(t, result.Combined(), "invalid option")
+}
+
+func TestRunCommandWithCombined(t *testing.T) {
+	// TODO Windows: Port this test
+	if runtime.GOOS == "windows" {
+		t.Skip("Needs porting to Windows")
+	}
+
+	result := RunCommand("ls", "-a")
+	result.Assert(t, Expected{})
+
+	assert.Contains(t, result.Combined(), "..")
+	assert.Contains(t, result.Stdout(), "..")
+}
+
+func TestRunCommandWithTimeoutFinished(t *testing.T) {
+	// TODO Windows: Port this test
+	if runtime.GOOS == "windows" {
+		t.Skip("Needs porting to Windows")
+	}
+
+	result := RunCmd(Cmd{
+		Command: []string{"ls", "-a"},
+		Timeout: 50 * time.Millisecond,
+	})
+	result.Assert(t, Expected{Out: ".."})
+}
+
+func TestRunCommandWithTimeoutKilled(t *testing.T) {
+	// TODO Windows: Port this test
+	if runtime.GOOS == "windows" {
+		t.Skip("Needs porting to Windows")
+	}
+
+	command := []string{"sh", "-c", "while true ; do echo 1 ; sleep .5 ; done"}
+	result := RunCmd(Cmd{Command: command, Timeout: 1250 * time.Millisecond})
+	result.Assert(t, Expected{Timeout: true})
+
+	ones := strings.Split(result.Stdout(), "\n")
+	assert.Len(t, ones, 4)
+}
+
+func TestRunCommandWithErrors(t *testing.T) {
+	result := RunCommand("/foobar")
+	result.Assert(t, Expected{Error: "foobar", ExitCode: 127})
+}
+
+func TestRunCommandWithStdoutStderr(t *testing.T) {
+	result := RunCommand("echo", "hello", "world")
+	result.Assert(t, Expected{Out: "hello world\n", Err: None})
+}
+
+func TestRunCommandWithStdoutStderrError(t *testing.T) {
+	result := RunCommand("doesnotexists")
+
+	expected := `exec: "doesnotexists": executable file not found`
+	result.Assert(t, Expected{Out: None, Err: None, ExitCode: 127, Error: expected})
+
+	switch runtime.GOOS {
+	case "windows":
+		expected = "ls: unknown option"
+	case "solaris":
+		expected = "gls: invalid option"
+	default:
+		expected = "ls: invalid option"
+	}
+
+	var cmd string
+	if runtime.GOOS == "solaris" {
+		cmd = "gls"
+	} else {
+		cmd = "ls"
+	}
+	result = RunCommand(cmd, "-z")
+	result.Assert(t, Expected{
+		Out:      None,
+		Err:      expected,
+		ExitCode: 2,
+		Error:    "exit status 2",
+	})
+}
diff --git a/pkg/testutil/golden/golden.go b/pkg/testutil/golden/golden.go
new file mode 100644
index 0000000..8f725da
--- /dev/null
+++ b/pkg/testutil/golden/golden.go
@@ -0,0 +1,28 @@
+// Package golden provides functions and helpers to use golden files for
+// testing purposes.
+package golden
+
+import (
+	"flag"
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+)
+
+var update = flag.Bool("test.update", false, "update golden file")
+
+// Get returns the golden file content. If the `test.update` flag is set, it
+// first updates the file with the current output and then returns it.
+func Get(t *testing.T, actual []byte, filename string) []byte {
+	golden := filepath.Join("testdata", filename)
+	if *update {
+		if err := ioutil.WriteFile(golden, actual, 0644); err != nil {
+			t.Fatal(err)
+		}
+	}
+	expected, err := ioutil.ReadFile(golden)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return expected
+}
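A hypothetical test showing the intended golden-file workflow; `help.golden` and the produced output are illustrative only, and `go test -test.update` refreshes the file instead of comparing:

package mypkg

import (
	"bytes"
	"testing"

	"github.com/docker/docker/pkg/testutil/golden"
)

func TestHelpOutput(t *testing.T) {
	actual := []byte("usage: example\n") // whatever the code under test produced
	// Reads testdata/help.golden, or rewrites it first when -test.update is set.
	expected := golden.Get(t, actual, "help.golden")
	if !bytes.Equal(actual, expected) {
		t.Errorf("output did not match golden file")
	}
}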
diff --git a/pkg/testutil/helpers.go b/pkg/testutil/helpers.go
new file mode 100644
index 0000000..c291148
--- /dev/null
+++ b/pkg/testutil/helpers.go
@@ -0,0 +1,33 @@
+package testutil
+
+import (
+	"strings"
+	"unicode"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// ErrorContains checks that the error is not nil, and contains the expected
+// substring.
+func ErrorContains(t require.TestingT, err error, expectedError string) {
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), expectedError)
+}
+
+// EqualNormalizedString compares the actual value to the expected value after
+// applying the specified transform function. It fails the test if the two
+// transformed strings are not equal.
+// For example `EqualNormalizedString(t, RemoveSpace, "foo\n", "foo")` wouldn't fail the test as
+// spaces (and thus '\n') are removed before comparing the strings.
+func EqualNormalizedString(t require.TestingT, transformFun func(rune) rune, actual, expected string) {
+	require.Equal(t, strings.Map(transformFun, expected), strings.Map(transformFun, actual))
+}
+
+// RemoveSpace returns -1 if the specified rune is considered a space (unicode),
+// and the rune itself otherwise.
+func RemoveSpace(r rune) rune {
+	if unicode.IsSpace(r) {
+		return -1
+	}
+	return r
+}
diff --git a/pkg/testutil/pkg.go b/pkg/testutil/pkg.go
new file mode 100644
index 0000000..110b2e6
--- /dev/null
+++ b/pkg/testutil/pkg.go
@@ -0,0 +1 @@
+package testutil
diff --git a/pkg/testutil/tempfile/tempfile.go b/pkg/testutil/tempfile/tempfile.go
new file mode 100644
index 0000000..01474ba
--- /dev/null
+++ b/pkg/testutil/tempfile/tempfile.go
@@ -0,0 +1,56 @@
+package tempfile
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TempFile is a temporary file that can be used with unit tests. TempFile
+// reduces the boilerplate setup required in each test case by handling
+// setup errors.
+type TempFile struct {
+	File *os.File
+}
+
+// NewTempFile returns a new temp file with the given contents
+func NewTempFile(t require.TestingT, prefix string, content string) *TempFile {
+	file, err := ioutil.TempFile("", prefix+"-")
+	require.NoError(t, err)
+
+	_, err = file.Write([]byte(content))
+	require.NoError(t, err)
+	file.Close()
+	return &TempFile{File: file}
+}
+
+// Name returns the filename
+func (f *TempFile) Name() string {
+	return f.File.Name()
+}
+
+// Remove removes the file
+func (f *TempFile) Remove() {
+	os.Remove(f.Name())
+}
+
+// TempDir is a temporary directory that can be used with unit tests. TempDir
+// reduces the boilerplate setup required in each test case by handling
+// setup errors.
+type TempDir struct {
+	Path string
+}
+
+// NewTempDir returns a new temp directory
+func NewTempDir(t require.TestingT, prefix string) *TempDir {
+	path, err := ioutil.TempDir("", prefix+"-")
+	require.NoError(t, err)
+
+	return &TempDir{Path: path}
+}
+
+// Remove removes the directory
+func (f *TempDir) Remove() {
+	os.Remove(f.Path)
+}
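A sketch of the boilerplate this package removes; the config content and test name are illustrative:

package mypkg

import (
	"io/ioutil"
	"testing"

	"github.com/docker/docker/pkg/testutil/tempfile"
)

func TestReadConfig(t *testing.T) {
	// Setup errors (TempFile creation, writing) fail the test automatically.
	f := tempfile.NewTempFile(t, "config", `{"debug": true}`)
	defer f.Remove()

	content, err := ioutil.ReadFile(f.Name())
	if err != nil {
		t.Fatal(err)
	}
	if string(content) != `{"debug": true}` {
		t.Fatalf("unexpected content: %s", content)
	}
}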
diff --git a/pkg/testutil/utils.go b/pkg/testutil/utils.go
new file mode 100644
index 0000000..0522dde
--- /dev/null
+++ b/pkg/testutil/utils.go
@@ -0,0 +1,218 @@
+package testutil
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/pkg/system"
+)
+
+// IsKilled processes the specified error and returns whether the process was killed or not.
+func IsKilled(err error) bool {
+	if exitErr, ok := err.(*exec.ExitError); ok {
+		status, ok := exitErr.Sys().(syscall.WaitStatus)
+		if !ok {
+			return false
+		}
+		// Checking status.ExitStatus() is required on Windows, which implements
+		// neither Signal() nor Signaled(); there, a bad exit status is taken to
+		// mean the process was killed (and in tests we do kill).
+		return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0
+	}
+	return false
+}
+
+func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
+	out, err := cmd.CombinedOutput()
+	exitCode = system.ProcessExitCode(err)
+	output = string(out)
+	return
+}
+
+// RunCommandPipelineWithOutput runs the array of commands with the output
+// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do).
+// It returns the output of the final command, its exit code, and an error
+// if something went wrong.
+func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) {
+	if len(cmds) < 2 {
+		return "", 0, errors.New("pipeline does not have multiple cmds")
+	}
+
+	// connect stdin of each cmd to stdout pipe of previous cmd
+	for i, cmd := range cmds {
+		if i > 0 {
+			prevCmd := cmds[i-1]
+			cmd.Stdin, err = prevCmd.StdoutPipe()
+
+			if err != nil {
+				return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err)
+			}
+		}
+	}
+
+	// start all cmds except the last
+	for _, cmd := range cmds[:len(cmds)-1] {
+		if err = cmd.Start(); err != nil {
+			return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err)
+		}
+	}
+
+	defer func() {
+		var pipeErrMsgs []string
+		// wait all cmds except the last to release their resources
+		for _, cmd := range cmds[:len(cmds)-1] {
+			if pipeErr := cmd.Wait(); pipeErr != nil {
+				pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr))
+			}
+		}
+		if len(pipeErrMsgs) > 0 && err == nil {
+			err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", "))
+		}
+	}()
+
+	// wait on last cmd
+	return runCommandWithOutput(cmds[len(cmds)-1])
+}
+
+// ConvertSliceOfStringsToMap converts a slice of strings into a map
+// with the strings as keys and an empty struct as values.
+func ConvertSliceOfStringsToMap(input []string) map[string]struct{} {
+	output := make(map[string]struct{})
+	for _, v := range input {
+		output[v] = struct{}{}
+	}
+	return output
+}
+
+// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory)
+// and returns an error if different.
+func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
+	var (
+		e1Entries = make(map[string]struct{})
+		e2Entries = make(map[string]struct{})
+	)
+	for _, e := range e1 {
+		e1Entries[e.Name()] = struct{}{}
+	}
+	for _, e := range e2 {
+		e2Entries[e.Name()] = struct{}{}
+	}
+	if !reflect.DeepEqual(e1Entries, e2Entries) {
+		return fmt.Errorf("entries differ")
+	}
+	return nil
+}
+
+// ListTar lists the entries of a tar.
+func ListTar(f io.Reader) ([]string, error) {
+	tr := tar.NewReader(f)
+	var entries []string
+
+	for {
+		th, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			return entries, nil
+		}
+		if err != nil {
+			return entries, err
+		}
+		entries = append(entries, th.Name)
+	}
+}
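A sketch of how RunCommandPipelineWithOutput wires commands together, equivalent to `echo -n 11 | wc -m` on a Unix-like system (the same pipeline the tests later in this patch exercise):

package main

import (
	"fmt"
	"os/exec"

	"github.com/docker/docker/pkg/testutil"
)

func main() {
	// Stdout of the first command is wired to stdin of the second, and only
	// the final command's combined output is returned.
	out, exitCode, err := testutil.RunCommandPipelineWithOutput(
		exec.Command("echo", "-n", "11"),
		exec.Command("wc", "-m"),
	)
	fmt.Println(out, exitCode, err) // "2\n" 0 <nil> on a typical Linux box
}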
+// RandomTmpDirPath provides a temporary path with a random string appended.
+// It does not create the directory, nor check whether it exists.
+func RandomTmpDirPath(s string, platform string) string {
+	tmp := "/tmp"
+	if platform == "windows" {
+		tmp = os.Getenv("TEMP")
+	}
+	path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
+	if platform == "windows" {
+		return filepath.FromSlash(path) // Using \
+	}
+	return filepath.ToSlash(path) // Using /
+}
+
+// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping
+// for interval duration. Returns total read bytes. Send true to the
+// stop channel to return before reading to EOF on the reader.
+func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
+	buffer := make([]byte, chunkSize)
+	for {
+		var readBytes int
+		readBytes, err = reader.Read(buffer)
+		n += readBytes
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			return
+		}
+		select {
+		case <-stop:
+			return
+		case <-time.After(interval):
+		}
+	}
+}
+
+// ParseCgroupPaths parses 'procCgroupData', which is the output of '/proc/<pid>/cgroup',
+// and returns a map with the cgroup name as key and the path as value.
+func ParseCgroupPaths(procCgroupData string) map[string]string {
+	cgroupPaths := map[string]string{}
+	for _, line := range strings.Split(procCgroupData, "\n") {
+		parts := strings.Split(line, ":")
+		if len(parts) != 3 {
+			continue
+		}
+		cgroupPaths[parts[1]] = parts[2]
+	}
+	return cgroupPaths
+}
+
+// ChannelBuffer holds a chan of byte slices that can be populated in a goroutine.
+type ChannelBuffer struct {
+	C chan []byte
+}
+
+// Write implements Writer.
+func (c *ChannelBuffer) Write(b []byte) (int, error) {
+	c.C <- b
+	return len(b), nil
+}
+
+// Close closes the go channel.
+func (c *ChannelBuffer) Close() error {
+	close(c.C)
+	return nil
+}
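A sketch pairing a producer goroutine with the ReadTimeout method defined just below; timing here is illustrative, so treat the output as a best-effort example rather than a guarantee:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/testutil"
)

func main() {
	buf := &testutil.ChannelBuffer{C: make(chan []byte, 1)}
	defer buf.Close()

	go buf.Write([]byte("ping")) // producer fills the channel

	b := make([]byte, 4)
	// Blocks for up to 100ms waiting for the producer; errors out on timeout.
	if n, err := buf.ReadTimeout(b, 100*time.Millisecond); err == nil {
		fmt.Println(string(b[:n])) // "ping"
	}
}

+// ReadTimeout reads the content of the channel into the specified byte array with
+// the specified duration as timeout.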
+func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { + select { + case b := <-c.C: + return copy(p[0:], b), nil + case <-time.After(n): + return -1, fmt.Errorf("timeout reading from channel") + } +} + +// ReadBody read the specified ReadCloser content and returns it +func ReadBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +} diff --git a/pkg/testutil/utils_test.go b/pkg/testutil/utils_test.go new file mode 100644 index 0000000..d37f3f4 --- /dev/null +++ b/pkg/testutil/utils_test.go @@ -0,0 +1,341 @@ +package testutil + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { + var lsCmd *exec.Cmd + if runtime.GOOS != "windows" { + lsCmd = exec.Command("ls") + } else { + lsCmd = exec.Command("cmd", "/c", "dir") + } + + err := lsCmd.Run() + if IsKilled(err) { + t.Fatalf("Expected the ls command to not be killed, was.") + } +} + +func TestIsKilledTrueWithKilledProcess(t *testing.T) { + var longCmd *exec.Cmd + if runtime.GOOS != "windows" { + longCmd = exec.Command("top") + } else { + longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") + } + + // Start a command + err := longCmd.Start() + if err != nil { + t.Fatal(err) + } + // Capture the error when *dying* + done := make(chan error, 1) + go func() { + done <- longCmd.Wait() + }() + // Then kill it + longCmd.Process.Kill() + // Get the error + err = <-done + if !IsKilled(err) { + t.Fatalf("Expected the command to be killed, was not.") + } +} + +func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { + _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) + expectedError := "pipeline does not have multiple cmds" + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) + } +} + +func TestRunCommandPipelineWithOutputErrors(t *testing.T) { + p := "$PATH" + if runtime.GOOS == "windows" { + p = "%PATH%" + } + cmd1 := exec.Command("ls") + cmd1.Stdout = os.Stdout + cmd2 := exec.Command("anything really") + _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) + if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { + t.Fatalf("Expected an error, got %v", err) + } + + cmdWithError := exec.Command("doesnotexists") + cmdCat := exec.Command("cat") + _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) + if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestRunCommandPipelineWithOutput(t *testing.T) { + //TODO: Should run on Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + cmds := []*exec.Cmd{ + // Print 2 characters + exec.Command("echo", "-n", "11"), + // Count the number or char from stdin (previous command) + exec.Command("wc", "-m"), + } + out, exitCode, err := RunCommandPipelineWithOutput(cmds...) 
+ expectedOutput := "2\n" + if out != expectedOutput || exitCode != 0 || err != nil { + t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) + } +} + +func TestConvertSliceOfStringsToMap(t *testing.T) { + input := []string{"a", "b"} + actual := ConvertSliceOfStringsToMap(input) + for _, key := range input { + if _, ok := actual[key]; !ok { + t.Fatalf("Expected output to contains key %s, did not: %v", key, actual) + } + } +} + +func TestCompareDirectoryEntries(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + file1 := filepath.Join(tmpFolder, "file1") + file2 := filepath.Join(tmpFolder, "file2") + os.Create(file1) + os.Create(file2) + + fi1, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi1bis, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi2, err := os.Stat(file2) + if err != nil { + t.Fatal(err) + } + + cases := []struct { + e1 []os.FileInfo + e2 []os.FileInfo + shouldError bool + }{ + // Empty directories + { + []os.FileInfo{}, + []os.FileInfo{}, + false, + }, + // Same FileInfos + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1}, + false, + }, + // Different FileInfos but same names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1bis}, + false, + }, + // Different FileInfos, different names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi2}, + true, + }, + } + for _, elt := range cases { + err := CompareDirectoryEntries(elt.e1, elt.e2) + if elt.shouldError && err == nil { + t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2) + } + if !elt.shouldError && err != nil { + t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2) + } + } +} + +// FIXME make an "unhappy path" test for ListTar without "panicking" :-) +func TestListTar(t *testing.T) { + // TODO Windows: Figure out why this fails. Should be portable. 
+ if runtime.GOOS == "windows" { + t.Skip("Failing on Windows - needs further investigation") + } + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + // Let's create a Tar file + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + reader, err := os.Open(tarFile) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + entries, err := ListTar(reader) + if err != nil { + t.Fatal(err) + } + if len(entries) != 1 && entries[0] != "src" { + t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) + } +} + +func TestRandomTmpDirPath(t *testing.T) { + path := RandomTmpDirPath("something", runtime.GOOS) + + prefix := "/tmp/something" + if runtime.GOOS == "windows" { + prefix = os.Getenv("TEMP") + `\something` + } + expectedSize := len(prefix) + 11 + + if !strings.HasPrefix(path, prefix) { + t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path) + } + if len(path) != expectedSize { + t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path)) + } +} + +func TestConsumeWithSpeed(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 10*time.Millisecond, nil) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 10 { + t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) + } + +} + +func TestConsumeWithSpeedWithStop(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + stopIt := make(chan bool) + + go func() { + time.Sleep(1 * time.Millisecond) + stopIt <- true + }() + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 2 { + t.Fatalf("Expected to have read 2 bytes, got %d", bytes1) + } + +} + +func TestParseCgroupPathsEmpty(t *testing.T) { + cgroupMap := ParseCgroupPaths("") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("\n") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("something:else\nagain:here") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } +} + +func TestParseCgroupPaths(t *testing.T) { + cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") + if len(cgroupMap) != 2 { + t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) + } + if value, ok := cgroupMap["memory"]; !ok || value != "/a" { + t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap) + } + if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { + t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap) + } +} + +func TestChannelBufferTimeout(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + done := make(chan struct{}, 1) + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + done <- struct{}{} + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 50*time.Millisecond) + if err == nil && err.Error() != "timeout reading from channel" { + t.Fatalf("Expected an error, got %s", err) + } 
+ <-done +} + +func TestChannelBuffer(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 200*time.Millisecond) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("Expected '%s', got '%s'", expected, string(b)) + } +} diff --git a/pkg/tlsconfig/tlsconfig_clone.go b/pkg/tlsconfig/tlsconfig_clone.go new file mode 100644 index 0000000..e4dec3a --- /dev/null +++ b/pkg/tlsconfig/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/pkg/tlsconfig/tlsconfig_clone_go17.go b/pkg/tlsconfig/tlsconfig_clone_go17.go new file mode 100644 index 0000000..0d5b448 --- /dev/null +++ b/pkg/tlsconfig/tlsconfig_clone_go17.go @@ -0,0 +1,33 @@ +// +build go1.7,!go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go new file mode 100644 index 0000000..74776e6 --- /dev/null +++ b/pkg/truncindex/truncindex.go @@ -0,0 +1,139 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. 
+type TruncIndex struct {
+	sync.RWMutex
+	trie *patricia.Trie
+	ids  map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+	idx = &TruncIndex{
+		ids: make(map[string]struct{}),
+
+		// Change patricia max prefix per node length,
+		// because our len(ID) is always 64
+		trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+	}
+	for _, id := range ids {
+		idx.addID(id)
+	}
+	return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+	if strings.Contains(id, " ") {
+		return ErrIllegalChar
+	}
+	if id == "" {
+		return ErrEmptyPrefix
+	}
+	if _, exists := idx.ids[id]; exists {
+		return fmt.Errorf("id already exists: '%s'", id)
+	}
+	idx.ids[id] = struct{}{}
+	if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+		return fmt.Errorf("failed to insert id: %s", id)
+	}
+	return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	return idx.addID(id)
+}
+
+// Delete removes an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Delete(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if _, exists := idx.ids[id]; !exists || id == "" {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	delete(idx.ids, id)
+	if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Get(s string) (string, error) {
+	if s == "" {
+		return "", ErrEmptyPrefix
+	}
+	var (
+		id string
+	)
+	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+		if id != "" {
+			// we haven't found the ID if there are two or more IDs
+			id = ""
+			return ErrAmbiguousPrefix{prefix: string(prefix)}
+		}
+		id = string(prefix)
+		return nil
+	}
+
+	idx.RLock()
+	defer idx.RUnlock()
+	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+		return "", err
+	}
+	if id != "" {
+		return id, nil
+	}
+	return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler method does not call any public
+// method on truncindex as the internal locking is not reentrant/recursive
+// and will result in deadlock.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+	idx.Lock()
+	defer idx.Unlock()
+	idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+		handler(string(prefix))
+		return nil
+	})
+}
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
new file mode 100644
index 0000000..89658ca
--- /dev/null
+++ b/pkg/truncindex/truncindex_test.go
@@ -0,0 +1,453 @@
+package truncindex
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/stringid"
+)
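Before the tests, a usage sketch of the index; the IDs are made up, and the error text follows ErrAmbiguousPrefix above:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	index := truncindex.NewTruncIndex([]string{
		"99b36c2c326ccc11e726eee6ee78a0baf166ef96",
		"99c56fa38a963a27515e01e1b3cd64b893564dff",
	})

	// A prefix that is unique among the indexed IDs resolves to the full ID.
	id, err := index.Get("99b")
	fmt.Println(id, err) // 99b36c2c326ccc11e726eee6ee78a0baf166ef96 <nil>

	// An ambiguous prefix returns ErrAmbiguousPrefix.
	_, err = index.Get("99")
	fmt.Println(err) // Multiple IDs found with provided prefix: 99...
}

+// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.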
+func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // An ambiguous id prefix should return an error + if _, err := index.Get(id[:4]); err == nil { + t.Fatal("An ambiguous id prefix should return an error") + } + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) + + assertIndexIterate(t) + assertIndexIterateDoNotPanic(t) +} + +func assertIndexIterate(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + "37b36c2c326ccc11e726eee6ee78a0baf166ef96", + "46b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + + index.Iterate(func(targetId string) { + for _, id := range ids { + if targetId == id { + return + } + } + + t.Fatalf("An unknown ID '%s'", targetId) + }) +} + +func assertIndexIterateDoNotPanic(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + iterationStarted := make(chan bool, 1) + + 
go func() { + <-iterationStarted + index.Delete("19b36c2c326ccc11e726eee6ee78a0baf166ef96") + }() + + index.Iterate(func(targetId string) { + if targetId == "19b36c2c326ccc11e726eee6ee78a0baf166ef96" { + iterationStarted <- true + time.Sleep(100 * time.Millisecond) + } + }) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet 
= append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range 
testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff --git a/pkg/urlutil/urlutil.go b/pkg/urlutil/urlutil.go new file mode 100644 index 0000000..cfcd582 --- /dev/null +++ b/pkg/urlutil/urlutil.go @@ -0,0 +1,44 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. +func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/pkg/urlutil/urlutil_test.go b/pkg/urlutil/urlutil_test.go new file mode 100644 index 0000000..e7579f5 --- /dev/null +++ b/pkg/urlutil/urlutil_test.go @@ -0,0 +1,56 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } + transportUrls = []string{ + "tcp://example.com", + "tcp+tls://example.com", + "udp://example.com", + "unix:///example", + "unixgram:///example", + } +) + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if !IsGitURL(url) { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if !IsGitURL(url) { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsTransport(t *testing.T) { + for _, url := range transportUrls { + if !IsTransportURL(url) { + t.Fatalf("%q should be detected as valid Transport url", url) + } + } +} diff --git a/pkg/useragent/README.md b/pkg/useragent/README.md new file mode 100644 index 0000000..d9cb367 --- /dev/null +++ b/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/pkg/useragent/useragent.go b/pkg/useragent/useragent.go new file mode 100644 index 0000000..1137db5 --- /dev/null +++ b/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. 
+package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is get from the name field, while +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/pkg/useragent/useragent_test.go b/pkg/useragent/useragent_test.go new file mode 100644 index 0000000..0ad7243 --- /dev/null +++ b/pkg/useragent/useragent_test.go @@ -0,0 +1,31 @@ +package useragent + +import "testing" + +func TestVersionInfo(t *testing.T) { + vi := VersionInfo{"foo", "bar"} + if !vi.isValid() { + t.Fatalf("VersionInfo should be valid") + } + vi = VersionInfo{"", "bar"} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } + vi = VersionInfo{"foo", ""} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } +} + +func TestAppendVersions(t *testing.T) { + vis := []VersionInfo{ + {"foo", "1.0"}, + {"bar", "0.1"}, + {"pi", "3.1.4"}, + } + v := AppendVersions("base", vis...) 
+ expect := "base foo/1.0 bar/0.1 pi/3.1.4" + if v != expect { + t.Fatalf("expected %q, got %q", expect, v) + } +} diff --git a/plugin/backend_linux.go b/plugin/backend_linux.go new file mode 100644 index 0000000..c33beff --- /dev/null +++ b/plugin/backend_linux.go @@ -0,0 +1,865 @@ +// +build linux + +package plugin + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/plugin/v2" + refstore "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +var acceptedPluginFilterTags = map[string]bool{ + "enabled": true, + "capability": true, +} + +// Disable deactivates a plugin. This means resources (volumes, networks) cant use them. +func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if !config.ForceDisable && p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + + for _, typ := range p.GetTypes() { + if typ.Capability == authorization.AuthZApiImplements { + pm.config.AuthzMiddleware.RemovePlugin(p.Name()) + } + } + + if err := pm.disable(p, c); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") + return nil +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. +func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + + c := &controller{timeoutInSecs: config.Timeout} + if err := pm.enable(p, c, false); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") + return nil +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return nil, err + } + + return &p.PluginObj, nil +} + +func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. 
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + config.ProgressOutput = progress.ChanOutput(progressChan) + } else { + config.ProgressOutput = progress.DiscardOutput() + } + return distribution.Pull(ctx, ref, config) +} + +type tempConfigStore struct { + config []byte + configDigest digest.Digest +} + +func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { + dgst := digest.FromBytes(c) + + s.config = c + s.configDigest = dgst + + return dgst, nil +} + +func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { + if d != s.configDigest { + return nil, fmt.Errorf("digest not found") + } + return s.config, nil +} + +func (s *tempConfigStore) GetTarSeekStream(d digest.Digest) (ioutils.ReadSeekCloser, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (s *tempConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + return configToRootFS(c) +} + +func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { + var privileges types.PluginPrivileges + if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { + privileges = append(privileges, types.PluginPrivilege{ + Name: "network", + Description: "permissions to access a network", + Value: []string{c.Network.Type}, + }) + } + if c.IpcHost { + privileges = append(privileges, types.PluginPrivilege{ + Name: "host ipc namespace", + Description: "allow access to host ipc namespace", + Value: []string{"true"}, + }) + } + if c.PidHost { + privileges = append(privileges, types.PluginPrivilege{ + Name: "host pid namespace", + Description: "allow access to host pid namespace", + Value: []string{"true"}, + }) + } + for _, mount := range c.Mounts { + if mount.Source != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "mount", + Description: "host path to mount", + Value: []string{*mount.Source}, + }) + } + } + for _, device := range c.Linux.Devices { + if device.Path != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "device", + Description: "host device to access", + Value: []string{*device.Path}, + }) + } + } + if c.Linux.AllowAllDevices { + privileges = append(privileges, types.PluginPrivilege{ + Name: "allow-all-devices", + Description: "allow 'rwm' access to all devices", + Value: []string{"true"}, + }) + } + if len(c.Linux.Capabilities) > 0 { + privileges = append(privileges, types.PluginPrivilege{ + Name: "capabilities", + Description: "list of additional capabilities required", + Value: c.Linux.Capabilities, + }) + } + + return privileges, nil +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + // create image store instance + cs := &tempConfigStore{} + + // DownloadManager not defined because only pulling configuration. 
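+	// With no DownloadManager set, the distribution code resolves the
+	// manifest and fetches only the image config, which tempConfigStore
+	// captures in memory; no layers are pulled and nothing is written to
+	// the local blob store.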
+	pluginPullConfig := &distribution.ImagePullConfig{
+		Config: distribution.Config{
+			MetaHeaders:      metaHeader,
+			AuthConfig:       authConfig,
+			RegistryService:  pm.config.RegistryService,
+			ImageEventLogger: func(string, string, string) {},
+			ImageStore:       cs,
+		},
+		Schema2Types: distribution.PluginTypes,
+	}
+
+	if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
+		return nil, err
+	}
+
+	if cs.config == nil {
+		return nil, errors.New("no configuration pulled")
+	}
+	var config types.PluginConfig
+	if err := json.Unmarshal(cs.config, &config); err != nil {
+		return nil, err
+	}
+
+	return computePrivileges(config)
+}
+
+// Upgrade upgrades a plugin to the given reference. The plugin must already
+// be installed and disabled.
+func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
+	p, err := pm.config.Store.GetV2Plugin(name)
+	if err != nil {
+		return errors.Wrap(err, "plugin must be installed before upgrading")
+	}
+
+	if p.IsEnabled() {
+		return fmt.Errorf("plugin must be disabled before upgrading")
+	}
+
+	pm.muGC.RLock()
+	defer pm.muGC.RUnlock()
+
+	// revalidate because Pull is public
+	if _, err := reference.ParseNormalizedNamed(name); err != nil {
+		return errors.Wrapf(err, "failed to parse %q", name)
+	}
+
+	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpRootFSDir)
+
+	dm := &downloadManager{
+		tmpDir:    tmpRootFSDir,
+		blobStore: pm.blobStore,
+	}
+
+	pluginPullConfig := &distribution.ImagePullConfig{
+		Config: distribution.Config{
+			MetaHeaders:      metaHeader,
+			AuthConfig:       authConfig,
+			RegistryService:  pm.config.RegistryService,
+			ImageEventLogger: pm.config.LogPluginEvent,
+			ImageStore:       dm,
+		},
+		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
+		Schema2Types:    distribution.PluginTypes,
+	}
+
+	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
+	if err != nil {
+		go pm.GC()
+		return err
+	}
+
+	if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
+		return err
+	}
+	p.PluginObj.PluginReference = ref.String()
+	return nil
+}
+
+// Pull pulls a plugin, checks that the correct privileges were granted, and installs it.
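+// The plugin is installed in a disabled state; enabling it is a separate
+// step (see Enable).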
+func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
+	pm.muGC.RLock()
+	defer pm.muGC.RUnlock()
+
+	// revalidate because Pull is public
+	nameref, err := reference.ParseNormalizedNamed(name)
+	if err != nil {
+		return errors.Wrapf(err, "failed to parse %q", name)
+	}
+	name = reference.FamiliarString(reference.TagNameOnly(nameref))
+
+	if err := pm.config.Store.validateName(name); err != nil {
+		return err
+	}
+
+	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpRootFSDir)
+
+	dm := &downloadManager{
+		tmpDir:    tmpRootFSDir,
+		blobStore: pm.blobStore,
+	}
+
+	pluginPullConfig := &distribution.ImagePullConfig{
+		Config: distribution.Config{
+			MetaHeaders:      metaHeader,
+			AuthConfig:       authConfig,
+			RegistryService:  pm.config.RegistryService,
+			ImageEventLogger: pm.config.LogPluginEvent,
+			ImageStore:       dm,
+		},
+		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
+		Schema2Types:    distribution.PluginTypes,
+	}
+
+	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
+	if err != nil {
+		go pm.GC()
+		return err
+	}
+
+	p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges)
+	if err != nil {
+		return err
+	}
+	p.PluginObj.PluginReference = ref.String()
+
+	return nil
+}
+
+// List displays the list of plugins and associated metadata.
+func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
+	if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
+		return nil, err
+	}
+
+	enabledOnly := false
+	disabledOnly := false
+	if pluginFilters.Include("enabled") {
+		if pluginFilters.ExactMatch("enabled", "true") {
+			enabledOnly = true
+		} else if pluginFilters.ExactMatch("enabled", "false") {
+			disabledOnly = true
+		} else {
+			return nil, fmt.Errorf("Invalid filter 'enabled=%s'", pluginFilters.Get("enabled"))
+		}
+	}
+
+	plugins := pm.config.Store.GetAll()
+	out := make([]types.Plugin, 0, len(plugins))
+
+next:
+	for _, p := range plugins {
+		if enabledOnly && !p.PluginObj.Enabled {
+			continue
+		}
+		if disabledOnly && p.PluginObj.Enabled {
+			continue
+		}
+		if pluginFilters.Include("capability") {
+			for _, f := range p.GetTypes() {
+				if !pluginFilters.Match("capability", f.Capability) {
+					continue next
+				}
+			}
+		}
+		out = append(out, p.PluginObj)
+	}
+	return out, nil
+}
+
+// Push pushes a plugin to the store.
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
+	p, err := pm.config.Store.GetV2Plugin(name)
+	if err != nil {
+		return err
+	}
+
+	ref, err := reference.ParseNormalizedNamed(p.Name())
+	if err != nil {
+		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
+	}
+
+	var po progress.Output
+	if outStream != nil {
+		// Include a buffer so that slow client connections don't affect
+		// transfer performance.
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + po = progress.ChanOutput(progressChan) + } else { + po = progress.DiscardOutput() + } + + // TODO: replace these with manager + is := &pluginConfigStore{ + pm: pm, + plugin: p, + } + ls := &pluginLayerProvider{ + pm: pm, + plugin: p, + } + rs := &pluginReference{ + name: ref, + pluginID: p.Config, + } + + uploadManager := xfer.NewLayerUploadManager(3) + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + ProgressOutput: po, + RegistryService: pm.config.RegistryService, + ReferenceStore: rs, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: is, + RequireSchema2: true, + }, + ConfigMediaType: schema2.MediaTypePluginConfig, + LayerStore: ls, + UploadManager: uploadManager, + } + + return distribution.Push(ctx, ref, imagePushConfig) +} + +type pluginReference struct { + name reference.Named + pluginID digest.Digest +} + +func (r *pluginReference) References(id digest.Digest) []reference.Named { + if r.pluginID != id { + return nil + } + return []reference.Named{r.name} +} + +func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association { + return []refstore.Association{ + { + Ref: r.name, + ID: r.pluginID, + }, + } +} + +func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { + if r.name.String() != ref.String() { + return digest.Digest(""), refstore.ErrDoesNotExist + } + return r.pluginID, nil +} + +func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) Delete(ref reference.Named) (bool, error) { + // Read only, ignore + return false, nil +} + +type pluginConfigStore struct { + pm *Manager + plugin *v2.Plugin +} + +func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { + return digest.Digest(""), errors.New("cannot store config on push") +} + +func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { + if s.plugin.Config != d { + return nil, errors.New("plugin not found") + } + rwc, err := s.pm.blobStore.Get(d) + if err != nil { + return nil, err + } + defer rwc.Close() + return ioutil.ReadAll(rwc) +} + +func (s *pluginConfigStore) GetTarSeekStream(d digest.Digest) (ioutils.ReadSeekCloser, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (s *pluginConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + return configToRootFS(c) +} + +type pluginLayerProvider struct { + pm *Manager + plugin *v2.Plugin +} + +func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { + rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) + var i int + for i = 1; i <= len(rootFS.DiffIDs); i++ { + if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { + break + } + } + if i > len(rootFS.DiffIDs) { + return nil, errors.New("layer not found") + } + return &pluginLayer{ + pm: p.pm, + diffIDs: rootFS.DiffIDs[:i], + blobs: p.plugin.Blobsums[:i], + }, nil +} + +type pluginLayer struct { + pm 
*Manager
+	diffIDs []layer.DiffID
+	blobs   []digest.Digest
+}
+
+func (l *pluginLayer) ChainID() layer.ChainID {
+	return layer.CreateChainID(l.diffIDs)
+}
+
+func (l *pluginLayer) DiffID() layer.DiffID {
+	return l.diffIDs[len(l.diffIDs)-1]
+}
+
+func (l *pluginLayer) Parent() distribution.PushLayer {
+	if len(l.diffIDs) == 1 {
+		return nil
+	}
+	return &pluginLayer{
+		pm:      l.pm,
+		diffIDs: l.diffIDs[:len(l.diffIDs)-1],
+		blobs:   l.blobs[:len(l.diffIDs)-1],
+	}
+}
+
+func (l *pluginLayer) Open() (io.ReadCloser, error) {
+	return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
+}
+
+func (l *pluginLayer) Size() (int64, error) {
+	return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
+}
+
+func (l *pluginLayer) MediaType() string {
+	return schema2.MediaTypeLayer
+}
+
+func (l *pluginLayer) Release() {
+	// Nothing needs to be released, no references are held
+}
+
+// Remove deletes the plugin's root directory.
+func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
+	p, err := pm.config.Store.GetV2Plugin(name)
+	if err != nil {
+		return err
+	}
+
+	pm.mu.RLock()
+	c := pm.cMap[p]
+	pm.mu.RUnlock()
+
+	if !config.ForceRemove {
+		if p.GetRefCount() > 0 {
+			return fmt.Errorf("plugin %s is in use", p.Name())
+		}
+		if p.IsEnabled() {
+			return fmt.Errorf("plugin %s is enabled", p.Name())
+		}
+	}
+
+	if p.IsEnabled() {
+		if err := pm.disable(p, c); err != nil {
+			logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
+		}
+	}
+
+	defer func() {
+		go pm.GC()
+	}()
+
+	id := p.GetID()
+	pm.config.Store.Remove(p)
+	pluginDir := filepath.Join(pm.config.Root, id)
+	if err := recursiveUnmount(pluginDir); err != nil {
+		logrus.WithField("dir", pluginDir).WithField("id", id).Warn(err)
+	}
+	if err := os.RemoveAll(pluginDir); err != nil {
+		logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err)
+	}
+	pm.config.LogPluginEvent(id, name, "remove")
+	return nil
+}
+
+func getMounts(root string) ([]string, error) {
+	infos, err := mount.GetMounts()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to read mount table")
+	}
+
+	var mounts []string
+	for _, m := range infos {
+		if strings.HasPrefix(m.Mountpoint, root) {
+			mounts = append(mounts, m.Mountpoint)
+		}
+	}
+
+	return mounts, nil
+}
+
+func recursiveUnmount(root string) error {
+	mounts, err := getMounts(root)
+	if err != nil {
+		return err
+	}
+
+	// sort in reverse-lexicographic order so the root mount will always be last
+	sort.Sort(sort.Reverse(sort.StringSlice(mounts)))
+
+	for i, m := range mounts {
+		if err := mount.Unmount(m); err != nil {
+			if i == len(mounts)-1 {
+				return errors.Wrapf(err, "error performing recursive unmount on %s", root)
+			}
+			logrus.WithError(err).WithField("mountpoint", m).Warn("could not unmount")
+		}
+	}
+
+	return nil
+}
+
+// Set sets plugin args.
+func (pm *Manager) Set(name string, args []string) error {
+	p, err := pm.config.Store.GetV2Plugin(name)
+	if err != nil {
+		return err
+	}
+	if err := p.Set(args); err != nil {
+		return err
+	}
+	return pm.save(p)
+}
+
+// CreateFromContext creates a plugin from the given build context, which must
+// contain both the rootfs and the config.json, using repoName (with an
+// optional tag) as the plugin name.
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + ref, err := reference.ParseNormalizedNamed(options.RepoName) + if err != nil { + return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) + } + if _, ok := ref.(reference.Canonical); ok { + return errors.Errorf("canonical references are not permitted") + } + name := reference.FamiliarString(reference.TagNameOnly(ref)) + + if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + if err != nil { + return errors.Wrap(err, "failed to create temp directory") + } + defer os.RemoveAll(tmpRootFSDir) + + var configJSON []byte + rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) + + rootFSBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer rootFSBlob.Close() + gzw := gzip.NewWriter(rootFSBlob) + layerDigester := digest.Canonical.Digester() + rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) + + if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { + return err + } + if err := rootFS.Close(); err != nil { + return err + } + + if configJSON == nil { + return errors.New("config not found") + } + + if err := gzw.Close(); err != nil { + return errors.Wrap(err, "error closing gzip writer") + } + + var config types.PluginConfig + if err := json.Unmarshal(configJSON, &config); err != nil { + return errors.Wrap(err, "failed to parse config") + } + + if err := pm.validateConfig(config); err != nil { + return err + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + rootFSBlobsum, err := rootFSBlob.Commit() + if err != nil { + return err + } + defer func() { + if err != nil { + go pm.GC() + } + }() + + config.Rootfs = &types.PluginConfigRootfs{ + Type: "layers", + DiffIds: []string{layerDigester.Digest().String()}, + } + + config.DockerVersion = dockerversion.Version + + configBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer configBlob.Close() + if err := json.NewEncoder(configBlob).Encode(config); err != nil { + return errors.Wrap(err, "error encoding json config") + } + configBlobsum, err := configBlob.Commit() + if err != nil { + return err + } + + p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) + if err != nil { + return err + } + p.PluginObj.PluginReference = name + + pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") + + return nil +} + +func (pm *Manager) validateConfig(config types.PluginConfig) error { + return nil // TODO: +} + +func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + tarReader := tar.NewReader(in) + tarWriter := tar.NewWriter(pw) + defer in.Close() + + hasRootFS := false + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + if !hasRootFS { + pw.CloseWithError(errors.Wrap(err, "no rootfs found")) + return + } + // Signals end of archive. 
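+				// Close the tar writer first so it emits the trailing
+				// zero blocks of the archive, then close the pipe writer
+				// so the reader side observes a clean EOF.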
+				tarWriter.Close()
+				pw.Close()
+				return
+			}
+			if err != nil {
+				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
+				return
+			}
+
+			content := io.Reader(tarReader)
+			name := path.Clean(hdr.Name)
+			if path.IsAbs(name) {
+				name = name[1:]
+			}
+			if name == configFileName {
+				dt, err := ioutil.ReadAll(content)
+				if err != nil {
+					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
+					return
+				}
+				*config = dt
+			}
+			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
+				hdr.Name = path.Clean(path.Join(parts[1:]...))
+				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
+					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
+				}
+				if err := tarWriter.WriteHeader(hdr); err != nil {
+					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
+					return
+				}
+				if _, err := pools.Copy(tarWriter, content); err != nil {
+					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
+					return
+				}
+				hasRootFS = true
+			} else {
+				io.Copy(ioutil.Discard, content)
+			}
+		}
+	}()
+	return pr
+}
diff --git a/plugin/backend_unsupported.go b/plugin/backend_unsupported.go
new file mode 100644
index 0000000..2d4850e
--- /dev/null
+++ b/plugin/backend_unsupported.go
@@ -0,0 +1,72 @@
+// +build !linux
+
+package plugin
+
+import (
+	"errors"
+	"io"
+	"net/http"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"golang.org/x/net/context"
+)
+
+var errNotSupported = errors.New("plugins are not supported on this platform")
+
+// Disable deactivates a plugin, so that it can no longer be used by containers.
+func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error {
+	return errNotSupported
+}
+
+// Enable activates a plugin, making it ready to be used by containers.
+func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
+	return errNotSupported
+}
+
+// Inspect examines a plugin config.
+func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
+	return nil, errNotSupported
+}
+
+// Privileges pulls a plugin config and computes the privileges required to install it.
+func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
+	return nil, errNotSupported
+}
+
+// Pull pulls a plugin, checks that the correct privileges were granted, and installs it.
+func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error {
+	return errNotSupported
+}
+
+// Upgrade pulls a new version of a plugin, checks that the correct privileges were granted, and installs it.
+func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error {
+	return errNotSupported
+}
+
+// List displays the list of plugins and associated metadata.
+func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
+	return nil, errNotSupported
+}
+
+// Push pushes a plugin to the store.
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error {
+	return errNotSupported
+}
+
+// Remove deletes the plugin's root directory.
+func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
+	return errNotSupported
+}
+
+// Set sets plugin args.
+func (pm *Manager) Set(name string, args []string) error {
+	return errNotSupported
+}
+
+// CreateFromContext creates a plugin from the given build context, which must
+// contain both the rootfs and the config.json, using repoName (with an
+// optional tag) as the plugin name.
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error {
+	return errNotSupported
+}
diff --git a/plugin/blobstore.go b/plugin/blobstore.go
new file mode 100644
index 0000000..6b3f984
--- /dev/null
+++ b/plugin/blobstore.go
@@ -0,0 +1,190 @@
+package plugin
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type blobstore interface {
+	New() (WriteCommitCloser, error)
+	Get(dgst digest.Digest) (io.ReadCloser, error)
+	Size(dgst digest.Digest) (int64, error)
+}
+
+type basicBlobStore struct {
+	path string
+}
+
+func newBasicBlobStore(p string) (*basicBlobStore, error) {
+	tmpdir := filepath.Join(p, "tmp")
+	if err := os.MkdirAll(tmpdir, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", tmpdir)
+	}
+	return &basicBlobStore{path: p}, nil
+}
+
+func (b *basicBlobStore) New() (WriteCommitCloser, error) {
+	f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion")
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create temp file")
+	}
+	return newInsertion(f), nil
+}
+
+func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) {
+	return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
+}
+
+func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
+	stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
+	if err != nil {
+		return 0, err
+	}
+	return stat.Size(), nil
+}
+
+func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) {
+	for _, alg := range []string{string(digest.Canonical)} {
+		items, err := ioutil.ReadDir(filepath.Join(b.path, alg))
+		if err != nil {
+			continue
+		}
+		for _, fi := range items {
+			if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists {
+				p := filepath.Join(b.path, alg, fi.Name())
+				err := os.RemoveAll(p)
+				logrus.Debugf("cleaned up blob %v: %v", p, err)
+			}
+		}
+	}
+}
+
+// WriteCommitCloser defines an object that can be committed to the blobstore.
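+// Data is written to a temporary file; Commit syncs it, moves it into the
+// store under its content digest and returns that digest, while Close
+// without a prior Commit discards the temporary file.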
+type WriteCommitCloser interface { + io.WriteCloser + Commit() (digest.Digest, error) +} + +type insertion struct { + io.Writer + f *os.File + digester digest.Digester + closed bool +} + +func newInsertion(tempFile *os.File) *insertion { + digester := digest.Canonical.Digester() + return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} +} + +func (i *insertion) Commit() (digest.Digest, error) { + p := i.f.Name() + d := filepath.Join(filepath.Join(p, "../../")) + i.f.Sync() + defer os.RemoveAll(p) + if err := i.f.Close(); err != nil { + return "", err + } + i.closed = true + dgst := i.digester.Digest() + if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %v", d) + } + if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { + return "", errors.Wrapf(err, "failed to rename %v", p) + } + return dgst, nil +} + +func (i *insertion) Close() error { + if i.closed { + return nil + } + defer os.RemoveAll(i.f.Name()) + return i.f.Close() +} + +type downloadManager struct { + blobStore blobstore + tmpDir string + blobs []digest.Digest + configDigest digest.Digest +} + +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + // TODO @jhowardmsft LCOW: May need revisiting. + for _, l := range layers { + b, err := dm.blobStore.New() + if err != nil { + return initialRootFS, nil, err + } + defer b.Close() + rc, _, err := l.Download(ctx, progressOutput) + if err != nil { + return initialRootFS, nil, errors.Wrap(err, "failed to download") + } + defer rc.Close() + r := io.TeeReader(rc, b) + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return initialRootFS, nil, err + } + digester := digest.Canonical.Digester() + if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + return initialRootFS, nil, err + } + initialRootFS.Append(layer.DiffID(digester.Digest())) + d, err := b.Commit() + if err != nil { + return initialRootFS, nil, err + } + dm.blobs = append(dm.blobs, d) + } + return initialRootFS, nil, nil +} + +func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { + b, err := dm.blobStore.New() + if err != nil { + return "", err + } + defer b.Close() + n, err := b.Write(dt) + if err != nil { + return "", err + } + if n != len(dt) { + return "", io.ErrShortWrite + } + d, err := b.Commit() + dm.configDigest = d + return d, err +} + +func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { + return nil, fmt.Errorf("digest not found") +} + +func (dm *downloadManager) GetTarSeekStream(d digest.Digest) (ioutils.ReadSeekCloser, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (dm *downloadManager) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) { + return configToRootFS(c) +} diff --git a/plugin/defs.go b/plugin/defs.go new file mode 100644 index 0000000..cf44c97 --- /dev/null +++ b/plugin/defs.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" +) + +// Store manages the plugin inventory in memory and on-disk +type Store struct { + sync.RWMutex + plugins map[string]*v2.Plugin + /* handlers are necessary for transition path of legacy plugins + * to the new model. 
Legacy plugins use Handle() for registering an + * activation callback.*/ + handlers map[string][]func(string, *plugins.Client) +} + +// NewStore creates a Store. +func NewStore() *Store { + return &Store{ + plugins: make(map[string]*v2.Plugin), + handlers: make(map[string][]func(string, *plugins.Client)), + } +} diff --git a/plugin/manager.go b/plugin/manager.go new file mode 100644 index 0000000..02d3727 --- /dev/null +++ b/plugin/manager.go @@ -0,0 +1,381 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const configFileName = "config.json" +const rootFSFileName = "rootfs" + +var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func (pm *Manager) restorePlugin(p *v2.Plugin) error { + if p.IsEnabled() { + return pm.restore(p) + } + return nil +} + +type eventLogger func(id, name, action string) + +// ManagerConfig defines configuration needed to start new manager. +type ManagerConfig struct { + Store *Store // remove + Executor libcontainerd.Remote + RegistryService registry.Service + LiveRestoreEnabled bool // TODO: remove + LogPluginEvent eventLogger + Root string + ExecRoot string + AuthzMiddleware *authorization.Middleware +} + +// Manager controls the plugin subsystem. +type Manager struct { + config ManagerConfig + mu sync.RWMutex // protects cMap + muGC sync.RWMutex // protects blobstore deletions + cMap map[*v2.Plugin]*controller + containerdClient libcontainerd.Client + blobStore *basicBlobStore +} + +// controller represents the manager's control on a plugin. +type controller struct { + restart bool + exitChan chan bool + timeoutInSecs int +} + +// pluginRegistryService ensures that all resolved repositories +// are of the plugin class. +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +// NewManager returns a new plugin manager. 
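+// It creates the plugin root, exec and tmp directories, connects to
+// containerd, opens the content-addressed blob store and reloads any
+// plugins already present on disk.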
+func NewManager(config ManagerConfig) (*Manager, error) {
+	if config.RegistryService != nil {
+		config.RegistryService = pluginRegistryService{config.RegistryService}
+	}
+	manager := &Manager{
+		config: config,
+	}
+	if err := os.MkdirAll(manager.config.Root, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root)
+	}
+	if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot)
+	}
+	if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir())
+	}
+	var err error
+	manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create containerd client")
+	}
+	manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs"))
+	if err != nil {
+		return nil, err
+	}
+
+	manager.cMap = make(map[*v2.Plugin]*controller)
+	if err := manager.reload(); err != nil {
+		return nil, errors.Wrap(err, "failed to restore plugins")
+	}
+	return manager, nil
+}
+
+func (pm *Manager) tmpDir() string {
+	return filepath.Join(pm.config.Root, "tmp")
+}
+
+// StateChanged updates plugin internals using libcontainerd events.
+func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
+	logrus.Debugf("plugin state changed %s %#v", id, e)
+
+	switch e.State {
+	case libcontainerd.StateExit:
+		p, err := pm.config.Store.GetV2Plugin(id)
+		if err != nil {
+			return err
+		}
+
+		os.RemoveAll(filepath.Join(pm.config.ExecRoot, id))
+
+		if p.PropagatedMount != "" {
+			if err := mount.Unmount(p.PropagatedMount); err != nil {
+				logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err)
+			}
+			propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
+			if err := mount.Unmount(propRoot); err != nil {
+				logrus.Warnf("Could not unmount %s: %v", propRoot, err)
+			}
+		}
+
+		pm.mu.RLock()
+		c := pm.cMap[p]
+		if c.exitChan != nil {
+			close(c.exitChan)
+		}
+		restart := c.restart
+		pm.mu.RUnlock()
+
+		if restart {
+			pm.enable(p, c, true)
+		}
+	}
+
+	return nil
+}
+
+func (pm *Manager) reload() error { // todo: restore
+	dir, err := ioutil.ReadDir(pm.config.Root)
+	if err != nil {
+		return errors.Wrapf(err, "failed to read %v", pm.config.Root)
+	}
+	plugins := make(map[string]*v2.Plugin)
+	for _, v := range dir {
+		if validFullID.MatchString(v.Name()) {
+			p, err := pm.loadPlugin(v.Name())
+			if err != nil {
+				return err
+			}
+			plugins[p.GetID()] = p
+		}
+	}
+
+	pm.config.Store.SetAll(plugins)
+
+	var wg sync.WaitGroup
+	wg.Add(len(plugins))
+	for _, p := range plugins {
+		c := &controller{} // todo: remove this
+		pm.cMap[p] = c
+		go func(p *v2.Plugin) {
+			defer wg.Done()
+			if err := pm.restorePlugin(p); err != nil {
+				logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err)
+				return
+			}
+
+			if p.Rootfs != "" {
+				p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
+			}
+
+			// We should only enable rootfs propagation for certain plugin types that need it.
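+			// Concretely: only "docker" volumedriver and graphdriver types at
+			// interface version 1.x get a propagated mount migrated and
+			// re-created below.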
+			for _, typ := range p.PluginObj.Config.Interface.Types {
+				if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") {
+					if p.PluginObj.Config.PropagatedMount != "" {
+						propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
+
+						// check if we need to migrate an older propagated mount from before
+						// these mounts were stored outside the plugin rootfs
+						if _, err := os.Stat(propRoot); os.IsNotExist(err) {
+							if _, err := os.Stat(p.PropagatedMount); err == nil {
+								// make sure nothing is mounted here
+								// don't care about errors
+								mount.Unmount(p.PropagatedMount)
+								if err := os.Rename(p.PropagatedMount, propRoot); err != nil {
+									logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage")
+								}
+								if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil {
+									logrus.WithError(err).WithField("dir", p.PropagatedMount).Error("error migrating propagated mount storage")
+								}
+							}
+						}
+
+						if err := os.MkdirAll(propRoot, 0755); err != nil {
+							logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err)
+						}
+						// TODO: sanitize PropagatedMount and prevent breakout
+						p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
+						if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil {
+							logrus.Errorf("failed to create PropagatedMount directory at %s: %v", p.PropagatedMount, err)
+							return
+						}
+					}
+				}
+			}
+
+			pm.save(p)
+			requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled()
+
+			if requiresManualRestore {
+				// if liveRestore is not enabled, the plugin will be stopped now so we should enable it
+				if err := pm.enable(p, c, true); err != nil {
+					logrus.Errorf("failed to enable plugin '%s': %s", p.Name(), err)
+				}
+			}
+		}(p)
+	}
+	wg.Wait()
+	return nil
+}
+
+func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) {
+	p := filepath.Join(pm.config.Root, id, configFileName)
+	dt, err := ioutil.ReadFile(p)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error reading %v", p)
+	}
+	var plugin v2.Plugin
+	if err := json.Unmarshal(dt, &plugin); err != nil {
+		return nil, errors.Wrapf(err, "error decoding %v", p)
+	}
+	return &plugin, nil
+}
+
+func (pm *Manager) save(p *v2.Plugin) error {
+	pluginJSON, err := json.Marshal(p)
+	if err != nil {
+		return errors.Wrap(err, "failed to marshal plugin json")
+	}
+	if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil {
+		return errors.Wrap(err, "failed to atomically write plugin json")
+	}
+	return nil
+}
+
+// GC cleans up unreferenced blobs. It is recommended to run this in a goroutine.
+func (pm *Manager) GC() {
+	pm.muGC.Lock()
+	defer pm.muGC.Unlock()
+
+	whitelist := make(map[digest.Digest]struct{})
+	for _, p := range pm.config.Store.GetAll() {
+		whitelist[p.Config] = struct{}{}
+		for _, b := range p.Blobsums {
+			whitelist[b] = struct{}{}
+		}
+	}
+
+	pm.blobStore.gc(whitelist)
+}
+
+type logHook struct{ id string }
+
+func (logHook) Levels() []logrus.Level {
+	return logrus.AllLevels
+}
+
+func (l logHook) Fire(entry *logrus.Entry) error {
+	entry.Data = logrus.Fields{"plugin": l.id}
+	return nil
+}
+
+func attachToLog(id string) func(libcontainerd.IOPipe) error {
+	return func(iop libcontainerd.IOPipe) error {
+		iop.Stdin.Close()
+
+		logger := logrus.New()
+		logger.Hooks.Add(logHook{id})
+		// TODO: cache writer per id
+		w := logger.Writer()
+		go func() {
+			io.Copy(w, iop.Stdout)
+		}()
+		go func() {
+			// TODO: update logrus and use logger.WriterLevel
+			io.Copy(w, iop.Stderr)
+		}()
+		return nil
+	}
+}
+
+func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error {
+	if !isEqual(requiredPrivileges, privileges, isEqualPrivilege) {
+		return errors.New("incorrect privileges")
+	}
+
+	return nil
+}
+
+func isEqual(arrOne, arrOther types.PluginPrivileges, compare func(x, y types.PluginPrivilege) bool) bool {
+	if len(arrOne) != len(arrOther) {
+		return false
+	}
+
+	sort.Sort(arrOne)
+	sort.Sort(arrOther)
+
+	// Start at index 0 so the first privilege is compared as well; starting
+	// at 1 would silently skip it.
+	for i := 0; i < arrOne.Len(); i++ {
+		if !compare(arrOne[i], arrOther[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func isEqualPrivilege(a, b types.PluginPrivilege) bool {
+	if a.Name != b.Name {
+		return false
+	}
+
+	return reflect.DeepEqual(a.Value, b.Value)
+}
+
+func configToRootFS(c []byte) (*image.RootFS, layer.Platform, error) {
+	// TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the platform.
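+	// On an LCOW-capable daemon the plugin rootfs is assumed to be a Linux
+	// image, so the platform is pinned to "linux" regardless of the host OS.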
+ platform := layer.Platform(runtime.GOOS) + if system.LCOWSupported() { + platform = "linux" + } + var pluginConfig types.PluginConfig + if err := json.Unmarshal(c, &pluginConfig); err != nil { + return nil, "", err + } + // validation for empty rootfs is in distribution code + if pluginConfig.Rootfs == nil { + return nil, platform, nil + } + + return rootFSFromPlugin(pluginConfig.Rootfs), platform, nil +} + +func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { + rootFS := image.RootFS{ + Type: pluginfs.Type, + DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), + } + for i := range pluginfs.DiffIds { + rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) + } + + return &rootFS +} diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go new file mode 100644 index 0000000..1e89940 --- /dev/null +++ b/plugin/manager_linux.go @@ -0,0 +1,320 @@ +// +build linux + +package plugin + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/plugin/v2" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + if p.IsEnabled() && !force { + return fmt.Errorf("plugin %s is already enabled", p.Name()) + } + spec, err := p.InitSpec(pm.config.ExecRoot) + if err != nil { + return err + } + + c.restart = true + c.exitChan = make(chan bool) + + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + + var propRoot string + if p.PropagatedMount != "" { + propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + + if err := mount.MakeRShared(propRoot); err != nil { + return errors.Wrap(err, "error setting up propagated mount dir") + } + + if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil { + return errors.Wrap(err, "error creating mount for propagated mount") + } + } + + if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil { + return errors.WithStack(err) + } + + if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + if err := mount.Unmount(propRoot); err != nil { + logrus.Warnf("Could not unmount %s: %v", propRoot, err) + } + } + return errors.WithStack(err) + } + + return pm.pluginPostStart(p, c) +} + +func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { + sockAddr := filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()) + client, err := plugins.NewClientWithTimeout("unix://"+sockAddr, nil, time.Duration(c.timeoutInSecs)*time.Second) + if err != nil { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + return errors.WithStack(err) + } + + p.SetPClient(client) + + // Initial sleep 
+	// before net Dial, to allow the plugin to start listening on its socket.
+	time.Sleep(500 * time.Millisecond)
+	maxRetries := 3
+	var retries int
+	for {
+		// net dial into the unix socket to see if someone's listening.
+		conn, err := net.Dial("unix", sockAddr)
+		if err == nil {
+			conn.Close()
+			break
+		}
+
+		time.Sleep(3 * time.Second)
+		retries++
+
+		if retries > maxRetries {
+			logrus.Debugf("error net dialing plugin: %v", err)
+			c.restart = false
+			// While restoring plugins, we need to explicitly set the state to disabled
+			pm.config.Store.SetState(p, false)
+			shutdownPlugin(p, c, pm.containerdClient)
+			return err
+		}
+
+	}
+	pm.config.Store.SetState(p, true)
+	pm.config.Store.CallHandler(p)
+
+	return pm.save(p)
+}
+
+func (pm *Manager) restore(p *v2.Plugin) error {
+	if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil {
+		return err
+	}
+
+	if pm.config.LiveRestoreEnabled {
+		c := &controller{}
+		if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 {
+			// plugin is not running, so follow normal startup procedure
+			return pm.enable(p, c, true)
+		}
+
+		c.exitChan = make(chan bool)
+		c.restart = true
+		pm.mu.Lock()
+		pm.cMap[p] = c
+		pm.mu.Unlock()
+		return pm.pluginPostStart(p, c)
+	}
+
+	return nil
+}
+
+func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) {
+	pluginID := p.GetID()
+
+	err := containerdClient.Signal(pluginID, int(syscall.SIGTERM))
+	if err != nil {
+		logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err)
+	} else {
+		select {
+		case <-c.exitChan:
+			logrus.Debug("Clean shutdown of plugin")
+		case <-time.After(time.Second * 10):
+			logrus.Debug("Force shutdown plugin")
+			if err := containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil {
+				logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err)
+			}
+		}
+	}
+}
+
+func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
+	if !p.IsEnabled() {
+		return fmt.Errorf("plugin %s is already disabled", p.Name())
+	}
+
+	c.restart = false
+	shutdownPlugin(p, c, pm.containerdClient)
+	pm.config.Store.SetState(p, false)
+	return pm.save(p)
+}
+
+// Shutdown stops all plugins and is called during daemon shutdown.
+func (pm *Manager) Shutdown() {
+	plugins := pm.config.Store.GetAll()
+	for _, p := range plugins {
+		pm.mu.RLock()
+		c := pm.cMap[p]
+		pm.mu.RUnlock()
+
+		if pm.config.LiveRestoreEnabled && p.IsEnabled() {
+			logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
+			continue
+		}
+		if pm.containerdClient != nil && p.IsEnabled() {
+			c.restart = false
+			shutdownPlugin(p, c, pm.containerdClient)
+		}
+	}
+}
+
+func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) {
+	config, err := pm.setupNewPlugin(configDigest, blobsums, privileges)
+	if err != nil {
+		return err
+	}
+
+	pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
+	orig := filepath.Join(pdir, "rootfs")
+
+	// Make sure nothing is mounted.
+	// This could happen if the plugin was disabled with `-f` while it had active mounts.
+	// If anything in `orig` is still mounted, this should error out.
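+	// Upgrade strategy, as implemented below: unmount everything under the
+	// current rootfs, rename it to "<rootfs>-old" as a backup, move the
+	// freshly pulled rootfs into place, and restore the backup from the
+	// deferred handler if anything fails.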
+	if err := recursiveUnmount(orig); err != nil {
+		return err
+	}
+
+	backup := orig + "-old"
+	if err := os.Rename(orig, backup); err != nil {
+		return errors.Wrap(err, "error backing up plugin data before upgrade")
+	}
+
+	defer func() {
+		if err != nil {
+			if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) {
+				logrus.WithError(rmErr).WithField("dir", orig).Error("error cleaning up after failed upgrade")
+				return
+			}
+			if mvErr := os.Rename(backup, orig); mvErr != nil {
+				err = errors.Wrap(mvErr, "error restoring old plugin root on upgrade failure")
+			}
+			if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) {
+				logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir)
+			}
+		} else {
+			if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) {
+				logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade")
+			}
+
+			p.Config = configDigest
+			p.Blobsums = blobsums
+		}
+	}()
+
+	if err := os.Rename(tmpRootFSDir, orig); err != nil {
+		return errors.Wrap(err, "error upgrading")
+	}
+
+	p.PluginObj.Config = config
+	err = pm.save(p)
+	return errors.Wrap(err, "error saving upgraded plugin config")
+}
+
+func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) {
+	configRC, err := pm.blobStore.Get(configDigest)
+	if err != nil {
+		return types.PluginConfig{}, err
+	}
+	defer configRC.Close()
+
+	var config types.PluginConfig
+	dec := json.NewDecoder(configRC)
+	if err := dec.Decode(&config); err != nil {
+		return types.PluginConfig{}, errors.Wrap(err, "failed to parse config")
+	}
+	if dec.More() {
+		return types.PluginConfig{}, errors.New("invalid config json")
+	}
+
+	requiredPrivileges, err := computePrivileges(config)
+	if err != nil {
+		return types.PluginConfig{}, err
+	}
+	if privileges != nil {
+		if err := validatePrivileges(requiredPrivileges, *privileges); err != nil {
+			return types.PluginConfig{}, err
+		}
+	}
+
+	return config, nil
+}
+
+// createPlugin creates a new plugin. The caller must hold the lock.
+func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) {
+	if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong; remove the store check
+		return nil, err
+	}
+
+	config, err := pm.setupNewPlugin(configDigest, blobsums, privileges)
+	if err != nil {
+		return nil, err
+	}
+
+	p = &v2.Plugin{
+		PluginObj: types.Plugin{
+			Name:   name,
+			ID:     stringid.GenerateRandomID(),
+			Config: config,
+		},
+		Config:   configDigest,
+		Blobsums: blobsums,
+	}
+	p.InitEmptySettings()
+
+	pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
+	if err := os.MkdirAll(pdir, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", pdir)
+	}
+
+	defer func() {
+		if err != nil {
+			os.RemoveAll(pdir)
+		}
+	}()
+
+	if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil {
+		return nil, errors.Wrap(err, "failed to rename rootfs")
+	}
+
+	if err := pm.save(p); err != nil {
+		return nil, err
+	}
+
+	pm.config.Store.Add(p) // todo: remove
+
+	return p, nil
+}
diff --git a/plugin/manager_solaris.go b/plugin/manager_solaris.go
new file mode 100644
index 0000000..72ccae7
--- /dev/null
+++ b/plugin/manager_solaris.go
@@ -0,0 +1,28 @@
+package plugin
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/plugin/v2"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
+	return fmt.Errorf("Not implemented")
+}
+
+func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
+	return fmt.Errorf("Not implemented")
+}
+
+func (pm *Manager) restore(p *v2.Plugin) error {
+	return fmt.Errorf("Not implemented")
+}
+
+// Shutdown plugins
+func (pm *Manager) Shutdown() {
+}
diff --git a/plugin/manager_test.go b/plugin/manager_test.go
new file mode 100644
index 0000000..4efe76b
--- /dev/null
+++ b/plugin/manager_test.go
@@ -0,0 +1,55 @@
+package plugin
+
+import (
+	"testing"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestValidatePrivileges(t *testing.T) {
+	testData := map[string]struct {
+		requiredPrivileges types.PluginPrivileges
+		privileges         types.PluginPrivileges
+		result             bool
+	}{
+		"diff-len": {
+			requiredPrivileges: []types.PluginPrivilege{
+				{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}},
+			},
+			privileges: []types.PluginPrivilege{
+				{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}},
+				{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
+			},
+			result: false,
+		},
+		"diff-value": {
+			requiredPrivileges: []types.PluginPrivilege{
+				{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}},
+				{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "***"}},
+			},
+			privileges: []types.PluginPrivilege{
+				{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}},
+				{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
+			},
+			result: false,
+		},
+		"diff-order-but-same-value": {
+			requiredPrivileges: []types.PluginPrivilege{
+				{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}},
+				{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
+			},
+			privileges: []types.PluginPrivilege{
+				{Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}},
+				{Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}},
+			},
+			result: true,
+		},
+	}
+
+	for key, data := range testData {
+		err := validatePrivileges(data.requiredPrivileges, data.privileges)
+		if (err == nil) != data.result {
+			t.Fatalf("Test item %s expected result to be %t, got %t", key, data.result, (err == nil))
+		}
+	}
+}
diff --git a/plugin/manager_windows.go b/plugin/manager_windows.go
new file mode 100644
index 0000000..4469a67
--- /dev/null
+++ b/plugin/manager_windows.go
@@ -0,0 +1,30 @@
+// +build windows
+
+package plugin
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/plugin/v2"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
+	return fmt.Errorf("Not implemented")
+}
+
+func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
+	return fmt.Errorf("Not implemented")
+}
+
+func (pm *Manager) restore(p *v2.Plugin) error {
+	return fmt.Errorf("Not implemented")
+}
+
+// Shutdown plugins
+func (pm *Manager) Shutdown() {
+}
diff --git a/plugin/store.go b/plugin/store.go
new file mode 100644
index 0000000..7f6e954
--- /dev/null
+++ b/plugin/store.go
@@ -0,0 +1,270 @@
+package plugin
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/plugin/v2"
+	"github.com/pkg/errors"
+)
+
+/* allowV1PluginsFallback determines the daemon's support for V1 plugins.
+ * When the time comes to remove support for V1 plugins, flipping
+ * this bool is all that will be needed.
+ */
+const allowV1PluginsFallback bool = true
+
+/* defaultAPIVersion is the version of the plugin API for volume, network,
+   IPAM and authz. This is a very stable API. When we update this API, then
+   pluginType should include a version. e.g. "networkdriver/2.0".
+*/
+const defaultAPIVersion string = "1.0"
+
+// ErrNotFound indicates that a plugin was not found locally.
+type ErrNotFound string
+
+func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) }
+
+// ErrAmbiguous indicates that more than one plugin was found.
+type ErrAmbiguous string
+
+func (name ErrAmbiguous) Error() string {
+	return fmt.Sprintf("multiple plugins found for %q", string(name))
+}
+
+// ErrDisabled indicates that a plugin was found but it is disabled.
+type ErrDisabled string
+
+func (name ErrDisabled) Error() string {
+	return fmt.Sprintf("plugin %s found but disabled", string(name))
+}
+
+// GetV2Plugin retrieves a plugin by name, id or partial ID.
+func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) {
+	ps.RLock()
+	defer ps.RUnlock()
+
+	id, err := ps.resolvePluginID(refOrID)
+	if err != nil {
+		return nil, err
+	}
+
+	p, idOk := ps.plugins[id]
+	if !idOk {
+		return nil, errors.WithStack(ErrNotFound(id))
+	}
+
+	return p, nil
+}
+
+// validateName returns an error if the name is already reserved. Always call
+// with the lock held and a full name.
+func (ps *Store) validateName(name string) error {
+	for _, p := range ps.plugins {
+		if p.Name() == name {
+			return errors.Errorf("plugin %q already exists", name)
+		}
+	}
+	return nil
+}
+
+// GetAll retrieves all plugins.
+func (ps *Store) GetAll() map[string]*v2.Plugin {
+	ps.RLock()
+	defer ps.RUnlock()
+	return ps.plugins
+}
+
+// SetAll initializes plugins during daemon restore.
+func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { + ps.Lock() + defer ps.Unlock() + ps.plugins = plugins +} + +func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { + ps.RLock() + defer ps.RUnlock() + + result := make([]plugingetter.CompatPlugin, 0, 1) + for _, p := range ps.plugins { + if p.IsEnabled() { + if _, err := p.FilterByCap(capability); err == nil { + result = append(result, p) + } + } + } + return result +} + +// SetState sets the active state of the plugin and updates plugindb. +func (ps *Store) SetState(p *v2.Plugin, state bool) { + ps.Lock() + defer ps.Unlock() + + p.PluginObj.Enabled = state +} + +// Add adds a plugin to memory and plugindb. +// An error will be returned if there is a collision. +func (ps *Store) Add(p *v2.Plugin) error { + ps.Lock() + defer ps.Unlock() + + if v, exist := ps.plugins[p.GetID()]; exist { + return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) + } + ps.plugins[p.GetID()] = p + return nil +} + +// Remove removes a plugin from memory and plugindb. +func (ps *Store) Remove(p *v2.Plugin) { + ps.Lock() + delete(ps.plugins, p.GetID()) + ps.Unlock() +} + +// Get returns an enabled plugin matching the given name and capability. +func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { + var ( + p *v2.Plugin + err error + ) + + // Lookup using new model. + if ps != nil { + p, err = ps.GetV2Plugin(name) + if err == nil { + p.AddRefCount(mode) + if p.IsEnabled() { + return p.FilterByCap(capability) + } + // Plugin was found but it is disabled, so we should not fall back to legacy plugins + // but we should error out right away + return nil, ErrDisabled(name) + } + if _, ok := errors.Cause(err).(ErrNotFound); !ok { + return nil, err + } + } + + // Lookup using legacy model. + if allowV1PluginsFallback { + p, err := plugins.Get(name, capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + return p, nil + } + + return nil, err +} + +// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. +func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { + return ps.getAllByCap(capability) +} + +// GetAllByCap returns a list of enabled plugins matching the given capability. +func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + result := make([]plugingetter.CompatPlugin, 0, 1) + + /* Daemon start always calls plugin.Init thereby initializing a store. + * So store on experimental builds can never be nil, even while + * handling legacy plugins. However, there are legacy plugin unit + * tests where the volume subsystem directly talks with the plugin, + * bypassing the daemon. For such tests, this check is necessary. + */ + if ps != nil { + ps.RLock() + result = ps.getAllByCap(capability) + ps.RUnlock() + } + + // Lookup with legacy model + if allowV1PluginsFallback { + pl, err := plugins.GetAll(capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + for _, p := range pl { + result = append(result, p) + } + } + return result, nil +} + +// Handle sets a callback for a given capability. It is only used by network +// and ipam drivers during plugin registration. The callback registers the +// driver with the subsystem (network, ipam). 
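+// For example, capability "NetworkDriver" is registered under the key
+// "docker.networkdriver/1.0", which is what a matching interface type in a
+// plugin's config stringifies to when CallHandler looks up its handlers.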
+func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { + pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) + + // Register callback with new plugin model. + ps.Lock() + handlers, ok := ps.handlers[pluginType] + if !ok { + handlers = []func(string, *plugins.Client){} + } + handlers = append(handlers, callback) + ps.handlers[pluginType] = handlers + ps.Unlock() + + // Register callback with legacy plugin model. + if allowV1PluginsFallback { + plugins.Handle(capability, callback) + } +} + +// CallHandler calls the registered callback. It is invoked during plugin enable. +func (ps *Store) CallHandler(p *v2.Plugin) { + for _, typ := range p.GetTypes() { + for _, handler := range ps.handlers[typ.String()] { + handler(p.Name(), p.Client()) + } + } +} + +func (ps *Store) resolvePluginID(idOrName string) (string, error) { + ps.RLock() // todo: fix + defer ps.RUnlock() + + if validFullID.MatchString(idOrName) { + return idOrName, nil + } + + ref, err := reference.ParseNormalizedNamed(idOrName) + if err != nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + if _, ok := ref.(reference.Canonical); ok { + logrus.Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref)) + return "", errors.WithStack(ErrNotFound(idOrName)) + } + + ref = reference.TagNameOnly(ref) + + for _, p := range ps.plugins { + if p.PluginObj.Name == reference.FamiliarString(ref) { + return p.PluginObj.ID, nil + } + } + + var found *v2.Plugin + for id, p := range ps.plugins { // this can be optimized + if strings.HasPrefix(id, idOrName) { + if found != nil { + return "", errors.WithStack(ErrAmbiguous(idOrName)) + } + found = p + } + } + if found == nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + return found.PluginObj.ID, nil +} diff --git a/plugin/store_test.go b/plugin/store_test.go new file mode 100644 index 0000000..d3876da --- /dev/null +++ b/plugin/store_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/plugin/v2" +) + +func TestFilterByCapNeg(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("foobar") + if err == nil { + t.Fatalf("expected inadequate error, got %v", err) + } +} + +func TestFilterByCapPos(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + + iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("volumedriver") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/plugin/v2/plugin.go b/plugin/v2/plugin.go new file mode 100644 index 0000000..74ff640 --- /dev/null +++ b/plugin/v2/plugin.go @@ -0,0 +1,244 @@ +package v2 + +import ( + "fmt" + "strings" + "sync" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" +) + +// Plugin represents an individual plugin. 
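Editorial aside: the tail of `resolvePluginID` above applies docker's usual shortest-unique-prefix rule to plugin IDs. A simplified, self-contained model of that loop (`resolveByPrefix` is illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// resolveByPrefix models the partial-ID matching in resolvePluginID:
// exactly one match resolves, several matches are ambiguous.
func resolveByPrefix(ids []string, prefix string) (string, error) {
	var found string
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			if found != "" {
				return "", fmt.Errorf("multiple plugins found for %q", prefix)
			}
			found = id
		}
	}
	if found == "" {
		return "", fmt.Errorf("plugin %q not found", prefix)
	}
	return found, nil
}

func main() {
	ids := []string{"9c0bde1f", "9f1a2c3d", "42de99aa"}
	fmt.Println(resolveByPrefix(ids, "42")) // 42de99aa <nil>
	fmt.Println(resolveByPrefix(ids, "9"))  // ambiguous: two IDs share the prefix
}
```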
+type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + PropagatedMount string // TODO: make private + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// BasePath returns the path to which all paths returned by the plugin are relative. +// For Plugin objects this returns the host path of the plugin container's rootfs. +func (p *Plugin) BasePath() string { + return p.Rootfs +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient sets the plugin client. +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap queries the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable?
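+ // Note: isSettable (defined in settable.go later in this patch) returns + // an error only for an ambiguous field spec; a plain false means the + // field is simply not whitelisted as settable for this plugin.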
+ if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // it is, so let's update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.Acquire) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g.
with +// via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.Release) +} diff --git a/plugin/v2/plugin_linux.go b/plugin/v2/plugin_linux.go new file mode 100644 index 0000000..9cae180 --- /dev/null +++ b/plugin/v2/plugin_linux.go @@ -0,0 +1,132 @@ +// +build linux + +package v2 + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + s.Root = specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? + } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.PropagatedMount != "" { + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + s.Linux.RootfsPropagation = "rshared" + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) 
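+ // Note: the two appends above must stay aligned: s.Linux.Devices creates + // the device nodes inside the container's rootfs, while + // s.Linux.Resources.Devices whitelists them in the devices cgroup.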
+ } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) + + return &s, nil +} diff --git a/plugin/v2/plugin_unsupported.go b/plugin/v2/plugin_unsupported.go new file mode 100644 index 0000000..e60fb83 --- /dev/null +++ b/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 + +import ( + "errors" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/plugin/v2/settable.go b/plugin/v2/settable.go new file mode 100644 index 0000000..79c6bef --- /dev/null +++ b/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be <name>[.<field>][=<value>]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName returns name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there is only one settable field, default to it.
+ set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/plugin/v2/settable_test.go b/plugin/v2/settable_test.go new file mode 100644 index 0000000..7183f3a --- /dev/null +++ b/plugin/v2/settable_test.go @@ -0,0 +1,91 @@ +package v2 + +import ( + "reflect" + "testing" +) + +func TestNewSettable(t *testing.T) { + contexts := []struct { + arg string + name string + field string + value string + err error + }{ + {"name=value", "name", "", "value", nil}, + {"name", "name", "", "", nil}, + {"name.field=value", "name", "field", "value", nil}, + {"name.field", "name", "field", "", nil}, + {"=value", "", "", "", errInvalidFormat}, + {"=", "", "", "", errInvalidFormat}, + } + + for _, c := range contexts { + s, err := newSettable(c.arg) + if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + + if s.name != c.name { + t.Fatalf("expected name to be %q, got %q", c.name, s.name) + } + + if s.field != c.field { + t.Fatalf("expected field to be %q, got %q", c.field, s.field) + } + + if s.value != c.value { + t.Fatalf("expected value to be %q, got %q", c.value, s.value) + } + + } +} + +func TestIsSettable(t *testing.T) { + contexts := []struct { + allowedSettableFields []string + set settable + settable []string + result bool + err error + }{ + {allowedSettableFieldsEnv, settable{}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"value"}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"foo"}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value1", "value2"}, false, errMultipleFields}, + } + + for _, c := range contexts { + if res, err := c.set.isSettable(c.allowedSettableFields, c.settable); res != c.result { + t.Fatalf("expected result to be %t, got %t", c.result, res) + } else if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + } +} + +func TestUpdateSettingsEnv(t *testing.T) { + contexts := []struct { + env []string + set settable + newEnv []string + }{ + {[]string{}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"FOO=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0", "BAR=1"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1", "BAR=1"}}, + } + + for _, c := range contexts { + updateSettingsEnv(&c.env, &c.set) + + if !reflect.DeepEqual(c.env, c.newEnv) { + t.Fatalf("expected env to be %q, got %q", c.newEnv,
c.env) + } + } +} diff --git a/poule.yml b/poule.yml new file mode 100644 index 0000000..2abf0df --- /dev/null +++ b/poule.yml @@ -0,0 +1,131 @@ +# Add a "status/0-triage" label to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + filters: { + ~labels: [ "status/0-triage", "status/1-design-review", "status/2-code-review", "status/3-docs-review", "status/4-merge" ], + } + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign a label based on a matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. +- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker swarm", "docker service create", "docker service inspect", "docker service logs", "docker service ls", "docker service ps", "docker service rm", "docker service scale", "docker service update" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + platform/arm: [ "raspberry", "raspbian", "rpi", "beaglebone", "pine64" ], + } + } + - type: version-label + +# Labeling a PR with `rebuild/<configuration>` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. +- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. + configurations: [], + label: "rebuild/*", + } + - type: rebuild + settings: { + configurations: [ arm ], + label: "rebuild/arm", + } + - type: rebuild + settings: { + configurations: [ experimental ], + label: "rebuild/experimental", + } + - type: rebuild + settings: { + configurations: [ janky ], + label: "rebuild/janky", + } + - type: rebuild + settings: { + configurations: [ powerpc ], + label: "rebuild/powerpc", + } + - type: rebuild + settings: { + configurations: [ userns ], + label: "rebuild/userns", + } + - type: rebuild + settings: { + configurations: [ vendor ], + label: "rebuild/vendor", + } + - type: rebuild + settings: { + configurations: [ win2lin ], + label: "rebuild/win2lin", + } + - type: rebuild + settings: { + configurations: [ windowsRS1 ], + label: "rebuild/windowsRS1", + } + - type: rebuild + settings: { + configurations: [ z ], + label: "rebuild/z", + } + +# Once a day, randomly assign pull requests older than 2 weeks.
+- schedule: "@daily" + operations: + - type: random-assign + filters: { + age: "2w", + is: "pr", + } + settings: { + users: [ + "aaronlehmann", + "akihirosuda", + "aluzzardi", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "ehazlett", + "johnstep", + "justincormack", + "lk4d4", + "mhbauer", + "mlaventure", + "runcom", + "stevvooe", + "thajeztah", + "tiborvass", + "tonistiigi", + "vdemeester", + "vieux", + "yongtang", + ] + } diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go new file mode 100644 index 0000000..48b41c5 --- /dev/null +++ b/profiles/apparmor/apparmor.go @@ -0,0 +1,114 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/pkg/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is the profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. +func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include <tunables/global>") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include <abstractions/base>") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + return compiled.Execute(out, p) +} + +// macroExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile in a temp directory determined by +// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'. +func InstallDefault(name string) error { + p := profileData{ + Name: name, + } + + // Install to a temporary directory. + f, err := ioutil.TempFile("", name) + if err != nil { + return err + } + profilePath := f.Name() + + defer f.Close() + defer os.Remove(profilePath) + + if err := p.generateDefault(f); err != nil { + return err + } + + return aaparser.LoadProfile(profilePath) +} + +// IsLoaded checks if a profile with the given name has been loaded into the +// kernel. +func IsLoaded(name string) (bool, error) { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return false, err + } + defer file.Close() + + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return false, err + } + if strings.HasPrefix(p, name+" ") { + return true, nil + } + } + + return false, nil +} diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go new file mode 100644 index 0000000..c5ea458 --- /dev/null +++ b/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers.
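Editorial aside: a minimal usage sketch for the two helpers above, loading the default profile only when the kernel does not already know it. The `ensureProfile` wrapper is hypothetical, though `docker-default` is the profile name the daemon conventionally uses:

```go
// +build linux

package main

import (
	"log"

	"github.com/docker/docker/profiles/apparmor"
)

// ensureProfile is a hypothetical wrapper: check the kernel's loaded-profile
// list first, and only generate and load the default profile when missing.
func ensureProfile(name string) error {
	loaded, err := apparmor.IsLoaded(name)
	if err != nil {
		return err
	}
	if loaded {
		return nil
	}
	return apparmor.InstallDefault(name)
}

func main() {
	if err := ensureProfile("docker-default"); err != nil {
		log.Fatal(err)
	}
}
```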
+const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc//** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer={{.Name}}, +{{end}} +} +` diff --git a/profiles/seccomp/default.json b/profiles/seccomp/default.json new file mode 100755 index 0000000..b71a871 --- /dev/null +++ b/profiles/seccomp/default.json @@ -0,0 +1,750 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + }, + { + "architecture": "SCMP_ARCH_AARCH64", + "subArchitectures": [ + "SCMP_ARCH_ARM" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64" + ] + }, + { + "architecture": "SCMP_ARCH_S390X", + "subArchitectures": [ + "SCMP_ARCH_S390" + ] + } + ], + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "adjtimex", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + 
"get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + 
"action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "sync_file_range2" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "ppc64le" + ] + }, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "arm", + "arm64" + ] + }, + "excludes": {} + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32" + ] + }, + "excludes": {} + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32", + "x86" + ] + }, + "excludes": {} + }, + { + "names": [ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "excludes": {} + }, + { + "names": [ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "excludes": {} + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ], + "arches": [ + "s390", + "s390x" + ] + } + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "s390 parameter ordering for clone is different", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + } + }, + { + "names": [ + "reboot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_BOOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": 
"SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "excludes": {} + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "excludes": {} + }, + { + "names": [ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "excludes": {} + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "excludes": {} + }, + { + "names": [ + "settimeofday", + "stime", + "clock_settime" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "excludes": {} + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "excludes": {} + } + ] +} \ No newline at end of file diff --git a/profiles/seccomp/fixtures/example.json b/profiles/seccomp/fixtures/example.json new file mode 100755 index 0000000..674ca50 --- /dev/null +++ b/profiles/seccomp/fixtures/example.json @@ -0,0 +1,27 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} diff --git a/profiles/seccomp/generate.go b/profiles/seccomp/generate.go new file mode 100644 index 0000000..32f22bb --- /dev/null +++ b/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/profiles/seccomp/seccomp.go b/profiles/seccomp/seccomp.go new file mode 100644 index 0000000..90a3859 --- /dev/null +++ b/profiles/seccomp/seccomp.go @@ -0,0 +1,150 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringutils" + "github.com/opencontainers/runtime-spec/specs-go" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { + return setupSeccomp(DefaultProfile(), rs) +} + +// LoadProfile takes a json string and decodes the seccomp profile. 
+func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + return setupSeccomp(&config, rs) +} + +var nativeToSeccomp = map[string]types.Arch{ + "amd64": types.ArchX86_64, + "arm64": types.ArchAARCH64, + "mips64": types.ArchMIPS64, + "mips64n32": types.ArchMIPS64N32, + "mipsel64": types.ArchMIPSEL64, + "mipsel64n32": types.ArchMIPSEL64N32, + "s390x": types.ArchS390X, +} + +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig := &specs.LinuxSeccomp{} + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } + + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + + if len(config.ArchMap) != 0 { + for _, a := range config.ArchMap { + seccompArch, ok := nativeToSeccomp[arch] + if ok { + if a.Arch == seccompArch { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) + for _, sa := range a.SubArches { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) + } + break + } + } + } + } + + newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) + +Loop: + // Loop through all syscall blocks and convert them to libcontainer format after filtering them + for _, call := range config.Syscalls { + if len(call.Excludes.Arches) > 0 { + if stringutils.InSlice(call.Excludes.Arches, arch) { + continue Loop + } + } + if len(call.Excludes.Caps) > 0 { + for _, c := range call.Excludes.Caps { + if stringutils.InSlice(rs.Process.Capabilities.Effective, c) { + continue Loop + } + } + } + if len(call.Includes.Arches) > 0 { + if !stringutils.InSlice(call.Includes.Arches, arch) { + continue Loop + } + } + if len(call.Includes.Caps) > 0 { + for _, c := range call.Includes.Caps { + if !stringutils.InSlice(rs.Process.Capabilities.Effective, c) { + continue Loop + } + } + } + + if call.Name != "" && len(call.Names) != 0 { + return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") + } + + if call.Name != "" { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) + } + + for _, n := range call.Names { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) + } + } + + return newConfig, nil +} + +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { + newCall := specs.LinuxSyscall{ + Names: []string{name}, + Action: specs.LinuxSeccompAction(action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range args { + newArg := specs.LinuxSeccompArg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: 
specs.LinuxSeccompOperator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + return newCall +} diff --git a/profiles/seccomp/seccomp_default.go b/profiles/seccomp/seccomp_default.go new file mode 100644 index 0000000..833dcd5 --- /dev/null +++ b/profiles/seccomp/seccomp_default.go @@ -0,0 +1,640 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/docker/api/types" +) + +func arches() []types.Architecture { + return []types.Architecture{ + { + Arch: types.ArchX86_64, + SubArches: []types.Arch{types.ArchX86, types.ArchX32}, + }, + { + Arch: types.ArchAARCH64, + SubArches: []types.Arch{types.ArchARM}, + }, + { + Arch: types.ArchMIPS64, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, + }, + { + Arch: types.ArchMIPS64N32, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, + }, + { + Arch: types.ArchMIPSEL64, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, + }, + { + Arch: types.ArchMIPSEL64N32, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, + }, + { + Arch: types.ArchS390X, + SubArches: []types.Arch{types.ArchS390}, + }, + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. +func DefaultProfile() *types.Seccomp { + syscalls := []*types.Syscall{ + { + Names: []string{ + "accept", + "accept4", + "access", + "adjtimex", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + 
"munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20000, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{ + "sync_file_range2", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"ppc64le"}, + }, + }, + { + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "sync_file_range2", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"arm", "arm64"}, + }, + }, + { + Names: 
[]string{ + "arch_prctl", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32"}, + }, + }, + { + Names: []string{ + "modify_ldt", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32", "x86"}, + }, + }, + { + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 1, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Comment: "s390 parameter ordering for clone is different", + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "reboot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_BOOT"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "clock_settime", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + } + + return &types.Seccomp{ + DefaultAction: types.ActErrno, + ArchMap: arches(), + Syscalls: syscalls, + } +} 
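Editorial aside: both `clone` rules above use `SCMP_CMP_MASKED_EQ` with `ValueTwo` fixed at 0, meaning the clone flags ANDed with the namespace mask must come out zero; in other words, without `CAP_SYS_ADMIN` a process may clone, but not into new namespaces. A quick check of the mask arithmetic, which also reproduces the literal `2080505856` seen in default.json:

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// The same OR of namespace flags used in the seccomp rules above.
	mask := syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC |
		syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET
	fmt.Println(mask) // 2080505856, the literal in default.json

	// A plain fork-style clone (flags = SIGCHLD = 17) passes the filter...
	fmt.Println(17&mask == 0) // true
	// ...while requesting a new user namespace trips the rule.
	fmt.Println(syscall.CLONE_NEWUSER&mask == 0) // false
}
```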
diff --git a/profiles/seccomp/seccomp_test.go b/profiles/seccomp/seccomp_test.go new file mode 100644 index 0000000..1346921 --- /dev/null +++ b/profiles/seccomp/seccomp_test.go @@ -0,0 +1,32 @@ +// +build linux + +package seccomp + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/oci" +) + +func TestLoadProfile(t *testing.T) { + f, err := ioutil.ReadFile("fixtures/example.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} + +func TestLoadDefaultProfile(t *testing.T) { + f, err := ioutil.ReadFile("default.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} diff --git a/profiles/seccomp/seccomp_unsupported.go b/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 0000000..3c1d68b --- /dev/null +++ b/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,13 @@ +// +build linux,!seccomp + +package seccomp + +import ( + "github.com/docker/docker/api/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultProfile returns a nil pointer on unsupported systems. +func DefaultProfile() *types.Seccomp { + return nil +} diff --git a/project/ARM.md b/project/ARM.md new file mode 100644 index 0000000..e61e3b1 --- /dev/null +++ b/project/ARM.md @@ -0,0 +1,45 @@ +# ARM support + +The ARM support should be considered experimental. It will be extended step by step in the coming weeks. + +Building a Docker Development Image works in the same fashion as it does for the Intel platform (x86-64). +Currently we have initial support for 32bit ARMv7 devices. + +To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. +The device needs to have a Docker Engine installed to build the Docker Development Image. + +From the root of the Docker/Docker repo one can use make to execute the following make targets: +- make validate +- make binary +- make build +- make deb +- make bundles +- make default +- make shell +- make test-unit +- make test-integration-cli +- make + +The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. +Based on OS and architecture it chooses the correct Dockerfile. +For the ARM 32bit architecture it uses `Dockerfile.armhf`. + +So for example in order to build a Docker binary one has to: +1. clone the Docker/Docker repository on an ARM device `git clone https://github.com/docker/docker.git` +2. change into the checked out repository with `cd docker` +3. execute `make binary` to create a Docker Engine binary for ARM + +## Kernel modules +A few libnetwork integration tests require that the kernel be +configured with the "dummy" network interface and have the module +loaded. However, the dummy module may not be loaded automatically. + +To load the kernel module permanently, run these commands as `root`. + + modprobe dummy + echo "dummy" >> /etc/modules + +On some systems you also have to sync your kernel modules. + + oc-sync-kernel-modules + depmod diff --git a/project/BRANCHES-AND-TAGS.md b/project/BRANCHES-AND-TAGS.md new file mode 100644 index 0000000..1c6f232 --- /dev/null +++ b/project/BRANCHES-AND-TAGS.md @@ -0,0 +1,35 @@ +Branches and tags +================= + +Note: details of the release process for the Engine are documented in the +[RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md).
+
+# Branches
+
+The docker/docker repository should normally have only three living branches at all times, including
+the regular `master` branch:
+
+## `docs` branch
+
+The `docs` branch supports documentation updates between product releases. This branch allows us to
+decouple documentation releases from product releases.
+
+## `release` branch
+
+The `release` branch contains the last _released_ version of the code for the project.
+
+The `release` branch is only updated at each public release of the project. The mechanism for this
+is that the release is materialized by a pull request against the `release` branch which lives for
+the duration of the code freeze period. When this pull request is merged, the `release` branch gets
+updated, and its new state is tagged accordingly.
+
+# Tags
+
+Any public release of a compiled binary, with the logical exception of nightly builds, should have
+a corresponding tag in the repository.
+
+The general format of a tag is `vX.Y.Z[-suffix[N]]`:
+
+- All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`)
+- First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1`
+- Second alpha release of a product should be tagged `v1.0.0-alpha2`
diff --git a/project/CONTRIBUTING.md b/project/CONTRIBUTING.md
new file mode 120000
index 0000000..44fcc63
--- /dev/null
+++ b/project/CONTRIBUTING.md
@@ -0,0 +1 @@
+../CONTRIBUTING.md
\ No newline at end of file
diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md
new file mode 100644
index 0000000..6ae7baf
--- /dev/null
+++ b/project/GOVERNANCE.md
@@ -0,0 +1,17 @@
+# Docker Governance Advisory Board Meetings
+
+In the spirit of openness, Docker created a Governance Advisory Board, and committed to making all materials and notes from the meetings of this group public.
+All output from the meetings should be considered proposals only, and is subject to the review and approval of the community and the project leadership.
+
+The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at
+[Google Docs Folder](https://goo.gl/Alfj8r)
+
+These include:
+
+* First Meeting Notes
+* DGAB Charter
+* Presentation 1: Introductory Presentation, including State of The Project
+* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
+* Presentation 3: Long Term Roadmap/Statement of Direction
+
+
diff --git a/project/IRC-ADMINISTRATION.md b/project/IRC-ADMINISTRATION.md
new file mode 100644
index 0000000..824a14b
--- /dev/null
+++ b/project/IRC-ADMINISTRATION.md
@@ -0,0 +1,37 @@
+# Freenode IRC Administration Guidelines and Tips
+
+This is not meant to be a general "Here's how to IRC" document, so if you're
+looking for that, check Google instead. ♥
+
+If you've been charged with helping maintain one of Docker's now many IRC
+channels, this might turn out to be useful. If there's information that you
+wish you'd known about how a particular channel is organized, you should add
+deets here! :)
+
+## `ChanServ`
+
+Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For
+example, `/msg ChanServ ACCESS LIST` will show you a list of everyone
+with "access" privileges for a particular channel.
+
+A similar command is used to give someone a particular access level. For
+example, to add a new maintainer to the `#docker-maintainers` access list so
+that they can contribute to the discussions (after they've been merged
+appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ
+ACCESS #docker-maintainers ADD maintainer`.
+
+To set up a new channel with a similar `maintainer` access template, use a
+command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting
+them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS`
+for more details).
+
+## Troubleshooting
+
+The most common cause of not-getting-auto-`+v` woes is people not being
+`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with
+their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS
+ADD` request with something like `xyz is not registered.`.
+
+This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword`
+followed by `/msg NickServ GROUP` to group the two nicknames together. See
+`/msg NickServ HELP GROUP` for more information.
diff --git a/project/ISSUE-TRIAGE.md b/project/ISSUE-TRIAGE.md
new file mode 100644
index 0000000..5ef2d31
--- /dev/null
+++ b/project/ISSUE-TRIAGE.md
@@ -0,0 +1,132 @@
+Triaging of issues
+------------------
+
+Triage provides an important way to contribute to an open source project. Triage helps ensure issues resolve quickly by:
+
+- Ensuring that the issue's intent and purpose are conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
+- Giving a contributor the information they need before they commit to resolving an issue.
+- Lowering the issue count by preventing duplicate issues.
+- Streamlining the development process by preventing duplicate discussions.
+
+If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours.
+
+### 1. Ensure the issue contains basic information
+
+Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how to categorize the issue. Standard information that *must* be included in most issues includes things such as:
+
+- the output of `docker version`
+- the output of `docker info`
+- the output of `uname -a`
+- a reproducible case if this is a bug, Dockerfiles FTW
+- host distribution and version (Ubuntu 14.04, RHEL, Fedora 23)
+- page URL if this is a docs issue or the name of a man page
+
+Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem.
+
+If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time.
+
+If the author does not provide the requested information within a week, close the issue with a kind note stating that the author can request that the issue be
+reopened when the necessary information is provided.
+
+### 2. Classify the Issue
+
+An issue can have multiple of the following labels. Typically, a properly classified issue should
+have:
+
+- One label identifying its kind (`kind/*`).
+- One or multiple labels identifying the functional areas of interest (`area/*`).
+- Where applicable, one label categorizing its difficulty (`exp/*`).
+
+#### Issue kind
+
+| Kind             | Description |
+|------------------|---------------------------------------------------------------------------------------------------------------------------------|
+| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time, so debugging should be taken into account in the time estimate. |
+| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component. |
+| kind/feature     | Functionality or other elements that the project does not currently support. Features are new and shiny. |
+| kind/question    | Contains a user or contributor question requiring a response. |
+
+#### Functional area
+
+| Area |
+|---------------------------|
+| area/api |
+| area/builder |
+| area/bundles |
+| area/cli |
+| area/daemon |
+| area/distribution |
+| area/docs |
+| area/kernel |
+| area/logging |
+| area/networking |
+| area/plugins |
+| area/project |
+| area/runtime |
+| area/security |
+| area/security/apparmor |
+| area/security/seccomp |
+| area/security/selinux |
+| area/security/trust |
+| area/storage |
+| area/storage/aufs |
+| area/storage/btrfs |
+| area/storage/devicemapper |
+| area/storage/overlay |
+| area/storage/zfs |
+| area/swarm |
+| area/testing |
+| area/volumes |
+
+#### Platform
+
+| Platform |
+|---------------------------|
+| platform/arm |
+| platform/darwin |
+| platform/ibm-power |
+| platform/ibm-z |
+| platform/windows |
+
+#### Experience level
+
+Experience level is a way for a contributor to find an issue based on their
+skill set. Experience types are applied to the issue or pull request using
+labels.
+
+| Level            | Experience level guideline |
+|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| exp/beginner     | New to Docker, and possibly Golang, and is looking to help while learning the basics. |
+| exp/intermediate | Comfortable with Golang, understands the core concepts of Docker, and is looking to dive deeper into the project. |
+| exp/expert       | Proficient with Docker and Golang and has been following, and active in, the community to understand the rationale behind design decisions and where the project is headed. |
+
+As the table states, these labels are meant as guidelines. You might have
+written a whole plugin for Docker in a personal project and never contributed to
+Docker. With that kind of experience, you could take on an exp/expert level task.
+
+#### Triage status
+
+To communicate the triage status with other collaborators, you can apply status
+labels to issues. These labels prevent duplicating effort.
+
+| Status | Description |
+|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| status/confirmed | You triaged the issue, and were able to reproduce it. Always leave a comment describing how you reproduced it, so that the person working on resolving the issue has a way to set up a test case.
+| status/accepted | Apply to enhancements / feature requests that we think are good to have. Adding this label helps contributors find things to work on.
+| status/more-info-needed | Apply this to issues that are missing information (e.g. no `docker version` or `docker info` output, or no steps to reproduce), or require feedback from the reporter. If the issue is not updated after a week, it can generally be closed.
+| status/needs-attention | Apply this label if an issue (or PR) needs more eyes.
+
+### 3. Prioritizing issues
+
+When, and only when, an issue is attached to a specific milestone, the issue can be labeled with the
+following labels to indicate its degree of priority (from more urgent to less urgent).
+
+| Priority | Description |
+|-------------|-----------------------------------------------------------------------------------------------------------------------------------|
+| priority/P0 | Urgent: Security, critical bugs, blocking issues. P0 basically means drop everything you are doing until this issue is addressed. |
+| priority/P1 | Important: P1 issues are a top priority and a must-have for the next release. |
+| priority/P2 | Normal priority: default priority applied. |
+| priority/P3 | Best effort: those are nice to have / minor issues. |
+
+And that's it. That should be all the information required for a new or existing contributor to come in and resolve an issue.
diff --git a/project/PACKAGE-REPO-MAINTENANCE.md b/project/PACKAGE-REPO-MAINTENANCE.md
new file mode 100644
index 0000000..458384a
--- /dev/null
+++ b/project/PACKAGE-REPO-MAINTENANCE.md
@@ -0,0 +1,74 @@
+# Apt & Yum Repository Maintenance
+## A maintainer's guide to managing Docker's package repos
+
+### How to clean up old experimental debs and rpms
+
+We release debs and rpms for experimental nightly builds, so these can build up.
+To remove old experimental debs and rpms, and _ONLY_ keep the latest, follow the
+steps below.
+
+1. Check out docker master
+
+2. Run the clean scripts
+
+```bash
+docker build --rm --force-rm -t docker-dev:master .
+docker run --rm -it --privileged \
+    -v /path/to/your/repos/dir:/volumes/repos \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e GPG_PASSPHRASE \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    docker-dev:master hack/make.sh clean-apt-repo clean-yum-repo generate-index-listing sign-repos
+```
+
+3. Upload the changed repos to `s3` (if you host on s3)
+
+4. Purge the cache, PURGE the cache, PURGE THE CACHE!
+
+### How to get out of a sticky situation
+
+Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or
+"gpg sig error" or similar errors that might happen to the apt repo.
+
+**NOTE:** These are apt repo specific; we have had no experience with anything similar
+happening to the yum repo in the past, so you can rest easy.
+
+For each step listed below, move on to the next if the previous didn't work.
+Otherwise CELEBRATE!
+
+1. Purge the cache.
+
+2. Did you remember to sign the debs after releasing?
+
+Re-sign the repo with your gpg key:
+
+```bash
+docker build --rm --force-rm -t docker-dev:master .
+docker run --rm -it --privileged \
+    -v /path/to/your/repos/dir:/volumes/repos \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e GPG_PASSPHRASE \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    docker-dev:master hack/make.sh sign-repos
+```
+
+Upload the changed repo to `s3` (if that is where you host)
+
+PURGE THE CACHE.
+
+3. Run Jess' magical, save all, only in case of extreme emergencies, "you are
+going to have to break this glass to get it" script.
+
+```bash
+docker build --rm --force-rm -t docker-dev:master .
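+# The build step above recreates the docker-dev image from your master checkout.
+# The run step below then regenerates, re-indexes, and re-signs the apt repo
+# metadata inside that image, against the release directory mounted at
+# /volumes/repos (target names come from hack/make.sh; compare step 2 above).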
+docker run --rm -it --privileged \
+    -v /path/to/your/repos/dir:/volumes/repos \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e GPG_PASSPHRASE \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    docker-dev:master hack/make.sh update-apt-repo generate-index-listing sign-repos
+```
+
+4. Upload the changed repo to `s3` (if that is where you host)
+
+PURGE THE CACHE.
diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md
new file mode 100644
index 0000000..a5b0018
--- /dev/null
+++ b/project/PACKAGERS.md
@@ -0,0 +1,307 @@
+# Dear Packager,
+
+If you are looking to make Docker available on your favorite software
+distribution, this document is for you. It summarizes the requirements for
+building and running the Docker client and the Docker daemon.
+
+## Getting Started
+
+We want to help you package Docker successfully. Before doing any packaging, a
+good first step is to introduce yourself on the [docker-dev mailing
+list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
+to achieve, and tell us how we can help. Don't worry, we don't bite! There might
+even be someone already working on packaging for the same distro!
+
+You can also join the IRC channels - #docker and #docker-dev on Freenode are both
+active and friendly.
+
+We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
+"Packagers Relations", since he's always working to make sure our packagers have
+a good, healthy upstream to work with (both in our communication and in our
+build scripts). If you're having any kind of trouble, feel free to ping him
+directly. He also likes to keep track of what distributions we have packagers
+for, so feel free to reach out to him even just to say "Hi!"
+
+## Package Name
+
+If possible, your package should be called "docker". If that name is already
+taken, a second choice is "docker-engine". Another possible choice is "docker.io".
+
+## Official Build vs Distro Build
+
+The Docker project maintains its own build and release toolchain. It is pretty
+neat and entirely based on Docker (surprise!). This toolchain is the canonical
+way to build Docker. We encourage you to give it a try, and if the circumstances
+allow you to use it, we recommend that you do.
+
+You might not be able to use the official build toolchain - usually because your
+distribution has a toolchain and packaging policy of its own. We get it! Your
+house, your rules. The rest of this document should give you the information you
+need to package Docker your way, without denaturing it in the process.
+
+## Build Dependencies
+
+To build Docker, you will need the following:
+
+* A recent version of Git and Mercurial
+* Go version 1.6 or later
+* A clean checkout of the source added to a valid [Go
+  workspace](https://golang.org/doc/code.html#Workspaces) under the path
+  *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
+  explained in more detail below)
+
+To build the Docker daemon, you will additionally need:
+
+* An amd64/x86_64 machine running Linux
+* SQLite version 3.7.9 or later
+* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
+  2.02.89 or later
+* btrfs-progs version 3.16.1 or later (unless using an older version is
+  absolutely necessary, in which case 3.8 is the minimum)
+* libseccomp version 2.2.1 or later (for build tag seccomp)
+
+Be sure to also check out Docker's Dockerfile for the most up-to-date list of
+these build-time dependencies.
+
+### Go Dependencies
+
+All Go dependencies are vendored under "./vendor". They are used by the official
+build, so the source of truth for the current version of each dependency is
+whatever is in "./vendor".
+
+To use the vendored dependencies, simply make sure the path to "./vendor" is
+included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
+
+If you would rather (or must, due to distro policy) package these dependencies
+yourself, take a look at "vendor.conf" for an easy-to-parse list of the
+exact version of each.
+
+NOTE: if you're not able to package the exact version (to the exact commit) of a
+given dependency, please get in touch so we can remediate! Who knows what
+discrepancies can be caused by even the slightest deviation. We promise to do
+our best to make everybody happy.
+
+## Stripping Binaries
+
+Please, please, please do not strip any compiled binaries. This is really
+important.
+
+In our own testing, stripping the resulting binaries sometimes results in a
+binary that appears to work, but more often causes random panics, segfaults, and
+other issues. Even if the binary appears to work, please don't strip.
+
+See the following quotes from Dave Cheney, which explain this position better
+from the upstream Golang perspective.
+
+### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
+
+> Super super important: Do not strip go binaries or archives. It isn't tested,
+> often breaks, and doesn't work.
+
+### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
+
+> To quote myself: "Please do not strip Go binaries, it is not supported, not
+> tested, is often broken, and doesn't do what you want"
+>
+> To unpack that a bit
+>
+> * not supported, as in, we don't support it, and recommend against it when
+>   asked
+> * not tested, we don't test stripped binaries as part of the build CI process
+> * is often broken, stripping a go binary will produce anywhere from no, to
+>   subtle, to outright execution failure, see above
+
+### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
+
+> To clarify my previous statements.
+>
+> * I do not disagree with the debian policy, it is there for a good reason
+> * Having said that, stripping Go binaries doesn't work, and nobody is
+>   looking at making it work, so there is that.
+>
+> Thanks for patching the build formula.
+
+## Building Docker
+
+Please use our build script ("./hack/make.sh") for all your compilation of
+Docker. If there's something you need that it isn't doing, or something it could
+be doing to make your life as a packager easier, please get in touch with Tianon
+and help us rectify the situation. Chances are good that other packagers have
+probably run into the same problems and a fix might already be in the works, but
+none of us will know for sure unless you harass Tianon about it. :)
+
+All the commands listed within this section should be run with the Docker source
+checkout as the current working directory.
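+
+Once you have run one of the build commands described below, a quick sanity
+check (a suggestion, not an official step) is to confirm that the resulting
+binary kept its symbols, per the "Stripping Binaries" section above; `file`
+should report it as "not stripped":
+
+```bash
+# $VERSION is the contents of the ./VERSION file; the bundles layout is
+# described in the "Static Daemon" section below
+VERSION=$(cat VERSION)
+file "bundles/$VERSION/binary/docker-$VERSION"   # look for "... not stripped"
+```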
+
+### `AUTO_GOPATH`
+
+If you'd rather not be bothered with the hassle of setting up `GOPATH`
+appropriately, and prefer to just get a "build that works", you should
+add something similar to this to whatever script or process you're using to
+build Docker:
+
+```bash
+export AUTO_GOPATH=1
+```
+
+This will cause the build scripts to set up a reasonable `GOPATH` that
+automatically and properly includes both docker/docker from the local
+directory, and the local "./vendor" directory as necessary.
+
+### `DOCKER_BUILDTAGS`
+
+If you're building a binary that may need to be used on platforms that include
+AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
+```bash
+export DOCKER_BUILDTAGS='apparmor'
+```
+
+If you're building a binary that may need to be used on platforms that include
+SELinux, you will need to use the `selinux` build tag:
+```bash
+export DOCKER_BUILDTAGS='selinux'
+```
+
+If you're building a binary that may need to be used on platforms that include
+seccomp, you will need to use the `seccomp` build tag:
+```bash
+export DOCKER_BUILDTAGS='seccomp'
+```
+
+There are build tags for disabling graphdrivers as well. By default, support
+for all graphdrivers is built in.
+
+To disable btrfs:
+```bash
+export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
+```
+
+To disable devicemapper:
+```bash
+export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
+```
+
+To disable aufs:
+```bash
+export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
+```
+
+NOTE: if you need to set more than one build tag, space-separate them:
+```bash
+export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs'
+```
+
+### Static Daemon
+
+If it is feasible within the constraints of your distribution, you should
+seriously consider packaging Docker as a single static binary. A good comparison
+is Busybox, which is often packaged statically as a feature to enable mass
+portability. Because of the unique way Docker operates, being similarly static
+is a "feature".
+
+To build a static Docker daemon binary, run the following command (first
+ensuring that all the necessary libraries are available in static form for
+linking - see the "Build Dependencies" section above, and the relevant lines
+within Docker's own Dockerfile that set up our official build environment):
+
+```bash
+./hack/make.sh binary
+```
+
+This will create a static binary under
+"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
+the file "./VERSION". This binary is usually installed somewhere like
+"/usr/bin/docker".
+
+### Dynamic Daemon / Client-only Binary
+
+If you are only interested in a Docker client binary, you can build using:
+
+```bash
+./hack/make.sh binary-client
+```
+
+If you need to (due to distro policy, distro library availability, or for other
+reasons) create a dynamically compiled daemon binary, or if you are only
+interested in creating a client binary for Docker, use something similar to the
+following:
+
+```bash
+./hack/make.sh dynbinary-client
+```
+
+This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
+client-only builds is the important file to grab and install as appropriate.
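+
+Putting the pieces above together, a distro build might look like the following
+sketch (the build-tag set is illustrative; pick the tags that match your
+distro's kernel and userspace):
+
+```bash
+# let the build script construct a suitable GOPATH, enable optional
+# security features, and build the static daemon binary under ./bundles/
+export AUTO_GOPATH=1
+export DOCKER_BUILDTAGS='apparmor seccomp selinux'
+./hack/make.sh binary
+```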
+ +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) +* xfsprogs (in use: mkfs.xfs) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. + +### Optional Dependencies + +Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. A few examples include: + +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) +* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) +* Libseccomp to allow running seccomp profiles with containers + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: + +```bash +dockerd +``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/project/PATCH-RELEASES.md b/project/PATCH-RELEASES.md new file mode 100644 index 0000000..548db9a --- /dev/null +++ b/project/PATCH-RELEASES.md @@ -0,0 +1,68 @@ +# Docker patch (bugfix) release process + +Patch releases (the 'Z' in vX.Y.Z) are intended to fix major issues in a +release. 
Docker open source projects follow these procedures when creating a
+patch release:
+
+After each release (both "major" (vX.Y.0) and "patch" releases (vX.Y.Z)), a
+patch release milestone (vX.Y.Z + 1) is created.
+
+The creation of a patch release milestone carries no obligation to actually
+*create* a patch release. The purpose of these milestones is to collect
+issues and pull requests that can *justify* a patch release:
+
+- Any maintainer is allowed to add issues and PRs to the milestone; when
+  doing so, preferably leave a comment on the issue or PR explaining *why*
+  you think it should be considered for inclusion in a patch release.
+- Issues introduced in version vX.Y.0 get added to milestone X.Y.Z+1
+- Only *regressions* should be added. Issues *discovered* in version vX.Y.0,
+  but already present in version vX.Y-1.Z should not be added, unless
+  critical.
+- Patch releases can *only* contain bug-fixes. New features should
+  *never* be added to a patch release.
+
+The release captain of the "major" (X.Y.0) release is also responsible for
+patch releases. The release captain, together with another maintainer, will
+review issues and PRs on the milestone, and assigns `priority/` labels. These
+review sessions take place on a weekly basis, more frequently if needed:
+
+- A P0 priority is assigned to critical issues. A maintainer *must* be
+  assigned to these issues. Maintainers should strive to fix a P0 within a week.
+- A P1 priority is assigned to major, but not critical, issues. A maintainer
+  *must* be assigned to these issues.
+- P2 and P3 priorities are assigned to other issues. A maintainer can be
+  assigned.
+- Non-critical issues and PRs can be removed from the milestone. Minor
+  changes, such as typo fixes or omissions in the documentation, can be
+  considered for inclusion in a patch release.
+
+## Deciding if a patch release should be done
+
+- Only a P0 can justify proceeding with the patch release.
+- P1, P2, and P3 issues/PRs should not influence the decision, and
+  should be moved to the X.Y.Z+1 milestone, or removed from the
+  milestone.
+
+> **Note**: If the next "major" release is imminent, the release captain
+> can decide to cancel a patch release, and include the patches in the
+> upcoming major release.
+
+> **Note**: Security releases are also "patch releases", but follow
+> a different procedure. Security releases are developed in a private
+> repository, released and tested under embargo before they become
+> publicly available.
+
+## Deciding on the content of a patch release
+
+When the criteria for moving forward with a patch release are met, the release
+manager will decide on the exact content of the release.
+
+- Fixes to all P0 issues *must* be included in the release.
+- Fixes to *some* P1, P2, and P3 issues *may* be included as part of the patch
+  release depending on the severity of the issue and the risk associated with
+  the patch.
+
+Any code delivered as part of a patch release should make life easier for a
+significant number of users with zero chance of degrading anybody's experience.
+A good rule of thumb for that is to limit cherry-picking to small patches, which
+fix well-understood issues, and which come with verifiable tests.
diff --git a/project/PRINCIPLES.md b/project/PRINCIPLES.md
new file mode 100644
index 0000000..53f0301
--- /dev/null
+++ b/project/PRINCIPLES.md
@@ -0,0 +1,19 @@
+# Docker principles
+
+In the design and development of Docker we try to follow these principles:
+
+(Work in progress)
+
+* Don't try to replace every tool. Instead, be an ingredient to improve them.
+* Less code is better.
+* Fewer components are better. Do you really need to add one more class?
+* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
+* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code.
+* When hesitating between 2 options, choose the one that is easier to reverse.
+* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later.
+* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
+* The fewer moving parts in a container, the better.
+* Don't merge it unless you document it.
+* Don't document it unless you can keep it up-to-date.
+* Don't merge it unless you test it!
+* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
diff --git a/project/README.md b/project/README.md
new file mode 100644
index 0000000..0eb5e58
--- /dev/null
+++ b/project/README.md
@@ -0,0 +1,24 @@
+# Hacking on Docker
+
+The `project/` directory holds information and tools for everyone involved in the process of creating and
+distributing Docker, specifically:
+
+## Guides
+
+If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTING.md](../CONTRIBUTING.md).
+
+If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS).
+
+If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md).
+
+If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md).
+
+## Roadmap
+
+A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md).
+
+
+## Build tools
+
+[hack/make.sh](../hack/make.sh) is the primary build tool for docker. It is used for compiling the official binary,
+running the test suite, and pushing releases.
diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md
new file mode 100644
index 0000000..5c73b58
--- /dev/null
+++ b/project/RELEASE-CHECKLIST.md
@@ -0,0 +1,519 @@
+# Release Checklist
+## A maintainer's guide to releasing Docker
+
+So you're in charge of a Docker release? Cool. Here's what to do.
+
+If your experience deviates from this document, please document the changes
+to keep it up-to-date.
+
+It is important to note that this document assumes that the git remote in your
+repository that corresponds to "https://github.com/docker/docker" is named
+"origin". If yours is not (for example, if you've chosen to name it "upstream"
+or something similar instead), be sure to adjust the listed snippets for your
+local environment accordingly. If you are not sure what your upstream remote is
+named, use a command like `git remote -v` to find out.
+
+If you don't have an upstream remote, you can add one easily using something
+like:
+
+```bash
+export GITHUBUSER="YOUR_GITHUB_USER"
+git remote add origin https://github.com/docker/docker.git
+git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git
+```
+
+### 1. Pull from master and create a release branch
+
+All release version numbers will be of the form: vX.Y.Z where X is the major
+version number, Y is the minor version number, and Z is the patch release version number.
+
+#### Major releases
+
+The release branch name is just vX.Y because it's going to be the basis for all .Z releases.
+
+```bash
+export BASE=vX.Y
+export VERSION=vX.Y.Z
+git fetch origin
+git checkout --track origin/master
+git checkout -b release/$BASE
+```
+
+This new branch is going to be the base for the release. We need to push it to origin so we
+can track the cherry-picked changes and the version bump:
+
+```bash
+git push origin release/$BASE
+```
+
+When you have the major release branch in origin, we need to create the bump branch
+that we'll push to our fork:
+
+```bash
+git checkout -b bump_$VERSION
+```
+
+#### Patch releases
+
+If we have the release branch in origin, we can create the forked bump branch from it directly:
+
+```bash
+export VERSION=vX.Y.Z
+export PATCH=vX.Y.Z+1
+git fetch origin
+git checkout --track origin/release/$BASE
+git checkout -b bump_$PATCH
+```
+
+We cherry-pick only the commits we want into the bump branch:
+
+```bash
+# get the commit ids we want to cherry-pick
+git log
+# cherry-pick the commits starting from the oldest one, without including merge commits
+git cherry-pick -s -x <commit id>
+git cherry-pick -s -x <commit id>
+...
+```
+
+### 2. Update the VERSION files and API version on master
+
+We don't want to stop contributions to master just because we are releasing.
+So, after the release branch is up, we bump the VERSION and API version to mark
+the start of the "next" release.
+
+#### 2.1 Update the VERSION files
+
+Update the content of the `VERSION` file to be the next minor (incrementing Y)
+and add the `-dev` suffix. For example, after the release branch for 1.5.0 is
+created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the
+making").
+
+#### 2.2 Update API version on master
+
+We don't want API changes to go to the now frozen API version. Create a new
+entry in `docs/reference/api/` by copying the latest and bumping the version
+number (in both the file's name and content), and submit this in a PR against
+master.
+
+### 3. Update CHANGELOG.md
+
+You can run this command for reference with git 2.0:
+
+```bash
+git fetch --tags
+LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1)
+git log --stat $LAST_VERSION..bump_$VERSION
+```
+
+If you don't have git 2.0 but have a sort command that supports `-V`:
+```bash
+git fetch --tags
+LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1)
+git log --stat $LAST_VERSION..bump_$VERSION
+```
+
+If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient.
+```markdown
+#### Notable features since <last release>
+* New docker command to do something useful
+* Engine API change (deprecating old version)
+* Performance improvements in some usecases
+* ...
+```
+
+For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes.
+Each change should be listed under a category heading formatted as `#### CATEGORY`.
+
+`CATEGORY` should describe which part of the project is affected.
+  Valid categories are:
+  * Builder
+  * Documentation
+  * Hack
+  * Packaging
+  * Engine API
+  * Runtime
+  * Other (please use this category sparingly)
+
+Each change should be formatted as `BULLET DESCRIPTION`, given:
+
+* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or
+  upgrade, respectively.
+
+* DESCRIPTION: a concise description of the change that is relevant to the
+  end-user, using the present tense. Changes should be described in terms
+  of how they affect the user, for example "Add new feature X which allows Y",
+  "Fix bug which caused X", "Increase performance of Y".
+
+EXAMPLES:
+
+```markdown
+## 0.3.6 (1995-12-25)
+
+#### Builder
+
++ 'docker build -t FOO .' applies the tag FOO to the newly built image
+
+#### Engine API
+
+- Fix a bug in the optional unix socket transport
+
+#### Runtime
+
+* Improve detection of kernel version
+```
+
+If you need a list of contributors between the last major release and the
+current bump branch, use something like:
+```bash
+git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf
+```
+Obviously, you'll need to adjust version numbers as necessary. If you just need
+a count, add a simple `| wc -l`.
+
+### 4. Change the contents of the VERSION file
+
+Before the big thing, you'll want to make successive release candidates and get
+people to test. The release candidate number `N` should be part of the version:
+
+```bash
+export RC_VERSION=${VERSION}-rcN
+echo ${RC_VERSION#v} > VERSION
+```
+
+### 5. Test the docs
+
+Make sure that your tree includes documentation for any modified or
+new features, syntax or semantic changes.
+
+To test locally:
+
+```bash
+make docs
+```
+
+To make a shared test at https://beta-docs.docker.io:
+
+(You will need the `awsconfig` file added to the `docs/` dir)
+
+```bash
+make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
+```
+
+### 6. Commit and create a pull request to the "release" branch
+
+```bash
+git add VERSION CHANGELOG.md
+git commit -m "Bump version to $VERSION"
+git push $GITHUBUSER bump_$VERSION
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1"
+```
+
+That last command will give you the proper link to visit to ensure that you
+open the PR against the "release" branch instead of accidentally against
+"master" (like so many brave souls before you already have).
+
+### 7. Create a PR to update the AUTHORS file for the release
+
+Update the AUTHORS file by running the `hack/generate-authors.sh` script on the
+release branch. To prevent duplicate entries, you may need to update the
+`.mailmap` file accordingly.
+
+### 8. Build release candidate rpms and debs
+
+**NOTE**: It will be a lot faster if you use `DOCKER_GRAPHDRIVER` to pass a
+graphdriver other than `vfs`.
+
+```bash
+docker build -t docker .
+docker run \
+    --rm -t --privileged \
+    -e DOCKER_GRAPHDRIVER=aufs \
+    -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+    docker \
+    hack/make.sh binary build-deb build-rpm
+```
+
+### 9. Publish release candidate rpms and debs
+
+You can release the rpms and debs you built in the last step on the same
+server or, ideally, move them via scp to a dedicated release box, into
+another docker/docker directory in bundles. This next step assumes you have
+a checkout of the docker source code at the same commit you used to build, with
+the artifacts from the last step in `bundles`.
+
+**NOTE:** If you put a space before the command, your `.bash_history` will not
+save it (useful for the `GPG_PASSPHRASE`).
+
+```bash
+docker build -t docker .
+docker run --rm -it --privileged \
+    -v /volumes/repos:/volumes/repos \
+    -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    -e GPG_PASSPHRASE \
+    -e KEEPBUNDLE=1 \
+    docker \
+    hack/make.sh release-deb release-rpm sign-repos generate-index-listing
+```
+
+### 10. Upload the changed repos to wherever you host
+
+For example, above we bind mounted `/volumes/repos` as the storage for
+`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with
+a specific s3 bucket for the apt repo, and `/volumes/repos/yum` can be synced with
+an s3 bucket for the yum repo.
+
+### 11. Publish release candidate binaries
+
+To run this you will need access to the release credentials. Get them from the
+Core maintainers.
+
+```bash
+docker build -t docker .
+
+# static binaries are still pushed to s3
+docker run \
+    -e AWS_S3_BUCKET=test.docker.com \
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e AWS_DEFAULT_REGION \
+    -i -t --privileged \
+    docker \
+    hack/release.sh
+```
+
+It will run the test suite, build the binaries and upload to the specified bucket,
+so this is a good time to verify that you're running against **test**.docker.com.
+
+### 12. Purge the cache!
+
+After the binaries are uploaded to test.docker.com and the packages are on
+apt.dockerproject.org and yum.dockerproject.org, make sure
+they get tested in both Ubuntu and Debian for any obvious installation
+issues or runtime issues.
+
+If everything looks good, it's time to create a git tag for this candidate:
+
+```bash
+git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION
+git push origin $RC_VERSION
+```
+
+Announcing on multiple media channels is the best way to get some help testing! An easy
+way to get some useful links for sharing:
+
+```bash
+echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh"
+echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}"
+echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}"
+echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz"
+echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe"
+echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe"
+```
+### 13. Announce the release candidate
+
+The release candidate should be announced on:
+
+- IRC on #docker, #docker-dev, #docker-maintainers
+- In a comment on the pull request to notify subscribed people on GitHub
+- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group
+- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group
+- (Optional) Any social media that can bring some attention to the release candidate
+
+### 14. Iterate on successive release candidates
+
+Spend several days along with the community explicitly investing time and
+resources to try and break Docker in every possible way, documenting any
+findings pertinent to the release. This time should be spent testing and
+finding ways in which the release might have caused various features or upgrade
+environments to have issues, not coding. During this time, the release is in
+code freeze, and any additional code changes will be pushed out to the next
+release.
+
+It should include various levels of breaking Docker, beyond just using Docker
+by the book.
+
+Any issues found may still remain issues for this release, but they should be
+documented and given appropriate warnings.
+
+During this phase, the `bump_$VERSION` branch will keep evolving as you will
+produce new release candidates. The frequency of new candidates is up to the
+release manager: use your best judgement taking into account the severity of
+reported issues, testers' availability, and time to scheduled release date.
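+
+One way (not part of the official checklist) to spot candidate fixes for the
+next release candidate is to ask git which commits on master have no
+patch-equivalent in the bump branch yet:
+
+```bash
+# lines prefixed "+" are commits in origin/master with no equivalent
+# (by patch-id) in bump_$VERSION
+git cherry -v bump_$VERSION origin/master
+```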
+
+Each time you want to produce a new release candidate, you will start by
+adding commits to the branch, usually by cherry-picking from master:
+
+```bash
+git cherry-pick -s -x -m0 <commit id>
+```
+
+You want your "bump commit" (the one that updates the CHANGELOG and VERSION
+files) to remain on top, so you'll have to `git rebase -i` to bring it back up.
+
+Now that your bump commit is back on top, you will need to update the CHANGELOG
+file (if appropriate for this particular release candidate), and update the
+VERSION file to increment the RC number:
+
+```bash
+export RC_VERSION=$VERSION-rcN
+echo $RC_VERSION > VERSION
+```
+
+You can now amend your last commit and update the bump branch:
+
+```bash
+git commit --amend
+git push -f $GITHUBUSER bump_$VERSION
+```
+
+Repeat steps 6 to 14 to tag the code, publish new binaries, announce availability, and
+get help testing.
+
+### 15. Finalize the bump branch
+
+When you're happy with the quality of a release candidate, you can move on and
+create the real thing.
+
+You will first have to amend the "bump commit" to drop the release candidate
+suffix in the VERSION file:
+
+```bash
+echo $VERSION > VERSION
+git add VERSION
+git commit --amend
+```
+
+You will then repeat step 6 to publish the binaries to test.
+
+### 16. Get 2 other maintainers to validate the pull request
+
+### 17. Build final rpms and debs
+
+```bash
+docker build -t docker .
+docker run \
+    --rm -t --privileged \
+    -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+    docker \
+    hack/make.sh binary build-deb build-rpm
+```
+
+### 18. Publish final rpms and debs
+
+You can release the rpms and debs you built in the last step on the same
+server or, ideally, move them via scp to a dedicated release box, into
+another docker/docker directory in bundles. This next step assumes you have
+a checkout of the docker source code at the same commit you used to build, with
+the artifacts from the last step in `bundles`.
+
+**NOTE:** If you put a space before the command, your `.bash_history` will not
+save it (useful for the `GPG_PASSPHRASE`).
+
+```bash
+docker build -t docker .
+docker run --rm -it --privileged \
+    -v /volumes/repos:/volumes/repos \
+    -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
+    -v $HOME/.gnupg:/root/.gnupg \
+    -e DOCKER_RELEASE_DIR=/volumes/repos \
+    -e GPG_PASSPHRASE \
+    -e KEEPBUNDLE=1 \
+    docker \
+    hack/make.sh release-deb release-rpm sign-repos generate-index-listing
+```
+
+### 19. Upload the changed repos to wherever you host
+
+For example, above we bind mounted `/volumes/repos` as the storage for
+`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with
+a specific s3 bucket for the apt repo, and `/volumes/repos/yum` can be synced with
+an s3 bucket for the yum repo.
+
+### 20. Publish final binaries
+
+Once they're tested and reasonably believed to be working, run against
+get.docker.com:
+
+```bash
+docker build -t docker .
+# static binaries are still pushed to s3
+docker run \
+    -e AWS_S3_BUCKET=get.docker.com \
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e AWS_DEFAULT_REGION \
+    -i -t --privileged \
+    docker \
+    hack/release.sh
+```
+
+### 21. Purge the cache!
+
+### 22. Apply tag and create release
+
+It's very important that we don't make the tag until after the official
+release is uploaded to get.docker.com!
+
+```bash
+git tag -a $VERSION -m $VERSION bump_$VERSION
+git push origin $VERSION
+```
+
+Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new).
+If the tag is for an RC, make sure you check `This is a pre-release` at the bottom of the form.
+
+Select the tag that you just pushed as the version and paste the changelog in the description of the release.
+You can see examples in these two links:
+
+https://github.com/docker/docker/releases/tag/v1.8.0
+https://github.com/docker/docker/releases/tag/v1.8.0-rc3
+
+### 23. Go to GitHub to merge the `bump_$VERSION` branch into release
+
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
+
+### 24. Update the docs branch
+
+You will need to point the docs branch to the newly created release tag:
+
+```bash
+git checkout origin/docs
+git reset --hard origin/$VERSION
+git push -f origin docs
+```
+
+The docs will appear on https://docs.docker.com/ (though there may be cached
+versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
+For more information about documentation releases, see `docs/README.md`.
+
+Note that the new docs will not appear live on the site until the cache (a complex,
+distributed CDN system) is flushed. The `make docs-release` command will do this
+_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run
+and you can check its progress with the CDN Cloudfront Chrome addon.
+
+### 25. Create a new pull request to merge your bump commit back into master
+
+```bash
+git checkout master
+git fetch
+git reset --hard origin/master
+git cherry-pick -s -x $VERSION
+git push $GITHUBUSER merge_release_$VERSION
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
+```
+
+Again, get two maintainers to validate, then merge, then push that pretty
+blue button to delete your branch.
+
+### 26. Rejoice and Evangelize!
+
+Congratulations! You're done.
+
+Go forth and announce the glad tidings of the new release in `#docker`,
+`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev),
+the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce),
+and on Twitter!
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
new file mode 100644
index 0000000..d764e9d
--- /dev/null
+++ b/project/RELEASE-PROCESS.md
@@ -0,0 +1,78 @@
+# Docker Release Process
+
+This document describes how the Docker project is released. The Docker project
+release process targets the Engine, Compose, Kitematic, Machine, Swarm,
+Distribution, Notary and their underlying dependencies (libnetwork, libkv,
+etc...).
+
+Step-by-step technical details of the process are described in
+[RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md).
+
+## Release cycle
+
+The Docker project follows a **time-based release cycle** and ships every nine
+weeks. A release cycle starts the same day the previous release cycle ends.
+
+The first six weeks of the cycle are dedicated to development and review. During
+this phase, new features and bugfixes submitted to any of the projects are
+**eligible** to be shipped as part of the next release. However, no changeset
+submitted during this period is guaranteed to be merged for the current release
+cycle.
+
+## The freeze period
+
+Six weeks after the beginning of the cycle, the codebase is officially frozen
+and reaches a state close to the final release. A Release Candidate
+(RC) gets created at the same time. The freeze period is used to find bugs and
+get feedback on the state of the RC before the release.
+
+During this freeze period, while the `master` branch will continue its normal
+development cycle, no new features are accepted into the RC. As bugs are fixed
+in `master`, the release owner will selectively 'cherry-pick' critical ones to
+be included into the RC. As the RC changes, new RCs are made available for the
+community to test and review.
+
+This period lasts for three weeks.
+
+## How to maximize chances of being merged before the freeze date?
+
+First of all, there is never a guarantee that a specific changeset is going to
+be merged. However there are different actions to follow to maximize the chances
+for a changeset to be merged:
+
+- The team gives priority to review the PRs aligned with the Roadmap (usually
+defined by a ROADMAP.md file at the root of the repository).
+- The earlier a PR is opened, the more time the maintainers have to review. For
+example, if a PR is opened the day before the freeze date, it's very unlikely
+that it will be merged for the release.
+- Constant communication with the maintainers (mailing-list, IRC, Github issues,
+etc.) allows you to get early feedback on the design before getting into the
+implementation, which usually reduces the time needed to discuss a changeset.
+- If the code is commented, fully tested and, by extension, follows every single
+rule defined by the [CONTRIBUTING guide](
+https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help
+the maintainers by speeding up the review.
+
+## The release
+
+At the end of the freeze (nine weeks after the start of the cycle), all the
+projects are released together.
+
+```
+          Codebase                        Release
+Start of  is frozen                       (end of the
+the Cycle (7th week)                      9th week)
++---------------------------------------+---------------------+
+|                                       |                     |
+|        Development phase              |   Freeze phase      |
+|                                       |                     |
++---------------------------------------+---------------------+
+ 6 weeks                                  3 weeks
+<---------------------------------------><-------------------->
+```
+
+## Exceptions
+
+If a critical issue is found at the end of the freeze period and more time is
+needed to address it, the release will be pushed back. When a release gets
+pushed back, the next release cycle gets delayed as well.
diff --git a/project/REVIEWING.md b/project/REVIEWING.md
new file mode 100644
index 0000000..51ef4c5
--- /dev/null
+++ b/project/REVIEWING.md
@@ -0,0 +1,246 @@
+# Pull request reviewing process
+
+## Labels
+
+Labels are carefully picked to optimize for:
+
+ - Readability: maintainers must immediately know the state of a PR
+ - Filtering simplicity: different labels represent many different aspects of
+   the reviewing work, and can even be targeted at different maintainer groups.
+
+A pull request should only be attributed labels documented in this section: other labels that may
+exist on the repository should apply to issues.
+
+### DCO labels
+
+ * `dco/no`: automatically set by a bot when one of the commits lacks a proper signature
+
+### Status labels
+
+ * `status/0-triage`
+ * `status/1-design-review`
+ * `status/2-code-review`
+ * `status/3-docs-review`
+ * `status/4-ready-to-merge`
+
+Special status labels:
+
+ * `status/failing-ci`: indicates that the PR in its current state fails the test suite
+ * `status/needs-attention`: calls for a collective discussion during a review session
+
+### Impact labels (apply to merged pull requests)
+
+ * `impact/api`
+ * `impact/changelog`
+ * `impact/cli`
+ * `impact/deprecation`
+ * `impact/distribution`
+ * `impact/dockerfile`
+
+### Process labels (apply to merged pull requests)
+
+Process labels are to assist in preparing (patch) releases. These labels should only be used for pull requests.
+
+Label                           | Use for
+------------------------------- | -------------------------------------------------------------------------
+`process/cherry-pick`           | PRs that should be cherry-picked in the bump/release branch. These pull-requests must also be assigned to a milestone.
+`process/cherry-picked`         | PRs that have been cherry-picked. This label is helpful to find PRs that have been added to release-candidates, and to update the change log
+`process/docs-cherry-pick`      | PRs that should be cherry-picked in the docs branch. Only apply this label for changes that apply to the *current* release, and generic documentation fixes, such as Markdown and spelling fixes.
+`process/docs-cherry-picked`    | PRs that have been cherry-picked in the docs branch
+`process/merge-to-master`       | PRs that are opened directly on the bump/release branch, but also need to be merged back to "master"
+`process/merged-to-master`      | PRs that have been merged back to "master"
+
+
+## Workflow
+
+An opened pull request can be in 1 of 5 distinct states, for each of which there is a corresponding
+label that needs to be applied.
+
+### Triage - `status/0-triage`
+
+Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage`
+label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction
+with the PR. The starting label may potentially skip some steps depending on the kind of pull
+request: use your best judgement.
+
+Maintainers should perform an initial, high-level overview of the pull request before moving it to
+the next appropriate stage:
+
+ - Has DCO
+ - Contains sufficient justification (e.g., use cases) for the proposed change
+ - References the Github issue it fixes (if any) in the commit or the first Github comment
+
+Possible transitions from this state:
+
+ * Close: e.g., unresponsive contributor without DCO
+ * `status/1-design-review`: general case
+ * `status/2-code-review`: e.g. trivial bugfix
+ * `status/3-docs-review`: non-proposal documentation-only change
+
+### Design review - `status/1-design-review`
+
+Maintainers are expected to comment on the design of the pull request. Review of documentation is
+expected only in the context of design validation, not for stylistic changes.
+
+Ideally, documentation should reflect the expected behavior of the code. No code review should
+take place in this step.
+
+There are no strict rules on the way a design is validated: we usually aim for a consensus,
+although a single maintainer approval is often sufficient for obviously reasonable changes. In
+general, strong disagreement expressed by any of the maintainers should not be taken lightly.
+
+Once the design is approved, a maintainer should make sure to remove this label and add the next one.
+
+Possible transitions from this state:
+
+ * Close: design rejected
+ * `status/2-code-review`: general case
+ * `status/3-docs-review`: proposals with only documentation changes
+
+### Code review - `status/2-code-review`
+
+Maintainers are expected to review the code and ensure that it is of good quality and in accordance
+with the documentation in the PR.
+
+New test cases are expected to be added. Ideally, those test cases should fail when the new code is
+absent, and pass when it is present. The test cases should strive to cover as many variants and code
+paths as possible to ensure maximum coverage.
+
+Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When
+the author of a PR is a maintainer, they still need the approval of two other maintainers.
+
+Once the code is approved according to the rules of the subsystem, a maintainer should make sure to
+remove this label and add the next one. If documentation is absent but expected, maintainers should
+ask for documentation and move to status `status/3-docs-review` for a docs maintainer to follow up.
+
+Possible transitions from this state:
+
+ * Close
+ * `status/1-design-review`: new design concerns are raised
+ * `status/3-docs-review`: general case
+ * `status/4-ready-to-merge`: change not impacting documentation
+
+### Docs review - `status/3-docs-review`
+
+Maintainers are expected to review the documentation in its broader context, ensuring consistency,
+completeness, validity, and breadth of coverage across all existing and new documentation.
+
+They should ask for any editorial change that makes the documentation more consistent and easier to
+understand.
+
+The docker/docker repository only contains _reference documentation_; all
+"narrative" documentation is kept in a [unified documentation
+repository](https://github.com/docker/docker.github.io). Reviewers must
+therefore verify which parts of the documentation need to be updated. Any
+contribution that may require changing the narrative should get the
+`impact/documentation` label: this is the signal for documentation maintainers
+that a change will likely need to happen on the unified documentation
+repository. When in doubt, it’s better to add the label and leave it to
+documentation maintainers to decide whether it’s OK to skip. In all cases,
+leave a comment to explain what documentation changes you think might be needed.
+
+- If the pull request does not impact the documentation at all, the docs review
+  step is skipped, and the pull request is ready to merge.
+- If the changes in the pull request require changes to the reference
+  documentation (either the command-line reference or the API reference), those
+  changes must be included as part of the pull request and will be reviewed now.
+  Keep in mind that the narrative documentation may contain output examples of
+  commands, so it may need to be updated as well, in which case the
+  `impact/documentation` label must be applied.
+- If the PR has the `impact/documentation` label, merging is delayed until a
+  documentation maintainer acknowledges that a corresponding documentation PR
+  (or issue) is opened on the documentation repository. Once a documentation
+  maintainer acknowledges the change, they will move the PR to `status/4-ready-to-merge`
+  for a code maintainer to push the green button.
+
+Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs
+sub-project maintainers.
If the docs change originates with a docs maintainer, only one additional
+LGTM is required (since we assume a docs maintainer approves of their own PR).
+
+Once the documentation is approved, a maintainer should make sure to remove this label and
+add the next one.
+
+Possible transitions from this state:
+
+ * Close
+ * `status/1-design-review`: new design concerns are raised
+ * `status/2-code-review`: requires more code changes
+ * `status/4-ready-to-merge`: general case
+
+### Merge - `status/4-ready-to-merge`
+
+Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase
+or carry the pull request themselves.
+
+Possible transitions from this state:
+
+ * Merge: general case
+ * Close: e.g., the change is carried by another pull request
+
+After merging a pull request, the maintainer should consider applying one or more impact labels
+to ease future classification:
+
+ * `impact/api` signifies the patch impacted the Engine API
+ * `impact/changelog` signifies the change is significant enough to make it into the changelog
+ * `impact/cli` signifies the patch impacted a CLI command
+ * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax
+ * `impact/deprecation` signifies the patch participates in deprecating an existing feature
+
+### Close
+
+If a pull request is closed, it is expected that sufficient justification will be provided. In
+particular, if there are alternative ways of achieving the same net result, then those need to be
+spelled out. If the pull request is trying to solve a use case that is not one that we (as a
+community) want to support, then a justification for why not should be provided.
+
+The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We
+assume that the group of maintainers is bound by mutual trust and respect, and that opposition from
+any single maintainer should be taken into consideration. Similarly, we expect maintainers to
+justify their reasoning and to accept debate.
+
+## Escalation process
+
+Despite the reviewing process described above, some PRs might not show any progress for various
+reasons:
+
+ - No strong opinion for or against the proposed patch
+ - Debates about the proper way to solve the problem at hand
+ - Lack of consensus
+ - ...
+
+All of these will eventually lead to stalled PRs, where no apparent progress is made across several
+weeks, or even months.
+
+Maintainers should use their best judgement and apply the `status/needs-attention` label. It must
+be used sparingly, as each PR with such a label will be discussed by a group of maintainers during a
+review session. The goal of that session is to agree on one of the following outcomes for the PR:
+
+ * Close, explaining the rationale for not pursuing further
+ * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch
+   (ideally, a maintainer should be immediately assigned to make sure that the PR keeps receiving
+   attention)
+ * Escalate to Solomon by formulating a few specific questions whose answers will allow
+   maintainers to decide
+
+## Milestones
+
+Typically, every merged pull request gets shipped naturally with the next release cut from the
+`master` branch (either the next minor or major version, as indicated by the
+[`VERSION`](https://github.com/docker/docker/blob/master/VERSION) file at the root of the
+repository). However, the time-based nature of the release process provides no guarantee that a
+given pull request will get merged in time.
In other words, all open pull requests are implicitly
+considered part of the next minor or major release milestone, and this is not materialized on
+GitHub.
+
+A merged pull request must be attached to the milestone corresponding to the release in which it
+will be shipped: this is useful both for tracking and for helping the release manager with
+changelog generation.
+
+An open pull request may exceptionally get attached to a milestone to express a particular intent to
+get it merged in time for that release. This may for example be the case for an important feature to
+be included in a minor release, or a critical bugfix to be included in a patch release.
+
+Finally, and as documented by the [`PATCH-RELEASES.md`](PATCH-RELEASES.md) process, the existence of
+a milestone is not a guarantee that a release will happen, as some milestones will be created purely
+for the purpose of bookkeeping.
diff --git a/project/TOOLS.md b/project/TOOLS.md
new file mode 100644
index 0000000..dda0fc0
--- /dev/null
+++ b/project/TOOLS.md
@@ -0,0 +1,63 @@
+# Tools
+
+This page describes the tools we use and the infrastructure that is in place for
+the Docker project.
+
+### CI
+
+The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our
+continuous integration server. Each pull request to Docker is tested by running the
+equivalent of `make all`. We chose Jenkins because we can host it ourselves and
+we run Docker in Docker for testing.
+
+#### Leeroy
+
+Leeroy is a Go application which integrates Jenkins with
+GitHub pull requests. Leeroy uses
+[GitHub hooks](https://developer.github.com/v3/repos/hooks/)
+to listen for pull request notifications and starts jobs on your Jenkins
+server. Using the Jenkins
+[notification plugin](https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin),
+Leeroy updates the pull request using GitHub's
+[status API](https://developer.github.com/v3/repos/statuses/)
+with pending, success, failure, or error statuses.
+
+The leeroy repository is maintained at
+[github.com/docker/leeroy](https://github.com/docker/leeroy).
+
+#### GordonTheTurtle IRC Bot
+
+The GordonTheTurtle IRC Bot lives in the
+[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel
+on Freenode. He is built in Go and is based on the project at
+[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot).
+
+His main command is `!rebuild`, which rebuilds a given pull request for a repository.
+This command works by integrating with Leeroy. He has a few other commands too, such
+as `!gif` or `!godoc`, but we are always looking for more fun commands to add.
+
+The gordon-bot repository is maintained at
+[github.com/docker/gordon-bot](https://github.com/docker/gordon-bot).
+
+### NSQ
+
+We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project
+infrastructure.
+
+#### Hooks
+
+The hooks project,
+[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks),
+is a small Go application that manages webhooks from GitHub, hub.docker.com, and
+other third-party services.
+
+It can be used for listening to GitHub webhooks and pushing them to a queue,
+archiving hooks to RethinkDB for processing, and broadcasting hooks to various
+jobs.
+
+#### Docker Master Binaries
+
+One of the jobs queued through the hooks is the building of the master
+binaries. This happens on every push to the master branch of Docker. The
+repository for this is maintained at
+[github.com/docker/docker-bb](https://github.com/docker/docker-bb).
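+
+As an aside to the Leeroy description above, here is a minimal sketch of the
+status-reporting half of such an integration. This is not Leeroy's actual code:
+the repository name, commit SHA, status context, and the `GITHUB_TOKEN`
+environment variable are all assumptions made for the example.
+
+```go
+package status
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+)
+
+// PostStatus reports a CI state ("pending", "success", "failure", or "error")
+// for a commit via GitHub's status API.
+func PostStatus(repo, sha, state, description string) error {
+	body, err := json.Marshal(map[string]string{
+		"state":       state,
+		"description": description,
+		"context":     "jenkins-ci", // hypothetical status context
+	})
+	if err != nil {
+		return err
+	}
+	url := fmt.Sprintf("https://api.github.com/repos/%s/statuses/%s", repo, sha)
+	req, err := http.NewRequest("POST", url, bytes.NewReader(body))
+	if err != nil {
+		return err
+	}
+	// Assumes a personal access token in the GITHUB_TOKEN environment variable.
+	req.Header.Set("Authorization", "token "+os.Getenv("GITHUB_TOKEN"))
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusCreated {
+		return fmt.Errorf("unexpected status: %s", resp.Status)
+	}
+	return nil
+}
+```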
diff --git a/reference/store.go b/reference/store.go
new file mode 100644
index 0000000..6599235
--- /dev/null
+++ b/reference/store.go
@@ -0,0 +1,343 @@
+package reference
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrDoesNotExist is returned if a reference is not found in the
+	// store.
+	ErrDoesNotExist = errors.New("reference does not exist")
+)
+
+// An Association is a tuple associating a reference with an image ID.
+type Association struct {
+	Ref reference.Named
+	ID  digest.Digest
+}
+
+// Store provides the set of methods which can operate on a tag store.
+type Store interface {
+	References(id digest.Digest) []reference.Named
+	ReferencesByName(ref reference.Named) []Association
+	AddTag(ref reference.Named, id digest.Digest, force bool) error
+	AddDigest(ref reference.Canonical, id digest.Digest, force bool) error
+	Delete(ref reference.Named) (bool, error)
+	Get(ref reference.Named) (digest.Digest, error)
+}
+
+type store struct {
+	mu sync.RWMutex
+	// jsonPath is the path to the file where the serialized tag data is
+	// stored.
+	jsonPath string
+	// Repositories is a map of repositories, indexed by name.
+	Repositories map[string]repository
+	// referencesByIDCache is a cache of references indexed by ID, to speed
+	// up References.
+	referencesByIDCache map[digest.Digest]map[string]reference.Named
+	// platform is the container target platform for this store (which may be
+	// different from the host operating system).
+	platform string
+}
+
+// Repository maps tags to digests. The key is a stringified Reference,
+// including the repository name.
+type repository map[string]digest.Digest
+
+type lexicalRefs []reference.Named
+
+func (a lexicalRefs) Len() int      { return len(a) }
+func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a lexicalRefs) Less(i, j int) bool {
+	return a[i].String() < a[j].String()
+}
+
+type lexicalAssociations []Association
+
+func (a lexicalAssociations) Len() int      { return len(a) }
+func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a lexicalAssociations) Less(i, j int) bool {
+	return a[i].Ref.String() < a[j].Ref.String()
+}
+
+// NewReferenceStore creates a new reference store, tied to a file path where
+// the set of references is serialized in JSON format.
+func NewReferenceStore(jsonPath, platform string) (Store, error) {
+	abspath, err := filepath.Abs(jsonPath)
+	if err != nil {
+		return nil, err
+	}
+
+	store := &store{
+		jsonPath:            abspath,
+		Repositories:        make(map[string]repository),
+		referencesByIDCache: make(map[digest.Digest]map[string]reference.Named),
+		platform:            platform,
+	}
+	// Load the json file if it exists, otherwise create it.
+	if err := store.reload(); os.IsNotExist(err) {
+		if err := store.save(); err != nil {
+			return nil, err
+		}
+	} else if err != nil {
+		return nil, err
+	}
+	return store, nil
+}
+
+// AddTag adds a tag reference to the store. If force is set to true, existing
+// references can be overwritten. This only works for tags, not digests.
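+//
+// A hypothetical usage sketch (the reference string and image ID below are
+// illustrative only):
+//
+//	ref, _ := reference.ParseNormalizedNamed("busybox:latest")
+//	err := store.AddTag(ref, imageID, false) // errors if the tag exists and force is false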
+func (store *store) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(reference.TagNameOnly(ref), id, force) +} + +// AddDigest adds a digest reference to the store. +func (store *store) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return store.addReference(ref, id, force) +} + +func favorDigest(originalRef reference.Named) (reference.Named, error) { + ref := originalRef + // If the reference includes a digest and a tag, we must store only the + // digest. + canonical, isCanonical := originalRef.(reference.Canonical) + _, isNamedTagged := originalRef.(reference.NamedTagged) + + if isCanonical && isNamedTagged { + trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + // should never happen + return originalRef, err + } + ref = trimmed + } + return ref, nil +} + +func (store *store) addReference(ref reference.Named, id digest.Digest, force bool) error { + ref, err := favorDigest(ref) + if err != nil { + return err + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + if refName == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[refName] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[refName] = repository + } + + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(reference.Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", refStr, oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]reference.Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. 
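+//
+// Sketch (continuing the hypothetical example above):
+//
+//	deleted, err := store.Delete(ref) // deleted is true only if ref was present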
+func (store *store) Delete(ref reference.Named) (bool, error) {
+	ref, err := favorDigest(ref)
+	if err != nil {
+		return false, err
+	}
+
+	ref = reference.TagNameOnly(ref)
+
+	refName := reference.FamiliarName(ref)
+	refStr := reference.FamiliarString(ref)
+
+	store.mu.Lock()
+	defer store.mu.Unlock()
+
+	repository, exists := store.Repositories[refName]
+	if !exists {
+		return false, ErrDoesNotExist
+	}
+
+	if id, exists := repository[refStr]; exists {
+		delete(repository, refStr)
+		if len(repository) == 0 {
+			delete(store.Repositories, refName)
+		}
+		if store.referencesByIDCache[id] != nil {
+			delete(store.referencesByIDCache[id], refStr)
+			if len(store.referencesByIDCache[id]) == 0 {
+				delete(store.referencesByIDCache, id)
+			}
+		}
+		return true, store.save()
+	}
+
+	return false, ErrDoesNotExist
+}
+
+// Get retrieves an item from the store by reference.
+func (store *store) Get(ref reference.Named) (digest.Digest, error) {
+	if canonical, ok := ref.(reference.Canonical); ok {
+		// If the reference contains both a tag and a digest, only
+		// look up by digest, as it takes precedence over the
+		// tag, until tag/digest combos are stored.
+		if _, ok := ref.(reference.Tagged); ok {
+			var err error
+			ref, err = reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest())
+			if err != nil {
+				return "", err
+			}
+		}
+	} else {
+		ref = reference.TagNameOnly(ref)
+	}
+
+	refName := reference.FamiliarName(ref)
+	refStr := reference.FamiliarString(ref)
+
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	repository, exists := store.Repositories[refName]
+	if !exists || repository == nil {
+		return "", ErrDoesNotExist
+	}
+
+	id, exists := repository[refStr]
+	if !exists {
+		return "", ErrDoesNotExist
+	}
+
+	return id, nil
+}
+
+// References returns a slice of references to the given ID. The slice
+// will be nil if there are no references to this ID.
+func (store *store) References(id digest.Digest) []reference.Named {
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	// Convert the internal map to an array for two reasons:
+	// 1) We must not return a mutable value to the caller
+	// 2) It would be ugly to expose the extraneous map keys to callers.
+
+	var references []reference.Named
+	for _, ref := range store.referencesByIDCache[id] {
+		references = append(references, ref)
+	}
+
+	sort.Sort(lexicalRefs(references))
+
+	return references
+}
+
+// ReferencesByName returns the references for a given repository name.
+// If there are no references known for this repository name,
+// ReferencesByName returns nil.
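+//
+// Illustrative iteration over the result (repoName is a hypothetical
+// reference.Named value):
+//
+//	for _, assoc := range store.ReferencesByName(repoName) {
+//		fmt.Println(assoc.Ref.String(), "->", assoc.ID.String())
+//	}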
+func (store *store) ReferencesByName(ref reference.Named) []Association { + refName := reference.FamiliarName(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[refName] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]reference.Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/reference/store_test.go b/reference/store_test.go new file mode 100644 index 0000000..2c796e7 --- /dev/null +++ b/reference/store_test.go @@ -0,0 +1,358 @@ +package reference + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" +) + +var ( + saveLoadTestCases = map[string]digest.Digest{ + "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", + "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", + "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", + "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", + "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", + "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", + "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + } + + marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) +) + +func TestLoad(t 
*testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.RemoveAll(jsonFile.Name()) + + // Write canned json to the temp file + _, err = jsonFile.Write(marshalledSaveLoadTestCases) + if err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + jsonFile.Close() + + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, expectedID := range saveLoadTestCases { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + id, err := store.Get(ref) + if err != nil { + t.Fatalf("could not find reference %s: %v", refStr, err) + } + if id != expectedID { + t.Fatalf("expected %s - got %s", expectedID, id) + } + } +} + +func TestSave(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, id := range saveLoadTestCases { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + if canonical, ok := ref.(reference.Canonical); ok { + err = store.AddDigest(canonical, id, false) + if err != nil { + t.Fatalf("could not add digest reference %s: %v", refStr, err) + } + } else { + err = store.AddTag(ref, id, false) + if err != nil { + t.Fatalf("could not add reference %s: %v", refStr, err) + } + } + } + + jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) + if err != nil { + t.Fatalf("could not read json file: %v", err) + } + + if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { + t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) + } +} + +func TestAddDeleteGet(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name(), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + testImageID1 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") + testImageID2 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") + testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") + + // Try adding a reference with no tag or digest + nameOnly, err := reference.ParseNormalizedNamed("username/repo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(nameOnly, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Add a few references + ref1, err := reference.ParseNormalizedNamed("username/repo1:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref1, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref2, err := reference.ParseNormalizedNamed("username/repo1:old") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = 
store.AddTag(ref2, testImageID2, false); err != nil {
+		t.Fatalf("error adding to store: %v", err)
+	}
+
+	ref3, err := reference.ParseNormalizedNamed("username/repo1:alias")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if err = store.AddTag(ref3, testImageID1, false); err != nil {
+		t.Fatalf("error adding to store: %v", err)
+	}
+
+	ref4, err := reference.ParseNormalizedNamed("username/repo2:latest")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if err = store.AddTag(ref4, testImageID2, false); err != nil {
+		t.Fatalf("error adding to store: %v", err)
+	}
+
+	ref5, err := reference.ParseNormalizedNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil {
+		t.Fatalf("error adding to store: %v", err)
+	}
+
+	// Attempt to overwrite with force == false
+	if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") {
+		t.Fatalf("did not get expected error on overwrite attempt - got %v", err)
+	}
+	// Repeat to overwrite with force == true
+	if err = store.AddTag(ref4, testImageID3, true); err != nil {
+		t.Fatalf("failed to force tag overwrite: %v", err)
+	}
+
+	// Check references so far
+	id, err := store.Get(nameOnly)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID1 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
+	}
+
+	id, err = store.Get(ref1)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID1 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
+	}
+
+	id, err = store.Get(ref2)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID2 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String())
+	}
+
+	id, err = store.Get(ref3)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID1 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
+	}
+
+	id, err = store.Get(ref4)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID3 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String())
+	}
+
+	id, err = store.Get(ref5)
+	if err != nil {
+		t.Fatalf("Get returned error: %v", err)
+	}
+	if id != testImageID2 {
+		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String())
+	}
+
+	// Get should return ErrDoesNotExist for a nonexistent repo
+	nonExistRepo, err := reference.ParseNormalizedNamed("username/nonexistrepo:latest")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+
+	// Get should return ErrDoesNotExist for a nonexistent tag
+	nonExistTag, err := reference.ParseNormalizedNamed("username/repo1:nonexist")
+	if err != nil {
+		t.Fatalf("could not parse reference: %v", err)
+	}
+	if _, err = store.Get(nonExistTag); err != ErrDoesNotExist {
+		t.Fatal("Expected ErrDoesNotExist from Get")
+	}
+
+	// Check References
+	refs := store.References(testImageID1)
+	if len(refs) != 3 {
+		t.Fatal("unexpected number of references")
+	}
+	// Looking for the references in this order verifies that they are
+	// returned lexically sorted.
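+	// With the references added above, the normalized names sort as:
+	// docker.io/username/repo1:alias, docker.io/username/repo1:latest,
+	// docker.io/username/repo:latest ('1' sorts before ':').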
+ if refs[0].String() != ref3.String() { + t.Fatalf("unexpected reference: %v", refs[0].String()) + } + if refs[1].String() != ref1.String() { + t.Fatalf("unexpected reference: %v", refs[1].String()) + } + if refs[2].String() != nameOnly.String()+":latest" { + t.Fatalf("unexpected reference: %v", refs[2].String()) + } + + // Check ReferencesByName + repoName, err := reference.ParseNormalizedNamed("username/repo1") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + associations := store.ReferencesByName(repoName) + if len(associations) != 3 { + t.Fatal("unexpected number of associations") + } + // Looking for the associations in this order verifies that they are + // returned lexically sorted. + if associations[0].Ref.String() != ref3.String() { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[0].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[1].Ref.String() != ref1.String() { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[1].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[2].Ref.String() != ref2.String() { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + if associations[2].ID != testImageID2 { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + + // Delete should return ErrDoesNotExist for a nonexistent repo + if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete should return ErrDoesNotExist for a nonexistent tag + if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete a few references + if deleted, err := store.Delete(ref1); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref1); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(ref5); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref5); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(nameOnly); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } +} + +func TestInvalidTags(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "tag-store-test") + defer os.RemoveAll(tmpDir) + + store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json"), runtime.GOOS) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") + + // sha256 as repo name + ref, err := reference.ParseNormalizedNamed("sha256:abc") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting tag %q to fail", ref) + } + + // setting digest as a tag + ref, err = reference.ParseNormalizedNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting tag %q to fail", ref) + } + +} diff --git a/registry/auth.go b/registry/auth.go new file mode 100644 index 0000000..8cadd51 --- /dev/null +++ b/registry/auth.go 
@@ -0,0 +1,303 @@
+package registry
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+)
+
+const (
+	// AuthClientID is the ClientID used for the token server
+	AuthClientID = "docker"
+)
+
+// loginV1 tries to register/login to the v1 registry server.
+func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) {
+	registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	serverAddress := registryEndpoint.String()
+
+	logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress)
+
+	if serverAddress == "" {
+		return "", "", fmt.Errorf("Server Error: Server Address not set.")
+	}
+
+	loginAgainstOfficialIndex := serverAddress == IndexServer
+
+	req, err := http.NewRequest("GET", serverAddress+"users/", nil)
+	if err != nil {
+		return "", "", err
+	}
+	req.SetBasicAuth(authConfig.Username, authConfig.Password)
+	resp, err := registryEndpoint.client.Do(req)
+	if err != nil {
+		// fallback when request could not be completed
+		return "", "", fallbackError{
+			err: err,
+		}
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", "", err
+	}
+	if resp.StatusCode == http.StatusOK {
+		return "Login Succeeded", "", nil
+	} else if resp.StatusCode == http.StatusUnauthorized {
+		if loginAgainstOfficialIndex {
+			return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com")
+		}
+		return "", "", fmt.Errorf("Wrong login/password, please try again")
+	} else if resp.StatusCode == http.StatusForbidden {
+		if loginAgainstOfficialIndex {
+			return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.")
+		}
+		// *TODO: Use registry configuration to determine what this says, if anything?
+		return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
+	} else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326
+		logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
+		return "", "", fmt.Errorf("Internal Server Error")
+	}
+	return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+		resp.StatusCode, resp.Header)
+}
+
+type loginCredentialStore struct {
+	authConfig *types.AuthConfig
+}
+
+func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
+	return lcs.authConfig.Username, lcs.authConfig.Password
+}
+
+func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
+	return lcs.authConfig.IdentityToken
+}
+
+func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
+	lcs.authConfig.IdentityToken = token
+}
+
+type staticCredentialStore struct {
+	auth *types.AuthConfig
+}
+
+// NewStaticCredentialStore returns a credential store
+// which always returns the same credential values.
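+//
+// Sketch (credential values are illustrative):
+//
+//	creds := NewStaticCredentialStore(&types.AuthConfig{Username: "user", Password: "secret"})
+//	u, p := creds.Basic(nil) // always "user", "secret", regardless of the URL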
+func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore {
+	return staticCredentialStore{
+		auth: auth,
+	}
+}
+
+func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
+	if scs.auth == nil {
+		return "", ""
+	}
+	return scs.auth.Username, scs.auth.Password
+}
+
+func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
+	if scs.auth == nil {
+		return ""
+	}
+	return scs.auth.IdentityToken
+}
+
+func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
+}
+
+type fallbackError struct {
+	err error
+}
+
+func (err fallbackError) Error() string {
+	return err.err.Error()
+}
+
+// loginV2 tries to login to the v2 registry server. The given registry
+// endpoint will be pinged to get authorization challenges. These challenges
+// will be used to authenticate against the registry to validate credentials.
+func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
+	logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
+
+	modifiers := DockerHeaders(userAgent, nil)
+	authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
+
+	credentialAuthConfig := *authConfig
+	creds := loginCredentialStore{
+		authConfig: &credentialAuthConfig,
+	}
+
+	loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
+	req, err := http.NewRequest("GET", endpointStr, nil)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	resp, err := loginClient.Do(req)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
+		err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	return "Login Succeeded", credentialAuthConfig.IdentityToken, nil
+}
+
+func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) {
+	challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return nil, foundV2, err
+	}
+
+	tokenHandlerOptions := auth.TokenHandlerOptions{
+		Transport:     authTransport,
+		Credentials:   creds,
+		OfflineAccess: true,
+		ClientID:      AuthClientID,
+		Scopes:        scopes,
+	}
+	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
+	basicHandler := auth.NewBasicHandler(creds)
+	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+	tr := transport.NewTransport(authTransport, modifiers...)
+
+	return &http.Client{
+		Transport: tr,
+		Timeout:   15 * time.Second,
+	}, foundV2, nil
+}
+
+// ConvertToHostname converts a registry URL which has http|https prepended
+// to just a hostname.
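+//
+// For example:
+//
+//	ConvertToHostname("https://registry.example.com/v1/") // "registry.example.com"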
+func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := GetAuthConfigKey(index) + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range authConfigs { + if configKey == ConvertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return types.AuthConfig{} +} + +// PingResponseError is used when the response from a ping +// was received but invalid. +type PingResponseError struct { + Err error +} + +func (err PingResponseError) Error() string { + return err.Err.Error() +} + +// PingV2Registry attempts to ping a v2 registry and on success return a +// challenge manager for the supported authentication types and +// whether v2 was confirmed by the response. If a response is received but +// cannot be interpreted a PingResponseError will be returned. +func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. 
+
+			foundV2 = true
+			break
+		}
+	}
+
+	challengeManager := challenge.NewSimpleManager()
+	if err := challengeManager.AddResponse(resp); err != nil {
+		return nil, foundV2, PingResponseError{
+			Err: err,
+		}
+	}
+
+	return challengeManager, foundV2, nil
+}
diff --git a/registry/auth_test.go b/registry/auth_test.go
new file mode 100644
index 0000000..9ab71aa
--- /dev/null
+++ b/registry/auth_test.go
@@ -0,0 +1,124 @@
+// +build !solaris
+
+// TODO: Support Solaris
+
+package registry
+
+import (
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+)
+
+func buildAuthConfigs() map[string]types.AuthConfig {
+	authConfigs := map[string]types.AuthConfig{}
+
+	for _, registry := range []string{"testIndex", IndexServer} {
+		authConfigs[registry] = types.AuthConfig{
+			Username: "docker-user",
+			Password: "docker-pass",
+		}
+	}
+
+	return authConfigs
+}
+
+func TestSameAuthDataPostSave(t *testing.T) {
+	authConfigs := buildAuthConfigs()
+	authConfig := authConfigs["testIndex"]
+	if authConfig.Username != "docker-user" {
+		t.Fail()
+	}
+	if authConfig.Password != "docker-pass" {
+		t.Fail()
+	}
+	if authConfig.Auth != "" {
+		t.Fail()
+	}
+}
+
+func TestResolveAuthConfigIndexServer(t *testing.T) {
+	authConfigs := buildAuthConfigs()
+	indexConfig := authConfigs[IndexServer]
+
+	officialIndex := &registrytypes.IndexInfo{
+		Official: true,
+	}
+	privateIndex := &registrytypes.IndexInfo{
+		Official: false,
+	}
+
+	resolved := ResolveAuthConfig(authConfigs, officialIndex)
+	assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer")
+
+	resolved = ResolveAuthConfig(authConfigs, privateIndex)
+	assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer")
+}
+
+func TestResolveAuthConfigFullURL(t *testing.T) {
+	authConfigs := buildAuthConfigs()
+
+	registryAuth := types.AuthConfig{
+		Username: "foo-user",
+		Password: "foo-pass",
+	}
+	localAuth := types.AuthConfig{
+		Username: "bar-user",
+		Password: "bar-pass",
+	}
+	officialAuth := types.AuthConfig{
+		Username: "baz-user",
+		Password: "baz-pass",
+	}
+	authConfigs[IndexServer] = officialAuth
+
+	expectedAuths := map[string]types.AuthConfig{
+		"registry.example.com": registryAuth,
+		"localhost:8000":       localAuth,
+		"registry.com":         localAuth,
+	}
+
+	validRegistries := map[string][]string{
+		"registry.example.com": {
+			"https://registry.example.com/v1/",
+			"http://registry.example.com/v1/",
+			"registry.example.com",
+			"registry.example.com/v1/",
+		},
+		"localhost:8000": {
+			"https://localhost:8000/v1/",
+			"http://localhost:8000/v1/",
+			"localhost:8000",
+			"localhost:8000/v1/",
+		},
+		"registry.com": {
+			"https://registry.com/v1/",
+			"http://registry.com/v1/",
+			"registry.com",
+			"registry.com/v1/",
+		},
+	}
+
+	for configKey, registries := range validRegistries {
+		configured, ok := expectedAuths[configKey]
+		if !ok {
+			t.Fail()
+		}
+		index := &registrytypes.IndexInfo{
+			Name: configKey,
+		}
+		for _, registry := range registries {
+			authConfigs[registry] = configured
+			resolved := ResolveAuthConfig(authConfigs, index)
+			if resolved.Username != configured.Username || resolved.Password != configured.Password {
+				t.Errorf("%s -> %v != %v\n", registry, resolved, configured)
+			}
+			delete(authConfigs, registry)
+			resolved = ResolveAuthConfig(authConfigs, index)
+			if resolved.Username == configured.Username || resolved.Password == configured.Password {
+				t.Errorf("%s -> %v == %v\n", registry, resolved, configured)
+			}
+		}
+	}
+}
diff --git
a/registry/config.go b/registry/config.go new file mode 100644 index 0000000..651bd73 --- /dev/null +++ b/registry/config.go @@ -0,0 +1,456 @@ +package registry + +import ( + "fmt" + "net" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/opts" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only bool `json:"disable-legacy-registry,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. +type serviceConfig struct { + registrytypes.ServiceConfig + V2Only bool +} + +var ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexHostname is the index hostname + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = newServiceConfig(ServiceOptions{}) +) + +var ( + validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// InstallCliFlags adds command-line options to the top-level flag parser for +// the current process. 
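+//
+// Typical wiring (sketch; the flag-set name is illustrative):
+//
+//	flags := pflag.NewFlagSet("daemon", pflag.ContinueOnError)
+//	opts := ServiceOptions{}
+//	opts.InstallCliFlags(flags)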
+func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { + ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, ValidateIndexName) + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + + flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") + flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") + flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") + + options.installCliPlatformFlags(flags) +} + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) *serviceConfig { + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + }, + V2Only: options.V2Only, + } + + config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts) + config.LoadMirrors(options.Mirrors) + config.LoadInsecureRegistries(options.InsecureRegistries) + + return config +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. +func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { + cidrs := map[string]*registrytypes.NetIPNet{} + hostnames := map[string]bool{} + + for _, r := range registries { + if _, err := ValidateIndexName(r); err != nil { + return err + } + if validateNoScheme(r) != nil { + return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) + } + + if _, ipnet, err := net.ParseCIDR(r); err == nil { + // Valid CIDR. + cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) + } else if err := validateHostPort(r); err == nil { + // Must be `host:port` if not CIDR. + hostnames[r] = true + } else { + return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) + } + } + + config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) + for _, c := range cidrs { + config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) + } + + config.AllowNondistributableArtifactsHostnames = make([]string, 0) + for h := range hostnames { + config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) + } + + return nil +} + +// LoadMirrors loads mirrors to config, after removing duplicates. +// Returns an error if mirrors contains an invalid mirror. +func (config *serviceConfig) LoadMirrors(mirrors []string) error { + mMap := map[string]struct{}{} + unique := []string{} + + for _, mirror := range mirrors { + m, err := ValidateMirror(mirror) + if err != nil { + return err + } + if _, exist := mMap[m]; !exist { + mMap[m] = struct{}{} + unique = append(unique, m) + } + } + + config.Mirrors = unique + + // Configure public registry since mirrors may have changed. 
+	config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
+		Name:     IndexName,
+		Mirrors:  config.Mirrors,
+		Secure:   true,
+		Official: true,
+	}
+
+	return nil
+}
+
+// LoadInsecureRegistries loads insecure registries to config
+func (config *serviceConfig) LoadInsecureRegistries(registries []string) error {
+	// Localhost is by default considered an insecure registry.
+	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
+	//
+	// TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
+	// daemon flags on boot2docker?
+	registries = append(registries, "127.0.0.0/8")
+
+	// Store the original InsecureRegistryCIDRs and IndexConfigs, then clear
+	// them in config, as the registries passed in carry the complete set of
+	// insecure registry info.
+	originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs
+	originalIndexInfos := config.ServiceConfig.IndexConfigs
+
+	config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
+	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0)
+
+skip:
+	for _, r := range registries {
+		// validate insecure registry
+		if _, err := ValidateIndexName(r); err != nil {
+			// before returning err, roll back to original data
+			config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+			config.ServiceConfig.IndexConfigs = originalIndexInfos
+			return err
+		}
+		if strings.HasPrefix(strings.ToLower(r), "http://") {
+			logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r)
+			r = r[7:]
+		} else if strings.HasPrefix(strings.ToLower(r), "https://") {
+			logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r)
+			r = r[8:]
+		} else if validateNoScheme(r) != nil {
+			// Insecure registry should not contain '://'
+			// before returning err, roll back to original data
+			config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+			config.ServiceConfig.IndexConfigs = originalIndexInfos
+			return fmt.Errorf("insecure registry %s should not contain '://'", r)
+		}
+		// Check if CIDR was passed to --insecure-registry
+		_, ipnet, err := net.ParseCIDR(r)
+		if err == nil {
+			// Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip.
+			data := (*registrytypes.NetIPNet)(ipnet)
+			for _, value := range config.InsecureRegistryCIDRs {
+				if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() {
+					continue skip
+				}
+			}
+			// ipnet is not found, add it in config.InsecureRegistryCIDRs
+			config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data)
+
+		} else {
+			if err := validateHostPort(r); err != nil {
+				config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+				config.ServiceConfig.IndexConfigs = originalIndexInfos
+				return fmt.Errorf("insecure registry %s is not valid: %v", r, err)
+			}
+			// Assume `host:port` if not CIDR.
+			config.IndexConfigs[r] = &registrytypes.IndexInfo{
+				Name:     r,
+				Mirrors:  make([]string, 0),
+				Secure:   false,
+				Official: false,
+			}
+		}
+	}
+
+	// Configure public registry.
+	config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
+		Name:     IndexName,
+		Mirrors:  config.Mirrors,
+		Secure:   true,
+		Official: true,
+	}
+
+	return nil
+}
+
+// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries
+// that allow push of nondistributable artifacts.
+// +// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP +// of the registry specified by hostname, true is returned. +// +// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If +// resolution fails, CIDR matching is not performed. +func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { + for _, h := range config.AllowNondistributableArtifactsHostnames { + if h == hostname { + return true + } + } + + return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func isSecureIndex(config *serviceConfig, indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) +} + +// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) +// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be +// resolved to IP addresses for matching. If resolution fails, false is returned. +func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { + host, _, err := net.SplitHostPort(URLHost) + if err != nil { + // Assume URLHost is of the form `host` without the port and go on. + host = URLHost + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
+ for _, addr := range addrs { + for _, ipnet := range cidrs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return true + } + } + } + + return false +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) + } + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) + } + if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) + } + if uri.User != nil { + // strip password from output + uri.User = url.UserPassword(uri.User.Username(), "xxxxx") + return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) + } + return strings.TrimSuffix(val, "/") + "/", nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + // TODO: upstream this check to the reference package + if val == "index.docker.io" { + val = "docker.io" + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + } + return val, nil +} + +func validateNoScheme(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +func validateHostPort(s string) error { + // Split host and port, and in case s cannot be split, assume host only + host, port, err := net.SplitHostPort(s) + if err != nil { + host = s + port = "" + } + // If match against the `host:port` pattern fails, + // it might be `IPv6:port`, which will be captured by net.ParseIP(host) + if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { + return fmt.Errorf("invalid host %q", host) + } + if port != "" { + v, err := strconv.Atoi(port) + if err != nil { + return err + } + if v < 0 || v > 65535 { + return fmt.Errorf("invalid port %q", port) + } + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + index := &registrytypes.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = isSecureIndex(config, indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
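+// For example (illustrative): credentials for the official index are keyed by IndexServer, while a private registry such as "myregistry.example.com:5000" is keyed by that name verbatim.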
+func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, reference.Domain(name)) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(reference.FamiliarName(name), '/') + + return &RepositoryInfo{ + Name: reference.TrimNamed(name), + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/registry/config_test.go b/registry/config_test.go new file mode 100644 index 0000000..8cb7e5a --- /dev/null +++ b/registry/config_test.go @@ -0,0 +1,260 @@ +package registry + +import ( + "reflect" + "sort" + "strings" + "testing" +) + +func TestLoadAllowNondistributableArtifacts(t *testing.T) { + testCases := []struct { + registries []string + cidrStrs []string + hostnames []string + err string + }{ + { + registries: []string{"1.2.3.0/24"}, + cidrStrs: []string{"1.2.3.0/24"}, + }, + { + registries: []string{"2001:db8::/120"}, + cidrStrs: []string{"2001:db8::/120"}, + }, + { + registries: []string{"127.0.0.1"}, + hostnames: []string{"127.0.0.1"}, + }, + { + registries: []string{"127.0.0.1:8080"}, + hostnames: []string{"127.0.0.1:8080"}, + }, + { + registries: []string{"2001:db8::1"}, + hostnames: []string{"2001:db8::1"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"1.2.3.0/24", "2001:db8::/120", "127.0.0.1", "127.0.0.1:8080"}, + cidrStrs: []string{"1.2.3.0/24", "2001:db8::/120"}, + hostnames: []string{"127.0.0.1", "127.0.0.1:8080"}, + }, + + { + registries: []string{"http://mytest.com"}, + err: "allow-nondistributable-artifacts registry http://mytest.com should not contain '://'", + }, + { + registries: []string{"https://mytest.com"}, + err: "allow-nondistributable-artifacts registry https://mytest.com should not contain '://'", + }, + { + registries: []string{"HTTP://mytest.com"}, + err: "allow-nondistributable-artifacts registry HTTP://mytest.com should not contain '://'", + }, + { + registries: []string{"svn://mytest.com"}, + err: "allow-nondistributable-artifacts registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `allow-nondistributable-artifacts registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `allow-nondistributable-artifacts registry 1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host 
"1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `allow-nondistributable-artifacts registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `allow-nondistributable-artifacts registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `allow-nondistributable-artifacts registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := newServiceConfig(ServiceOptions{}) + err := config.LoadAllowNondistributableArtifacts(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + + cidrStrs := []string{} + for _, c := range config.AllowNondistributableArtifactsCIDRs { + cidrStrs = append(cidrStrs, c.String()) + } + + sort.Strings(testCase.cidrStrs) + sort.Strings(cidrStrs) + if (len(testCase.cidrStrs) > 0 || len(cidrStrs) > 0) && !reflect.DeepEqual(testCase.cidrStrs, cidrStrs) { + t.Fatalf("expect AllowNondistributableArtifactsCIDRs to be '%+v', got '%+v'", testCase.cidrStrs, cidrStrs) + } + + sort.Strings(testCase.hostnames) + sort.Strings(config.AllowNondistributableArtifactsHostnames) + if (len(testCase.hostnames) > 0 || len(config.AllowNondistributableArtifactsHostnames) > 0) && !reflect.DeepEqual(testCase.hostnames, config.AllowNondistributableArtifactsHostnames) { + t.Fatalf("expect AllowNondistributableArtifactsHostnames to be '%+v', got '%+v'", testCase.hostnames, config.AllowNondistributableArtifactsHostnames) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} + +func TestValidateMirror(t *testing.T) { + valid := []string{ + "http://mirror-1.com", + "http://mirror-1.com/", + "https://mirror-1.com", + "https://mirror-1.com/", + "http://localhost", + "https://localhost", + "http://localhost:5000", + "https://localhost:5000", + "http://127.0.0.1", + "https://127.0.0.1", + "http://127.0.0.1:5000", + "https://127.0.0.1:5000", + } + + invalid := []string{ + "!invalid!://%as%", + "ftp://mirror-1.com", + "http://mirror-1.com/?q=foo", + "http://mirror-1.com/v1/", + "http://mirror-1.com/v1/?q=foo", + "http://mirror-1.com/v1/?q=foo#frag", + "http://mirror-1.com?q=foo", + "https://mirror-1.com#frag", + "https://mirror-1.com/#frag", + "http://foo:bar@mirror-1.com/", + "https://mirror-1.com/v1/", + "https://mirror-1.com/v1/#", + "https://mirror-1.com?q", + } + + for _, address := range valid { + if ret, err := ValidateMirror(address); err != nil || ret == "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } + + for _, address := range invalid { + if ret, err := ValidateMirror(address); err == nil || ret != "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } +} + +func TestLoadInsecureRegistries(t *testing.T) { + testCases := []struct { + registries []string + index string + err string + }{ + { + registries: []string{"127.0.0.1"}, + index: "127.0.0.1", + }, + { + registries: []string{"127.0.0.1:8080"}, + index: "127.0.0.1:8080", + }, + { + registries: []string{"2001:db8::1"}, + index: "2001:db8::1", + }, + { + registries: []string{"[2001:db8::1]:80"}, + index: "[2001:db8::1]:80", + }, + { + registries: []string{"http://mytest.com"}, + index: "mytest.com", + }, + { + 
registries: []string{"https://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"HTTP://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"svn://mytest.com"}, + err: "insecure registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `insecure registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `insecure registry 1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host "1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `insecure registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `insecure registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `insecure registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := newServiceConfig(ServiceOptions{}) + err := config.LoadInsecureRegistries(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + match := false + for index := range config.IndexConfigs { + if index == testCase.index { + match = true + } + } + if !match { + t.Fatalf("expect index configs to contain '%s', got %+v", testCase.index, config.IndexConfigs) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} diff --git a/registry/config_unix.go b/registry/config_unix.go new file mode 100644 index 0000000..fdc39a1 --- /dev/null +++ b/registry/config_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package registry + +import ( + "github.com/spf13/pflag" +) + +var ( + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries") +} diff --git a/registry/config_windows.go b/registry/config_windows.go new file mode 100644 index 0000000..d1b313d --- /dev/null +++ b/registry/config_windows.go @@ -0,0 +1,25 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" + + "github.com/spf13/pflag" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. 
Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + // No Windows specific flags. +} diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go new file mode 100644 index 0000000..8451d3f --- /dev/null +++ b/registry/endpoint_test.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServer, IndexServer}, + {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000/nonversion/", "http://0.0.0.0:5000/nonversion/v1/"}, + {"http://0.0.0.0:5000/v0/", "http://0.0.0.0:5000/v0/v1/"}, + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td.str, nil, "", nil) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} + +func TestEndpointParseInvalid(t *testing.T) { + testData := []string{ + "http://0.0.0.0:5000/v2/", + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td, nil, "", nil) + if err == nil { + t.Errorf("expected error parsing %q: parsed as %q", td, e) + } + } +} + +// Ensure that a registry endpoint that responds with a 401 only is determined +// to be a valid v1 registry endpoint +func TestValidateEndpoint(t *testing.T) { + requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) + w.WriteHeader(http.StatusUnauthorized) + }) + + // Make a test server which should validate as a v1 server. + testServer := httptest.NewServer(requireBasicAuthHandler) + defer testServer.Close() + + testServerURL, err := url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint := V1Endpoint{ + URL: testServerURL, + client: HTTPClient(NewTransport(nil)), + } + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.URL.Scheme != "http" { + t.Fatalf("expecting to validate endpoint as http, got url %s", testEndpoint.String()) + } +} diff --git a/registry/endpoint_v1.go b/registry/endpoint_v1.go new file mode 100644 index 0000000..6bcf8c9 --- /dev/null +++ b/registry/endpoint_v1.go @@ -0,0 +1,198 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. +type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. 
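+// The endpoint is also pinged (see validateEndpoint below), so a nil error means the registry answered over HTTPS, or over HTTP for a registry marked insecure.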
+func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + endpoint := &V1Endpoint{ + IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) + return endpoint, nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. 
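+// For example (illustrative): "https://foo.example.com/v1/" is trimmed to "https://foo.example.com", while "https://foo.example.com/v2/" yields an error, since only V1 paths are accepted here.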
+func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// String returns the formatted URL for the root of this registry endpoint. +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fall back to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1".
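+ // Illustrative mapping: "True" or "1" keeps Standalone true; any other non-empty value forces it to false; an absent header leaves the JSON-decoded (or default) value untouched.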
+ if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume the registry is not standalone + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/registry/registry.go b/registry/registry.go new file mode 100644 index 0000000..17fa97c --- /dev/null +++ b/registry/registry.go @@ -0,0 +1,191 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir != "" { + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. +func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers with a User-Agent and metaHeaders +func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. 
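+// Note that keep-alives are disabled on the returned transport (see the TODO below), so every request currently opens a fresh connection.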
+func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + return base +} diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go new file mode 100644 index 0000000..58b05d3 --- /dev/null +++ b/registry/registry_mock_test.go @@ -0,0 +1,478 @@ +// +build !solaris + +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/gorilla/mux" + + "github.com/Sirupsen/logrus" +) + +var ( + testHTTPServer *httptest.Server + testHTTPSServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + 
"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host 
{ + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *registrytypes.IndexInfo { + index := &registrytypes.IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *registrytypes.IndexInfo { + index := &registrytypes.IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *registrytypes.IndexInfo { + index := &registrytypes.IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { + options := ServiceOptions{ + Mirrors: mirrors, + InsecureRegistries: insecureRegistries, + } + + return newServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 
{ + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + imageID := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[imageID] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[imageID] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { + apiError(w, "Wrong checksum", 400) + return + } + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + apiError(w, fmt.Sprintf("Error: %s", err), 500) + return + } + layer[action] = string(body) + writeResponse(w, true, 200) +} + +func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + if r.Method == "DELETE" { + delete(testRepositories, repositoryName.String()) + writeResponse(w, true, 200) + return + } + writeResponse(w, tags, 200) +} + +func handlerGetTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + apiError(w, "Repository not found", 404) + return + } + tag, exists := tags[tagName] + if !exists { + apiError(w, "Tag not found", 404) + return + } + writeResponse(w, tag, 200) +} + +func handlerPutTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName.String()] + if !exists { + tags = make(map[string]string) + testRepositories[repositoryName.String()] = tags + } + tagValue := "" + // decode the JSON-encoded tag value into tagValue; note the pointer, without which json.Unmarshal cannot populate it + readJSON(r, &tagValue) + tags[tagName] = tagValue + writeResponse(w, true, 200) +} + +func handlerUsers(w http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", 
fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := ®istrytypes.SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/registry/registry_test.go b/registry/registry_test.go new file mode 100644 index 0000000..d89c46c --- /dev/null +++ b/registry/registry_test.go @@ -0,0 +1,917 @@ +// +build !solaris + +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &types.AuthConfig{} + endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) + if err != nil { + t.Fatal(err) + } + userAgent := "docker test client" + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. 
+ r.client.Transport.(*authTransport).token = token + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + + assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { + endpoint, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") + assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") + assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := &registrytypes.IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertInsecureIndex(index) + + httpURL := makeURL("") + index.Name = strings.SplitN(httpURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") + assertInsecureIndex(index) + + index.Name = makeHTTPSURL("/v1/") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertSecureIndex(index) + + index.Name = makeHTTPSURL("") + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertSecureIndex(index) + + httpsURL := makeHTTPSURL("") + index.Name = strings.SplitN(httpsURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") + assertSecureIndex(index) + + badEndpoints := []string{ + "http://127.0.0.1/v1/", + "https://127.0.0.1/v1/", + "http://127.0.0.1", + "https://127.0.0.1", + "127.0.0.1", + } + for _, address := range badEndpoints { + index.Name = address + _, err := NewV1Endpoint(index, "", nil) + checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") + } +} + +func TestGetRemoteHistory(t *testing.T) { + r := spawnTestRegistrySession(t) + hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) + if err != nil { 
+ t.Fatal(err) + } + assertEqual(t, len(hist), 2, "Expected 2 images in history") + assertEqual(t, hist[0], imageID, "Expected "+imageID+" as first ancestry") + assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "Unexpected second ancestry") +} + +func TestLookupRemoteImage(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.LookupRemoteImage(imageID, makeURL("/v1/")) + assertEqual(t, err, nil, "Expected remote lookup error to be nil") + if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { + t.Fatal("Expected an error for remote lookup of an unknown image") + } +} + +func TestGetRemoteImageJSON(t *testing.T) { + r := spawnTestRegistrySession(t) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, size, int64(154), "Expected size 154") + if len(json) == 0 { + t.Fatal("Expected non-empty json") + } + + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteImageLayer(t *testing.T) { + r := spawnTestRegistrySession(t) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) + if err != nil { + t.Fatal(err) + } + if data == nil { + t.Fatal("Expected non-nil data result") + } + + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNormalizedNamed(REPO) + if err != nil { + t.Fatal(err) + } + tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") + if err != nil { + t.Fatal(err) + } + assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNormalizedNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") + } +} + +func TestGetRemoteTags(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNormalizedNamed(REPO) + if err != nil { + t.Fatal(err) + } + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(tags), 2, "Expected two tags") + assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) + assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNormalizedNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") + } +} + +func TestGetRepositoryData(t *testing.T) { + r := spawnTestRegistrySession(t) + parsedURL, err := url.Parse(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + host := "http://" + parsedURL.Host + "/v1/" + repoRef, err := reference.ParseNormalizedNamed(REPO) + if err != nil { + t.Fatal(err) + } + data, err := r.GetRepositoryData(repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, 
data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", + fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + +} + +func TestPushImageJSONRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := &ImgData{ + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + } + + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageLayerRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + layer := strings.NewReader("") + _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type staticRepositoryInfo struct { + Index *registrytypes.IndexInfo + RemoteName string + CanonicalName string + LocalName string + Official bool + } + + expectedRepoInfos := map[string]staticRepositoryInfo{ + "fooo/bar": { + Index: &registrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "fooo/bar", + LocalName: "fooo/bar", + CanonicalName: "docker.io/fooo/bar", + Official: false, + }, + "library/ubuntu": { + Index: &registrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "nonlibrary/ubuntu": { + Index: &registrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "nonlibrary/ubuntu", + LocalName: "nonlibrary/ubuntu", + CanonicalName: "docker.io/nonlibrary/ubuntu", + Official: false, + }, + "ubuntu": { + Index: &registrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "other/library": { + Index: &registrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "other/library", + LocalName: "other/library", + CanonicalName: "docker.io/other/library", + Official: false, + }, + "127.0.0.1:8000/private/moonbase": { + Index: &registrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "127.0.0.1:8000/private/moonbase", + CanonicalName: "127.0.0.1:8000/private/moonbase", + Official: false, + }, + "127.0.0.1:8000/privatebase": { + Index: &registrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "127.0.0.1:8000/privatebase", + CanonicalName: "127.0.0.1:8000/privatebase", + Official: false, + }, + "localhost:8000/private/moonbase": { + Index: &registrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost:8000/private/moonbase", + CanonicalName: "localhost:8000/private/moonbase", + Official: false, + }, + "localhost:8000/privatebase": { + Index: &registrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost:8000/privatebase", + CanonicalName: "localhost:8000/privatebase", + Official: false, + }, + "example.com/private/moonbase": { + Index: &registrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com/private/moonbase", + CanonicalName: "example.com/private/moonbase", + Official: false, + }, + 
"example.com/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com/privatebase", + CanonicalName: "example.com/privatebase", + Official: false, + }, + "example.com:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com:8000/private/moonbase", + CanonicalName: "example.com:8000/private/moonbase", + Official: false, + }, + "example.com:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com:8000/privatebase", + CanonicalName: "example.com:8000/privatebase", + Official: false, + }, + "localhost/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost/private/moonbase", + CanonicalName: "localhost/private/moonbase", + Official: false, + }, + "localhost/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost/privatebase", + CanonicalName: "localhost/privatebase", + Official: false, + }, + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "index." + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + "index." 
+ IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + } + + for reposName, expectedRepoInfo := range expectedRepoInfos { + named, err := reference.ParseNormalizedNamed(reposName) + if err != nil { + t.Error(err) + } + + repoInfo, err := ParseRepositoryInfo(named) + if err != nil { + t.Error(err) + } else { + checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) + checkEqual(t, reference.Path(repoInfo.Name), expectedRepoInfo.RemoteName, reposName) + checkEqual(t, reference.FamiliarName(repoInfo.Name), expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.Name.Name(), expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) + checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) + } + } +} + +func TestNewIndexInfo(t *testing.T) { + testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { + for indexName, expectedIndexInfo := range expectedIndexInfos { + index, err := newIndexInfo(config, indexName) + if err != nil { + t.Fatal(err) + } else { + checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") + checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") + checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") + checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") + } + } + } + + config := newServiceConfig(ServiceOptions{}) + noMirrors := []string{} + expectedIndexInfos := map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} + config = makeServiceConfig(publicMirrors, []string{"example.com"}) + + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "index." 
+ IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) +} + +func TestMirrorEndpointLookup(t *testing.T) { + containsMirror := func(endpoints []APIEndpoint) bool { + for _, pe := range endpoints { + if pe.URL.Host == "my.mirror" { + return true + } + } + return false + } + s := DefaultService{config: makeServiceConfig([]string{"https://my.mirror"}, nil)} + + imageName, err := reference.WithName(IndexName + "/test/image") + if err != nil { + t.Error(err) + } + pushAPIEndpoints, err := s.LookupPushEndpoints(reference.Domain(imageName)) + if err != nil { + t.Fatal(err) + } + if containsMirror(pushAPIEndpoints) { + t.Fatal("Push endpoint should not contain mirror") + } + + pullAPIEndpoints, err := s.LookupPullEndpoints(reference.Domain(imageName)) + if err != nil { + t.Fatal(err) + } + if !containsMirror(pullAPIEndpoints) { + t.Fatal("Pull endpoint should contain mirror") + } +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNormalizedNamed(REPO) + if err != nil { + t.Fatal(err) + } + err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoRef, err := reference.ParseNormalizedNamed(REPO) + if err != nil { + t.Fatal(err) + } + repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := 
r.SearchRepositories("fakequery", 25)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if results == nil {
+		t.Fatal("Expected non-nil SearchResults object")
+	}
+	assertEqual(t, results.NumResults, 1, "Expected 1 search result")
+	assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
+	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
+}
+
+func TestTrustedLocation(t *testing.T) {
+	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == true {
+			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
+		}
+	}
+
+	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
+		req, _ := http.NewRequest("GET", url, nil)
+		if trustedLocation(req) == false {
+			t.Fatalf("'%s' should be detected as a trusted location", url)
+		}
+	}
+}
+
+func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) {
+	for _, urls := range [][]string{
+		{"http://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "http://bar.docker.com"},
+		{"https://foo.docker.io", "https://example.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 1 {
+			t.Fatalf("Expected 1 header, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "" {
+			t.Fatal("'Authorization' should be empty")
+		}
+	}
+
+	for _, urls := range [][]string{
+		{"https://docker.io", "https://docker.com"},
+		{"https://foo.docker.io:7777", "https://bar.docker.com"},
+	} {
+		reqFrom, _ := http.NewRequest("GET", urls[0], nil)
+		reqFrom.Header.Add("Content-Type", "application/json")
+		reqFrom.Header.Add("Authorization", "super_secret")
+		reqTo, _ := http.NewRequest("GET", urls[1], nil)
+
+		addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom})
+
+		if len(reqTo.Header) != 2 {
+			t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header))
+		}
+
+		if reqTo.Header.Get("Content-Type") != "application/json" {
+			t.Fatal("'Content-Type' should be 'application/json'")
+		}
+
+		if reqTo.Header.Get("Authorization") != "super_secret" {
+			t.Fatal("'Authorization' should be 'super_secret'")
+		}
+	}
+}
+
+func TestAllowNondistributableArtifacts(t *testing.T) {
+	tests := []struct {
+		addr       string
+		registries []string
+		expected   bool
+	}{
+		{IndexName, nil, false},
+		{"example.com", []string{}, false},
+		{"example.com", []string{"example.com"}, true},
+		{"localhost", []string{"localhost:5000"}, false},
+		{"localhost:5000", []string{"localhost:5000"}, true},
+		{"localhost", []string{"example.com"}, false},
+		{"127.0.0.1:5000", []string{"127.0.0.1:5000"}, true},
+		{"localhost", nil, false},
+		{"localhost:5000", nil, false},
+		{"127.0.0.1", nil, false},
+		{"localhost", []string{"example.com"}, false},
+		{"127.0.0.1", []string{"example.com"}, false},
+		{"example.com", nil, false},
+		{"example.com", []string{"example.com"}, true},
+		{"127.0.0.1", []string{"example.com"}, false},
+		{"127.0.0.1:5000", []string{"example.com"}, false},
+		{"example.com:5000", []string{"42.42.0.0/16"},
true}, + {"example.com", []string{"42.42.0.0/16"}, true}, + {"example.com:5000", []string{"42.42.42.42/8"}, true}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, true}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, true}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, false}, + {"invalid.domain.com", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, true}, + } + for _, tt := range tests { + config := newServiceConfig(ServiceOptions{ + AllowNondistributableArtifacts: tt.registries, + }) + if v := allowNondistributableArtifacts(config, tt.addr); v != tt.expected { + t.Errorf("allowNondistributableArtifacts failed for %q %v, expected %v got %v", tt.addr, tt.registries, tt.expected, v) + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := isSecureIndex(config, tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/registry/resumable/resumablerequestreader.go b/registry/resumable/resumablerequestreader.go new file mode 100644 index 0000000..5403c76 --- /dev/null +++ b/registry/resumable/resumablerequestreader.go @@ -0,0 +1,96 @@ +package resumable + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type requestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 + 
waitDuration time.Duration +} + +// NewRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} +} + +// NewRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} +} + +func (r *requestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(r.waitDuration) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(time.Duration(r.failures) * r.waitDuration) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *requestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *requestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/registry/resumable/resumablerequestreader_test.go b/registry/resumable/resumablerequestreader_test.go new file mode 100644 index 0000000..a632bc6 --- /dev/null +++ b/registry/resumable/resumablerequestreader_test.go @@ -0,0 +1,256 @@ +package resumable + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + resreq := 
&requestReader{}
+	_, err = resreq.Read([]byte{})
+	assert.EqualError(t, err, "client and request can't be nil")
+
+	resreq = &requestReader{
+		client:    client,
+		request:   req,
+		totalSize: -1,
+	}
+	_, err = resreq.Read([]byte{})
+	assert.EqualError(t, err, "failed to auto detect content length")
+}
+
+// Not too many failures; bails out after some wait
+func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) {
+	client := &http.Client{}
+
+	var badReq *http.Request
+	badReq, err := http.NewRequest("GET", "I'm not an url", nil)
+	require.NoError(t, err)
+
+	resreq := &requestReader{
+		client:       client,
+		request:      badReq,
+		failures:     0,
+		maxFailures:  2,
+		waitDuration: 10 * time.Millisecond,
+	}
+	read, err := resreq.Read([]byte{})
+	require.NoError(t, err)
+	assert.Equal(t, 0, read)
+}
+
+// Too many failures; returns the error
+func TestResumableRequestHeaderTooMuchFailures(t *testing.T) {
+	client := &http.Client{}
+
+	var badReq *http.Request
+	badReq, err := http.NewRequest("GET", "I'm not an url", nil)
+	require.NoError(t, err)
+
+	resreq := &requestReader{
+		client:      client,
+		request:     badReq,
+		failures:    0,
+		maxFailures: 1,
+	}
+	defer resreq.Close()
+
+	expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""`
+	read, err := resreq.Read([]byte{})
+	assert.EqualError(t, err, expectedError)
+	assert.Equal(t, 0, read)
+}
+
+type errorReaderCloser struct{}
+
+func (errorReaderCloser) Close() error { return nil }
+
+func (errorReaderCloser) Read(p []byte) (n int, err error) {
+	return 0, fmt.Errorf("An error occurred")
+}
+
+// If an unknown error is encountered, return 0, nil and log it
+func TestResumableRequestReaderWithReadError(t *testing.T) {
+	var req *http.Request
+	req, err := http.NewRequest("GET", "", nil)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+
+	response := &http.Response{
+		Status:        "500 Internal Server",
+		StatusCode:    500,
+		ContentLength: 0,
+		Close:         true,
+		Body:          errorReaderCloser{},
+	}
+
+	resreq := &requestReader{
+		client:          client,
+		request:         req,
+		currentResponse: response,
+		lastRange:       1,
+		totalSize:       1,
+	}
+	defer resreq.Close()
+
+	buf := make([]byte, 1)
+	read, err := resreq.Read(buf)
+	require.NoError(t, err)
+
+	assert.Equal(t, 0, read)
+}
+
+func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) {
+	var req *http.Request
+	req, err := http.NewRequest("GET", "", nil)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+
+	response := &http.Response{
+		Status:        "416 Requested Range Not Satisfiable",
+		StatusCode:    416,
+		ContentLength: 0,
+		Close:         true,
+		Body:          ioutil.NopCloser(strings.NewReader("")),
+	}
+
+	resreq := &requestReader{
+		client:          client,
+		request:         req,
+		currentResponse: response,
+		lastRange:       1,
+		totalSize:       1,
+	}
+	defer resreq.Close()
+
+	buf := make([]byte, 1)
+	_, err = resreq.Read(buf)
+	assert.EqualError(t, err, io.EOF.Error())
+}
+
+func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Header.Get("Range") == "" {
+			t.Fatalf("Expected a Range HTTP header, got nothing")
+		}
+	}))
+	defer ts.Close()
+
+	var req *http.Request
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	require.NoError(t, err)
+
+	client := &http.Client{}
+
+	resreq := &requestReader{
+		client:    client,
+		request:   req,
+		lastRange: 1,
+	}
+	defer resreq.Close()
+
+	buf := make([]byte, 2)
+	_, err = resreq.Read(buf)
+	assert.EqualError(t, err, "the server doesn't support byte ranges")
+}
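The tests above drive the unexported requestReader directly; external callers go through the exported NewRequestReader. A minimal usage sketch under assumed inputs (the URL, the retry budget of 5, and the auto-detected size are illustrative, not from this patch):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"

        "github.com/docker/docker/registry/resumable"
    )

    func main() {
        req, err := http.NewRequest("GET", "https://example.com/layer.tar", nil) // placeholder URL
        if err != nil {
            panic(err)
        }
        // 5 = max transient failures before giving up; 0 = auto-detect the
        // total size from Content-Length. Reads resume with Range headers
        // after a failure, transparently to the caller.
        body := resumable.NewRequestReader(http.DefaultClient, req, 5, 0)
        defer body.Close()

        data, err := ioutil.ReadAll(body)
        if err != nil {
            panic(err)
        }
        fmt.Printf("read %d bytes\n", len(data))
    }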
+ +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + client := &http.Client{} + retries := uint32(5) + + resreq := NewRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + require.NoError(t, err) + + resstr := strings.TrimSuffix(string(data), "\n") + assert.Equal(t, srvtxt, resstr) +} + +func TestResumableRequestReader(t *testing.T) { + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := NewRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + require.NoError(t, err) + + resstr := strings.TrimSuffix(string(data), "\n") + assert.Equal(t, srvtxt, resstr) +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + require.NoError(t, err) + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + require.NoError(t, err) + + resreq := NewRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + require.NoError(t, err) + + resstr := strings.TrimSuffix(string(data), "\n") + assert.Equal(t, srvtxt, resstr) +} diff --git a/registry/service.go b/registry/service.go new file mode 100644 index 0000000..34e8a13 --- /dev/null +++ b/registry/service.go @@ -0,0 +1,327 @@ +package registry + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +const ( + // DefaultSearchLimit is the default value for maximum number of returned search results. + DefaultSearchLimit = 25 +) + +// Service is the interface defining what a registry service should implement. 
+type Service interface { + Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) + LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) + LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) + ResolveRepository(name reference.Named) (*RepositoryInfo, error) + Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) + ServiceConfig() *registrytypes.ServiceConfig + TLSConfig(hostname string) (*tls.Config, error) + LoadAllowNondistributableArtifacts([]string) error + LoadMirrors([]string) error + LoadInsecureRegistries([]string) error +} + +// DefaultService is a registry service. It tracks configuration data such as a list +// of mirrors. +type DefaultService struct { + config *serviceConfig + mu sync.Mutex +} + +// NewService returns a new instance of DefaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) *DefaultService { + return &DefaultService{ + config: newServiceConfig(options), + } +} + +// ServiceConfig returns the public registry service configuration. +func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { + s.mu.Lock() + defer s.mu.Unlock() + + servConfig := registrytypes.ServiceConfig{ + AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), + AllowNondistributableArtifactsHostnames: make([]string, 0), + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), + } + + // construct a new ServiceConfig which will not retrieve s.Config directly, + // and look up items in s.config with mu locked + servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) + servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) + servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) + + for key, value := range s.config.ServiceConfig.IndexConfigs { + servConfig.IndexConfigs[key] = value + } + + servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) + + return &servConfig +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. +func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadAllowNondistributableArtifacts(registries) +} + +// LoadMirrors loads registry mirrors for Service +func (s *DefaultService) LoadMirrors(mirrors []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadMirrors(mirrors) +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *DefaultService) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. 
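Before the method implementations that follow, a usage sketch of this service API as DefaultService provides it; the mirror and registry addresses are invented placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/registry"
    )

    func main() {
        svc := registry.NewService(registry.ServiceOptions{})
        // Placeholder addresses, shown only to exercise the Load* methods.
        if err := svc.LoadMirrors([]string{"https://mirror.internal"}); err != nil {
            log.Fatal(err)
        }
        if err := svc.LoadInsecureRegistries([]string{"10.0.0.5:5000"}); err != nil {
            log.Fatal(err)
        }
        // ServiceConfig returns a copy assembled under s.mu, so callers can
        // inspect it without racing concurrent Load* calls.
        fmt.Println(svc.ServiceConfig().Mirrors)
    }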
+func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
+	// TODO Use ctx when searching for repositories
+	serverAddress := authConfig.ServerAddress
+	if serverAddress == "" {
+		serverAddress = IndexServer
+	}
+	if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
+		serverAddress = "https://" + serverAddress
+	}
+	u, err := url.Parse(serverAddress)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to parse server address: %v", err)
+	}
+
+	endpoints, err := s.LookupPushEndpoints(u.Host)
+	if err != nil {
+		return "", "", err
+	}
+
+	for _, endpoint := range endpoints {
+		login := loginV2
+		if endpoint.Version == APIVersion1 {
+			login = loginV1
+		}
+
+		status, token, err = login(authConfig, endpoint, userAgent)
+		if err == nil {
+			return
+		}
+		if fErr, ok := err.(fallbackError); ok {
+			err = fErr.err
+			logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
+			continue
+		}
+		return "", "", err
+	}
+
+	return "", "", err
+}
+
+// splitReposSearchTerm breaks a search term into an index name and remote name
+func splitReposSearchTerm(reposName string) (string, string) {
+	nameParts := strings.SplitN(reposName, "/", 2)
+	var indexName, remoteName string
+	if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
+		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
+		// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
+		// 'docker.io'
+		indexName = IndexName
+		remoteName = reposName
+	} else {
+		indexName = nameParts[0]
+		remoteName = nameParts[1]
+	}
+	return indexName, remoteName
+}
+
+// Search queries the public registry for images matching the specified
+// search terms, and returns the results.
+func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
+	// TODO Use ctx when searching for repositories
+	if err := validateNoScheme(term); err != nil {
+		return nil, err
+	}
+
+	indexName, remoteName := splitReposSearchTerm(term)
+
+	// Search is a long-running operation; just lock s.config to avoid blocking others.
+	s.mu.Lock()
+	index, err := newIndexInfo(s.config, indexName)
+	s.mu.Unlock()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// *TODO: Search multiple indexes.
+ endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + modifiers := DockerHeaders(userAgent, nil) + v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + if fErr, ok := err.(fallbackError); ok { + logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) + } else { + return nil, err + } + } else if foundV2 { + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } + } + + if client == nil { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + r := newSession(client, authConfig, endpoint) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName, limit) + } + return r.SearchRepositories(remoteName, limit) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + AllowNondistributableArtifacts bool + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +// tlsConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP. +func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lookupEndpoints(hostname) +} + +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. 
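A sketch of the pull/push asymmetry that these two lookup methods describe, using an invented mirror address; for the official index the pull list carries the mirror first, while the push list omits it:

    package main

    import (
        "fmt"

        "github.com/docker/docker/registry"
    )

    func main() {
        svc := registry.NewService(registry.ServiceOptions{})
        if err := svc.LoadMirrors([]string{"https://mirror.internal"}); err != nil { // placeholder
            panic(err)
        }
        // Pull candidates: the configured mirror, then the official registry.
        // Push candidates: the same list with mirror entries filtered out.
        pull, _ := svc.LookupPullEndpoints("docker.io")
        push, _ := svc.LookupPushEndpoints("docker.io")
        fmt.Println(len(pull), len(push)) // pull list is longer by the mirror count
    }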
+func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + allEndpoints, err := s.lookupEndpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(hostname) + if err != nil { + return nil, err + } + + if s.config.V2Only { + return endpoints, nil + } + + legacyEndpoints, err := s.lookupV1Endpoints(hostname) + if err != nil { + return nil, err + } + endpoints = append(endpoints, legacyEndpoints...) + + return endpoints, nil +} diff --git a/registry/service_v1.go b/registry/service_v1.go new file mode 100644 index 0000000..1d251ae --- /dev/null +++ b/registry/service_v1.go @@ -0,0 +1,40 @@ +package registry + +import "net/url" + +func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { + return []APIEndpoint{}, nil + } + + tlsConfig, err := s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/registry/service_v1_test.go b/registry/service_v1_test.go new file mode 100644 index 0000000..bd15dff --- /dev/null +++ b/registry/service_v1_test.go @@ -0,0 +1,23 @@ +package registry + +import "testing" + +func TestLookupV1Endpoints(t *testing.T) { + s := NewService(ServiceOptions{}) + + cases := []struct { + hostname string + expectedLen int + }{ + {"example.com", 1}, + {DefaultNamespace, 0}, + {DefaultV2Registry.Host, 0}, + {IndexHostname, 0}, + } + + for _, c := range cases { + if ret, err := s.lookupV1Endpoints(c.hostname); err != nil || len(ret) != c.expectedLen { + t.Errorf("lookupV1Endpoints(`"+c.hostname+"`) returned %+v and %+v", ret, err) + } + } +} diff --git a/registry/service_v2.go b/registry/service_v2.go new file mode 100644 index 0000000..68466f8 --- /dev/null +++ b/registry/service_v2.go @@ -0,0 +1,82 @@ +package registry + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + tlsConfig := tlsconfig.ServerDefault() + if hostname == DefaultNamespace || hostname == IndexHostname { + // v2 mirrors + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = 
append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + ana := allowNondistributableArtifacts(s.config, hostname) + + tlsConfig, err = s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/registry/session.go b/registry/session.go new file mode 100644 index 0000000..9d7f321 --- /dev/null +++ b/registry/session.go @@ -0,0 +1,778 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry/resumable" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *V1Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. 
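A sketch of wiring this round tripper into a client, with invented credentials; alwaysSetBasicAuth=false selects the official-index behavior described above, where Basic auth is sent only when a token is requested and returned X-Docker-Token values are cached for later requests:

    package main

    import (
        "net/http"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/registry"
    )

    func main() {
        // Placeholder credentials, shown only to demonstrate the decorator.
        auth := &types.AuthConfig{Username: "user", Password: "secret"}
        client := &http.Client{
            Transport: registry.AuthTransport(http.DefaultTransport, auth, false),
        }
        req, _ := http.NewRequest("GET", "https://index.docker.io/v1/repositories/library/busybox/images", nil)
        req.Header.Set("X-Docker-Token", "true") // ask the index for a token
        if resp, err := client.Do(req); err == nil {
            resp.Body.Close() // any token in the response is now cached by the transport
        }
    }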
+func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. 
+ client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { + return &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + + return newSession(client, authConfig, endpoint), nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). +func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
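A sketch of the v1 session workflow built from the pieces above; the helper name, endpoint, and image ID are assumptions, not part of this patch:

    package registryexample

    import (
        "net/http"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/registry"
    )

    // ancestry opens a session against an already-constructed endpoint and
    // walks an image's parent chain.
    func ancestry(endpoint *registry.V1Endpoint, auth *types.AuthConfig, imgID string) ([]string, error) {
        // NewSession installs the auth transport and a cookie jar on the client.
        sess, err := registry.NewSession(&http.Client{}, auth, endpoint)
        if err != nil {
            return nil, err
        }
        // GetRemoteHistory GETs <registry>/images/<id>/ancestry and decodes
        // the JSON list of ancestor IDs, the requested image first.
        return sess.GetRemoteHistory(imgID, endpoint.String())
    }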
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := int64(-1) + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.ParseInt(hdr, 10, 64) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + + res, err = r.client.Do(req) + if err != nil { + logrus.Debugf("Error contacting registry %s: %v", registry, err) + // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debug("server supports resume") + return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debug("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := reference.Path(repositoryRef) + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. 
It returns a map with +// tag names as the keys and image IDs as the values. +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := reference.Path(repositoryRef) + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name)) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if isTimeout(err) { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. 
+ if res.StatusCode == 404 { + return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } 
else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. 
+// Remote has the format '<user>/<repo>'
+func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
+ // "jsonify" the string
+ revision = "\"" + revision + "\""
+ path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag)
+
+ req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Content-type", "application/json")
+ req.ContentLength = int64(len(revision))
+ res, err := r.client.Do(req)
+ if err != nil {
+ return err
+ }
+ res.Body.Close()
+ if res.StatusCode != 200 && res.StatusCode != 201 {
+ return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res)
+ }
+ return nil
+}
+
+// PushImageJSONIndex uploads an image list to the repository
+func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+ cleanImgList := []*ImgData{}
+ if validate {
+ for _, elem := range imgList {
+ if elem.Checksum != "" {
+ cleanImgList = append(cleanImgList, elem)
+ }
+ }
+ } else {
+ cleanImgList = imgList
+ }
+
+ imgListJSON, err := json.Marshal(cleanImgList)
+ if err != nil {
+ return nil, err
+ }
+ var suffix string
+ if validate {
+ suffix = "images"
+ }
+ u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix)
+ logrus.Debugf("[registry] PUT %s", u)
+ logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
+ headers := map[string][]string{
+ "Content-type": {"application/json"},
+ // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
+ "X-Docker-Token": {"true"},
+ }
+ if validate {
+ headers["X-Docker-Endpoints"] = regs
+ }
+
+ // Redirect if necessary
+ var res *http.Response
+ for {
+ if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
+ return nil, err
+ }
+ if !shouldRedirect(res) {
+ break
+ }
+ res.Body.Close()
+ u = res.Header.Get("Location")
+ logrus.Debugf("Redirected to %s", u)
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode == 401 {
+ return nil, errcode.ErrorCodeUnauthorized.WithArgs()
+ }
+
+ var tokens, endpoints []string
+ if !validate {
+ if res.StatusCode != 200 && res.StatusCode != 201 {
+ errBody, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ logrus.Debugf("Error reading response body: %s", err)
+ }
+ return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
+ }
+ tokens = res.Header["X-Docker-Token"]
+ logrus.Debugf("Auth token: %v", tokens)
+
+ if res.Header.Get("X-Docker-Endpoints") == "" {
+ return nil, fmt.Errorf("Index response didn't contain any endpoints")
+ }
+ endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ if res.StatusCode != 204 {
+ errBody, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ logrus.Debugf("Error reading response body: %s", err)
+ }
+ return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
+ }
+ }
+
+ return &RepositoryData{
+ Endpoints: endpoints,
+ }, nil
+}
+
+func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
+ req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ } + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { + if limit < 1 || limit > 100 { + return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(registrytypes.SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} + +func newJSONError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} diff --git a/registry/types.go b/registry/types.go new file mode 100644 index 0000000..0c3cbd6 --- /dev/null +++ b/registry/types.go @@ -0,0 +1,70 @@ +package registry + +import ( + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// RepositoryData tracks the image list, list of endpoints for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. 
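+// APIVersion1 and APIVersion2 correspond to the "v1" and "v2" protocol strings in the apiVersions map below.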
+const (
+ _ = iota
+ APIVersion1 APIVersion = iota
+ APIVersion2
+)
+
+var apiVersions = map[APIVersion]string{
+ APIVersion1: "v1",
+ APIVersion2: "v2",
+}
+
+// RepositoryInfo describes a repository
+type RepositoryInfo struct {
+ Name reference.Named
+ // Index points to registry information
+ Index *registrytypes.IndexInfo
+ // Official indicates whether the repository is considered official.
+ // If the registry is official, and the normalized name does not
+ // contain a '/' (e.g. "foo"), then it is considered an official repo.
+ Official bool
+ // Class represents the class of the repository, such as "plugin"
+ // or "image".
+ Class string
+}
diff --git a/reports/2017-05-01.md b/reports/2017-05-01.md
new file mode 100644
index 0000000..366f4fc
--- /dev/null
+++ b/reports/2017-05-01.md
@@ -0,0 +1,35 @@
+# Development Report for May 01, 2017
+
+This is the first report since the Moby project was announced at DockerCon. Thank you to everyone who stayed an extra day to attend the summit on Thursday.
+
+## Daily Meeting
+
+A daily meeting is hosted on [slack](https://dockercommunity.slack.com/) every business day at 9am PST on the channel `#moby-project`.
+During this meeting, we talk about the [tasks](https://github.com/moby/moby/issues/32867) that need to be done to split moby and docker.
+
+## Topics discussed last week
+
+### The moby tool
+
+The moby tool currently lives at [https://github.com/moby/tool](https://github.com/moby/tool); this is only a temporary home, and it will soon be merged into [https://github.com/moby/moby](https://github.com/moby/moby).
+
+### The CLI split
+
+Ongoing work to split the Docker CLI into [https://github.com/docker/cli](https://github.com/docker/cli) is happening [here](https://github.com/moby/moby/pull/32694).
+We are almost done; it should be merged soon.
+
+### Mailing list
+
+Slack works great for synchronous communication, but we need a place for async discussion. A mailing list is currently being set up.
+
+### Find a good and non-confusing home for the remaining monolith
+
+Lots of discussion and progress was made on this topic; see [here](https://github.com/moby/moby/issues/32871). The work will start this week.
+
+## Componentization
+
+So far, only work on the builder has happened as part of the componentization effort.
+
+### builder
+
+The builder dev report can be found [here](builder/2017-05-01.md)
diff --git a/reports/2017-05-08.md b/reports/2017-05-08.md
new file mode 100644
index 0000000..86d011a
--- /dev/null
+++ b/reports/2017-05-08.md
@@ -0,0 +1,34 @@
+# Development Report for May 08, 2017
+
+## Daily Meeting
+
+A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`.
+During this meeting, we talk about the [tasks](https://github.com/moby/moby/issues/32867) that need to be done to split moby and docker.
+
+## Topics discussed last week
+
+### The CLI split
+
+The Docker CLI was successfully moved to [https://github.com/docker/cli](https://github.com/docker/cli) last week, thanks to @tiborvass.
+The Docker CLI is now compiled from the [Dockerfile](https://github.com/moby/moby/blob/a762ceace4e8c1c7ce4fb582789af9d8074be3e1/Dockerfile#L248)
+
+### Mailing list
+
+Discourse is available at [forums.mobyproject.org](https://forums.mobyproject.org/) thanks to @thaJeztah. Mailing-list mode is enabled, so once you register there, you will receive every new thread / message via email. So far, 3 categories were created: Architecture, Meta & Support.
The last step missing is to set up an email address to be able to start a new thread via email.
+
+### Find a place for `/pkg`
+
+Lots of discussion and progress was made on this [topic](https://github.com/moby/moby/issues/32989) thanks to @dnephin. [Here is the list](https://gist.github.com/dnephin/35dc10f6b6b7017f058a71908b301d38) proposed for splitting/reorganizing the pkgs.
+
+### Find a good and non-confusing home for the remaining monolith
+
+@cpuguy83 is leading the effort [here](https://github.com/moby/moby/pull/33022). It's still a WIP, but the approach we are experimenting with is to reorganise directories within moby/moby.
+
+## Componentization
+
+So far, only work on the builder, by @tonistiigi, has happened as part of the componentization effort.
+
+### builder
+
+The builder dev report can be found [here](builder/2017-05-08.md)
+
diff --git a/reports/2017-05-15.md b/reports/2017-05-15.md
new file mode 100644
index 0000000..372630c
--- /dev/null
+++ b/reports/2017-05-15.md
@@ -0,0 +1,52 @@
+# Development Report for May 15, 2017
+
+## Daily Meeting
+
+A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`.
+During this meeting, we talk about the [tasks](https://github.com/moby/moby/issues/32867) that need to be done to split moby and docker.
+
+## Topics discussed last week
+
+### The CLI split
+
+Work is in progress to move the "opts" package to the docker/cli repository. The package was merged into the docker/cli
+repository through [docker/cli#82](https://github.com/docker/cli/pull/82), preserving Git history, and parts that are not
+used in Moby have been removed through [moby/moby#33198](https://github.com/moby/moby/pull/33198).
+
+### Find a good and non-confusing home for the remaining monolith
+
+Discussion on this topic is still ongoing, and possible approaches are being looked into. The active discussion has moved
+from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith/37)
+
+### Find a place for `/pkg`
+
+Concerns were raised about moving packages to separate repositories, and it was decided to put some extra effort into
+breaking up / removing existing packages that likely are not good candidates to become a standalone project.
+
+### Update integration-cli tests
+
+With the removal of the CLI from the moby repository, new pull requests will have to be tested using API tests instead
+of using the CLI. Discussion took place on whether these tests should use the API `client` package or be completely
+independent and make raw HTTP calls.
+
+A topic was created on the forum to discuss options: [evolution of testing](https://forums.mobyproject.org/t/evolution-of-testing-moby/38)
+
+
+### Proposal: split & containerize hack/validate
+
+[@AkihiroSuda](https://github.com/AkihiroSuda) is proposing to split and containerize the `hack/validate` script and
+[started a topic on the forum](https://forums.mobyproject.org/t/proposal-split-containerize-hack-validate/32). An initial
+proposal to add validation functionality to `vndr` (the vendoring tool in use) was rejected upstream, so alternative
+approaches were discussed.
+
+
+### Special Interest Groups
+
+A "SIG" category was created on the forums to provide a home for Special Interest Groups. The first SIG, [LinuxKit
+Security](https://forums.mobyproject.org/t/about-the-linuxkit-security-category/44), was started (thanks
+[@riyazdf](https://github.com/riyazdf)).
+
+
+### Builder
+
+The builder dev report can be found [here](builder/2017-05-15.md)
diff --git a/reports/2017-06-05.md b/reports/2017-06-05.md
new file mode 100644
index 0000000..8e2cc3c
--- /dev/null
+++ b/reports/2017-06-05.md
@@ -0,0 +1,36 @@
+# Development Report for June 5, 2017
+
+## Daily Meeting
+
+A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`.
+Lots of discussion happened during this meeting to kickstart the project, but now that we have the forums, we see less activity there.
+We are discussing the future of this meeting [here](https://forums.mobyproject.org/t/of-standups-future); we will possibly move the meeting
+to weekly.
+
+## Topics discussed last week
+
+### The CLI split
+
+Thanks to @tiborvass, the man pages, docs and completion scripts were imported to `github.com/docker/cli` [last week](https://github.com/docker/cli/pull/147).
+Once everything is finalised, we will remove them from `github.com/moby/moby`
+
+### Find a good and non-confusing home for the remaining monolith
+
+Discussion on this topic is still ongoing, and possible approaches are being looked into. The active discussion has moved
+from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith)
+
+
+### Find a place for `/pkg`
+
+Thanks to @dnephin, this topic is ongoing; you can follow progress [here](https://github.com/moby/moby/issues/32989).
+Many pkgs were reorganised last week, with more to come this week.
+
+
+### Builder
+
+The builder dev report can be found [here](builder/2017-06-05.md)
+
+
+### LinuxKit
+
+The LinuxKit dev report can be found [here](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-03.md)
\ No newline at end of file
diff --git a/reports/2017-06-12.md b/reports/2017-06-12.md
new file mode 100644
index 0000000..8aef38c
--- /dev/null
+++ b/reports/2017-06-12.md
@@ -0,0 +1,78 @@
+# Development Report for June 12, 2017
+
+## Moby Summit
+
+The next Moby Summit will be at Docker HQ on June 19th; register [here](https://www.eventbrite.com/e/moby-summit-tickets-34483396768)
+
+## Daily Meeting
+
+### The CLI split
+
+Man pages and docs YAML files can now be generated on [docker/cli](https://github.com/docker/cli).
+Man pages, docs and completion scripts will be removed next week, thanks to @tiborvass
+
+### Find a good and non-confusing home for the remaining monolith
+
+Lots of discussion happened on the [forums](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith).
+We expect to make those changes after the Moby Summit. We have contacted GitHub to work with them so the move goes smoothly.
+
+### Moby tool
+
+`moby` tool docs were moved from [LinuxKit](https://github.com/linuxkit/linuxkit) to the [moby tool repo](https://github.com/moby/tool) thanks to @justincormack
+
+### Custom golang URLs
+
+More discussion on the [forums](https://forums.mobyproject.org/t/cutoms-golang-urls); no agreement for now.
+
+### Buildkit
+
+[Proposal](https://github.com/moby/moby/issues/32925)
+
+More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. The examples directory shows a way of invoking a build job by generating the internal low-level build graph definition with a helper binary (as there is no support for frontends yet).
The grpc control server binary can be built in two versions, one that connects to the containerd socket and another that has no external dependencies.
+
+If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby.
+
+#### Typed Dockerfile parsing
+
+[PR](https://github.com/moby/moby/pull/33492)
+
+New PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers.
+
+#### Long running session & incremental file sending
+
+[PR](https://github.com/moby/moby/pull/32677)
+
+Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. Maintainers are encouraged to give this one a review so it can be included in the `v17.07` release.
+
+
+#### Quality: Dependency interface switch
+
+[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review.
+
+#### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+[DAG image builder](https://github.com/moby/moby/issues/32550)
+
+[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new)
+
+[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new)
+
+[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989)
+
+If you are interested in implementing any of them, leave a comment on the specific issues.
+
+#### Builder features currently in code-review:
+
+[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161)
+
+[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088)
+
+#### Backlog
+
+[Build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard.
diff --git a/reports/builder/2017-05-01.md b/reports/builder/2017-05-01.md
new file mode 100644
index 0000000..73d1c49
--- /dev/null
+++ b/reports/builder/2017-05-01.md
@@ -0,0 +1,47 @@
+# Development Report for May 01, 2017
+
+### buildkit
+
+As part of the goals of [Moby](https://github.com/moby/moby#transitioning-to-moby) to split the current platform into reusable components and to provide a future vision for the builder component, a new [buildkit proposal](https://github.com/moby/moby/issues/32925) was opened with an early design draft.
+
+Buildkit is a library providing the core essentials of running a build process using isolated sandboxed commands. It is designed for extensibility and customization. Buildkit supports multiple build declaration formats (frontends) and multiple ways of outputting build results (not just docker images). It doesn't make decisions for specific worker, snapshot or exporter implementations.
+
+It is designed to help find the most efficient way to process build tasks and intelligently cache them for repeated invocations.
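+
+To make the frontend/exporter split concrete, here is a minimal Go sketch of what such interfaces could look like. This is purely illustrative: none of these types (`Frontend`, `Exporter`, `Graph`, `Result`) exist in the proposal, and the names are invented for this report.
+
+```go
+package sketch
+
+// Graph is a placeholder for a low-level build graph definition.
+type Graph struct{}
+
+// Result is a placeholder for the output of running a build graph.
+type Result struct{}
+
+// Frontend converts a build declaration (for example, a Dockerfile)
+// into a low-level build graph.
+type Frontend interface {
+	Parse(definition []byte) (Graph, error)
+}
+
+// Exporter writes a build result out in some form: a docker image,
+// a tarball, a local directory, and so on.
+type Exporter interface {
+	Export(result Result) error
+}
+```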
+
+### Quality: Dependency interface switch
+
+To improve quality and performance, a new [proposal was made for switching the dependency interface](https://github.com/moby/moby/issues/32904) for the current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts.
+
+@dnephin is refactoring the current builder code into logical areas as preparation for updating this interface.
+
+Merged as part of this effort:
+
+- [Refactor Dockerfile.parser and directive](https://github.com/moby/moby/pull/32580)
+- [Refactor builder dispatch state](https://github.com/moby/moby/pull/32600)
+- [Use a bytes.Buffer for shell_words string concat](https://github.com/moby/moby/pull/32601)
+- [Refactor `Builder.commit()`](https://github.com/moby/moby/pull/32772)
+- [Remove b.escapeToken, create ShellLex](https://github.com/moby/moby/pull/32858)
+
+### New feature: Long running session
+
+PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding, etc., is looking for an initial design review. It is currently an open question whether features implemented on top of it should use a specific transport implementation on the wire or a generic interface (the current implementation). @tonistiigi is working on adding persistent cache capabilities that are currently missing from that PR. It also needs to be figured out how the [cli split](https://github.com/moby/moby/pull/32694) will affect features like this.
+
+### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+These proposals have gotten mostly positive feedback for now. We will leave them open for a couple more weeks and then decide what actions to take in a maintainers meeting. Also, if you are interested in implementing any of them, leave a comment on the specific issues.
+
+### Other new builder features currently in code-review:
+
+[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406)
+
+[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502)
+
+### Backlog:
+
+[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in the next maintainers meeting to evaluate how to move on with this, if any other proposals have changed the objective, and if we should wait for swarm secrets to be available first.
diff --git a/reports/builder/2017-05-08.md b/reports/builder/2017-05-08.md
new file mode 100644
index 0000000..d9396ab
--- /dev/null
+++ b/reports/builder/2017-05-08.md
@@ -0,0 +1,57 @@
+# Development Report for May 08, 2017
+
+
+### Quality: Dependency interface switch
+
+Proposal for [switching the dependency interface](https://github.com/moby/moby/issues/32904) for the current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts.
+
+Merged as part of this effort:
+
+- [Move dispatch state to a new struct](https://github.com/moby/moby/pull/32952)
+- [Cleanup unnecessary mutate then revert of b.runConfig](https://github.com/moby/moby/pull/32773)
+
+In review:
+- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061)
+- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054)
+
+### Merged: docker build --iidfile
+
+[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406). The new option can be used by CLI applications to get back the image ID of the build result. API users can use the `Aux` messages in the progress stream to also get the IDs of intermediate build stages, for example to share them for build cache.
+
+### New feature: Long running session
+
+PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc.
+
+@simonferquel proposed a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047) that should simplify the setup needed for describing new features for the session. Looking for design reviews.
+
+The feature also needs to be reworked after the CLI split.
+
+### buildkit
+
+Not much progress [apart from some design discussion](https://github.com/moby/moby/issues/32925). The next step would be to open up a repo.
+
+### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+[DAG image builder](https://github.com/moby/moby/issues/32550)
+
+[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new)
+
+[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new)
+
+If you are interested in implementing any of them, leave a comment on the specific issues.
+
+### Other new builder features currently in code-review:
+
+[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502)
+
+[Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997)
+
+### Backlog:
+
+[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in the next maintainers meeting to evaluate how to move on with this, if any other proposals have changed the objective, and if we should wait for swarm secrets to be available first.
diff --git a/reports/builder/2017-05-15.md b/reports/builder/2017-05-15.md
new file mode 100644
index 0000000..cfc742f
--- /dev/null
+++ b/reports/builder/2017-05-15.md
@@ -0,0 +1,64 @@
+# Development Report for May 15, 2017
+
+### Multi-stage builds fixes coming in 17.06-rc1
+
+Some bugs were discovered in the new multi-stage build feature, released in 17.05.
+
+When using an image name directly in `COPY --from` without defining a build stage, the data associated with that image was not properly cleaned up.
+
+If a second stage was based on the `scratch` image, the metadata from the previous stage didn't get reset, forcing the user to clear it manually with extra commands.
+
+Fixes for these are merged for the next release; everyone is welcome to test them once `17.06-rc1` is out.
+
+- [Fix resetting image metadata between stages for scratch case](https://github.com/moby/moby/pull/33179)
+- [Fix releasing implicit mounts](https://github.com/moby/moby/pull/33090)
+- [Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997)
+
+
+### Quality: Dependency interface switch
+
+Work continues on making the builder dependency interface more stable. This week, the methods for getting access to the source image were swapped out for a new version that keeps a reference to the image data until the build job has completed.
+
+Merged as part of this effort:
+
+- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054)
+
+In review:
+- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061)
+- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116)
+
+
+### New feature: Long running session
+
+PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc.
+
+@simonferquel updated a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047), and it seems consensus was reached on using only the grpc transport. @tonistiigi finished up the persistent cache layer and garbage collection for file transfers. The PR now needs to be split up because the CLI has moved. Once that is done, the main PR should be ready for review early this week.
+
+### Merged: Specifying any remote ref in git checkout URLs
+
+Building from git sources now allows [specifying any remote ref](https://github.com/moby/moby/pull/32502). For example, to build a pull request from GitHub you can use: `docker build git://github.com/moby/moby#pull/32502/head`.
+
+
+### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+[DAG image builder](https://github.com/moby/moby/issues/32550)
+
+[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new)
+
+[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new)
+
+If you are interested in implementing any of them, leave a comment on the specific issues.
+
+### Other new builder features currently in code-review:
+
+-
+
+### Backlog:
+
+[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in the next maintainers meeting to evaluate how to move on with this, if any other proposals have changed the objective, and if we should wait for swarm secrets to be available first.
diff --git a/reports/builder/2017-05-22.md b/reports/builder/2017-05-22.md
new file mode 100644
index 0000000..29ecc6b
--- /dev/null
+++ b/reports/builder/2017-05-22.md
@@ -0,0 +1,47 @@
+# Development Report for May 22, 2017
+
+### New feature: Long running session
+
+PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding, etc., is ready for review. This is blocking many new features, like token signing, not pulling unnecessary context files, exposing sources outside the working directory, etc.
+
+
+### Quality: Dependency interface switch
+
+Work continues on making the builder dependency interface more stable.
+
+Merged as part of this effort this week:
+
+- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116)
+
+In review:
+- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061)
+
+### Buildkit
+
+[Diff and snapshot services](https://github.com/containerd/containerd/pull/849) were added to containerd. This is a required dependency for [buildkit](https://github.com/moby/moby/issues/32925).
+
+### Proposals discussed in maintainers meeting
+
+New builder proposals were discussed in the maintainers meeting. The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up.
+
+Build secrets, and their possible overlap with [--mount](https://github.com/moby/moby/issues/32507), were discussed as well. The decision was to create a [new issue](https://github.com/moby/moby/issues/33343) (as the [old PR](https://github.com/moby/moby/pull/30637) is closed) to track this and keep it from blocking the `--mount` implementation.
+
+### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+[DAG image builder](https://github.com/moby/moby/issues/32550)
+
+[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new)
+
+[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new)
+
+If you are interested in implementing any of them, leave a comment on the specific issues.
+
+### Other new builder features currently in code-review:
+
+-
diff --git a/reports/builder/2017-05-29.md b/reports/builder/2017-05-29.md
new file mode 100644
index 0000000..33043d9
--- /dev/null
+++ b/reports/builder/2017-05-29.md
@@ -0,0 +1,52 @@
+# Development Report for May 29, 2017
+
+### New feature: Long running session
+
+PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding, etc., is ready for review. It is blocking many new features, like token signing, not pulling unnecessary context files, exposing sources outside the working directory, etc. Maintainers are encouraged to give this one a review!
+
+
+### Quality: Dependency interface switch
+
+Work continues on making the builder dependency interface more stable.
+
+Merged as part of this effort this week:
+
+- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061)
+
+@dnephin continues working on the copy/export aspects of the interface.
+
+### Buildkit
+
+Some initial proof of concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc. It's in a very early exploratory stage. Current development has been about providing concurrent, reference-based access to the snapshot data that is backed by containerd. More info should follow in the coming weeks, including hopefully opening up an official repo.
If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby.
+
+### Proposals discussed in maintainers meeting
+
+Reminder from last week: New builder proposals were discussed in the maintainers meeting. The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up.
+
+The new issue about [build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard.
+
+### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+[DAG image builder](https://github.com/moby/moby/issues/32550)
+
+[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new)
+
+[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new)
+
+If you are interested in implementing any of them, leave a comment on the specific issues.
+
+### Other new builder features currently in code-review:
+
+[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363)
+
+[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436)
+
+[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161)
+
+[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088)
\ No newline at end of file
diff --git a/reports/builder/2017-06-05.md b/reports/builder/2017-06-05.md
new file mode 100644
index 0000000..3746c26
--- /dev/null
+++ b/reports/builder/2017-06-05.md
@@ -0,0 +1,58 @@
+# Development Report for June 5, 2017
+
+### New feature: Long running session
+
+Similarly to last week, the PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) is waiting for reviews. It is blocking many new features, like token signing, not pulling unnecessary context files, exposing sources outside the working directory, etc. Maintainers are encouraged to give this one a review so it can be included in the `v17.07` release.
+
+
+### Quality: Dependency interface switch
+
+Work continues on making the builder dependency interface more stable.
+
+PRs currently in review as part of this effort:
+
+- [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454)
+
+This PR is the core of the update that removes the need to track active containers and instead lets the builder hold references to layers while it's running.
+
+Related to this, @simonferquel opened a [WIP PR](https://github.com/moby/moby/pull/33492) that introduces typed Dockerfile parsing. This enables making [decisions about dependencies](https://github.com/moby/moby/issues/32550#issuecomment-297867334) between build stages and reusing Dockerfile parsing as a buildkit frontend.
+
+### Buildkit
+
+Some initial proof of concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc. It's in a very early exploratory stage.
The current codebase includes libraries for getting concurrency-safe references to containerd snapshots using a centralized cache management instance. There is a sample source implementation for pulling images to these snapshots and executing jobs with runc on top of them. There is also some utility code for concurrent execution and progress stream handling. More info should follow in the coming weeks, including hopefully opening up an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby.
+
+### Proposals discussed in maintainers meeting
+
+Reminder from last week: New builder proposals were discussed in the maintainers meeting. The decision was to give two more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. It is the last week to post your feedback on these proposals or the comments in them. You can also volunteer to implement them.
+
+A new issue about [build secrets](https://github.com/moby/moby/issues/33343) has not gotten much traction. If you want this feature to become a reality, please make yourself heard.
+
+### Proposals for new Dockerfile features that need design feedback:
+
+[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100)
+
+[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487)
+
+[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507)
+
+[DAG image builder](https://github.com/moby/moby/issues/32550)
+
+[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new)
+
+[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new)
+
+[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989)
+
+If you are interested in implementing any of them, leave a comment on the specific issues.
+
+### Other builder PRs merged last week
+
+[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363)
+
+[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436)
+
+### Builder features currently in code-review:
+
+[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161)
+
+[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088)
\ No newline at end of file
diff --git a/reports/builder/2017-06-12.md b/reports/builder/2017-06-12.md
new file mode 100644
index 0000000..df5d801
--- /dev/null
+++ b/reports/builder/2017-06-12.md
@@ -0,0 +1,58 @@
+# Development Report for June 12, 2017
+
+
+### Buildkit
+
+[Proposal](https://github.com/moby/moby/issues/32925)
+
+More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. The examples directory shows a way of invoking a build job by generating the internal low-level build graph definition with a helper binary (as there is no support for frontends yet). The grpc control server binary can be built in two versions, one that connects to the containerd socket and another that has no external dependencies.
+ +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +New PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers. + +### Long running session & incremental file sending + +[PR ](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. + + +### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. \ No newline at end of file diff --git a/restartmanager/restartmanager.go b/restartmanager/restartmanager.go new file mode 100644 index 0000000..45022fc --- /dev/null +++ b/restartmanager/restartmanager.go @@ -0,0 +1,134 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond + maxRestartTimeout = 1 * time.Minute +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration, health types.Health) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartManager based on a policy. 
+func New(policy container.RestartPolicy, restartCount int) RestartManager {
+ return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})}
+}
+
+func (rm *restartManager) SetPolicy(policy container.RestartPolicy) {
+ rm.Lock()
+ rm.policy = policy
+ rm.Unlock()
+}
+
+func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration, health types.Health) (bool, chan error, error) {
+ if rm.policy.IsNone() {
+ return false, nil, nil
+ }
+ rm.Lock()
+ unlockOnExit := true
+ defer func() {
+ if unlockOnExit {
+ rm.Unlock()
+ }
+ }()
+
+ if rm.canceled {
+ return false, nil, ErrRestartCanceled
+ }
+
+ if rm.active {
+ return false, nil, fmt.Errorf("invalid call on an active restart manager")
+ }
+ // If the container ran for more than 10s, regardless of status and policy,
+ // reset the timeout back to the default.
+ if executionDuration.Seconds() >= 10 {
+ rm.timeout = 0
+ }
+ // Exponential backoff: 100ms, 200ms, 400ms, ... capped at maxRestartTimeout (1m).
+ switch {
+ case rm.timeout == 0:
+ rm.timeout = defaultTimeout
+ case rm.timeout < maxRestartTimeout:
+ rm.timeout *= backoffMultiplier
+ }
+ if rm.timeout > maxRestartTimeout {
+ rm.timeout = maxRestartTimeout
+ }
+
+ var restart bool
+ switch {
+ case rm.policy.IsAlways():
+ restart = true
+ case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped:
+ restart = true
+ case rm.policy.IsOnFailure():
+ // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
+ if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max {
+ restart = (exitCode != 0) || (health.Status == types.Unhealthy)
+ }
+ }
+
+ if !restart {
+ rm.active = false
+ return false, nil, nil
+ }
+
+ rm.restartCount++
+
+ unlockOnExit = false
+ rm.active = true
+ rm.Unlock()
+
+ ch := make(chan error)
+ go func() {
+ select {
+ case <-rm.cancel:
+ ch <- ErrRestartCanceled
+ close(ch)
+ case <-time.After(rm.timeout):
+ rm.Lock()
+ close(ch)
+ rm.active = false
+ rm.Unlock()
+ }
+ }()
+
+ return true, ch, nil
+}
+
+func (rm *restartManager) Cancel() error {
+ rm.Do(func() {
+ rm.Lock()
+ rm.canceled = true
+ close(rm.cancel)
+ rm.Unlock()
+ })
+ return nil
+}
diff --git a/restartmanager/restartmanager_test.go b/restartmanager/restartmanager_test.go
new file mode 100644
index 0000000..2b09820
--- /dev/null
+++ b/restartmanager/restartmanager_test.go
@@ -0,0 +1,39 @@
+package restartmanager
+
+import (
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+)
+
+func TestRestartManagerTimeout(t *testing.T) {
+ health := types.Health{}
+ rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager)
+ duration := 1 * time.Second
+ should, _, err := rm.ShouldRestart(0, false, duration, health)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !should {
+ t.Fatal("container should be restarted")
+ }
+ if rm.timeout != defaultTimeout {
+ t.Fatalf("restart manager should have a timeout of 100 ms but has %s", rm.timeout)
+ }
+}
+
+func TestRestartManagerTimeoutReset(t *testing.T) {
+ health := types.Health{}
+ rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager)
+ rm.timeout = 5 * time.Second
+ duration := 10 * time.Second
+ _, _, err := rm.ShouldRestart(0, false, duration, health)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rm.timeout != defaultTimeout {
+ t.Fatalf("restart manager should have a timeout of 100 ms but has %s", rm.timeout)
+ }
+}
diff --git a/runconfig/config.go b/runconfig/config.go
new file mode 100644
index 0000000..c9dc6e9
--- /dev/null
+++ b/runconfig/config.go
@@ -0,0 +1,108 @@
+package runconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/docker/docker/api/types/container"
+ networktypes "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/pkg/sysinfo"
+ "github.com/docker/docker/volume"
+)
+
+// ContainerDecoder implements httputils.ContainerDecoder,
+// calling DecodeContainerConfig.
+type ContainerDecoder struct{}
+
+// DecodeConfig makes ContainerDecoder implement httputils.ContainerDecoder
+func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
+ return DecodeContainerConfig(src)
+}
+
+// DecodeHostConfig makes ContainerDecoder implement httputils.ContainerDecoder
+func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) {
+ return DecodeHostConfig(src)
+}
+
+// DecodeContainerConfig decodes a JSON-encoded config into a ContainerConfigWrapper
+// struct and returns both a Config and a HostConfig struct.
+// Be aware this function does not check whether the resulting structs are nil;
+// that is the caller's responsibility.
+func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
+ var w ContainerConfigWrapper
+
+ decoder := json.NewDecoder(src)
+ if err := decoder.Decode(&w); err != nil {
+ return nil, nil, nil, err
+ }
+
+ hc := w.getHostConfig()
+
+ // Perform platform-specific processing of Volumes and Binds.
+ if w.Config != nil && hc != nil {
+
+ // Initialize the volumes map if currently nil
+ if w.Config.Volumes == nil {
+ w.Config.Volumes = make(map[string]struct{})
+ }
+
+ // Now validate all the volumes and binds
+ if err := validateMountSettings(w.Config, hc); err != nil {
+ return nil, nil, nil, err
+ }
+ }
+
+ // Certain parameters need daemon-side validation that cannot be done
+ // on the client, as only the daemon knows what is valid for the platform.
+ if err := validateNetMode(w.Config, hc); err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Validate isolation
+ if err := validateIsolation(hc); err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Validate QoS
+ if err := validateQoS(hc); err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Validate Resources
+ if err := validateResources(hc, sysinfo.New(true)); err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Validate Privileged
+ if err := validatePrivileged(hc); err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Validate ReadonlyRootfs
+ if err := validateReadonlyRootfs(hc); err != nil {
+ return nil, nil, nil, err
+ }
+
+ return w.Config, hc, w.NetworkingConfig, nil
+}
+
+// validateMountSettings validates each of the volumes and bind settings
+// passed by the caller to ensure they are valid.
+func validateMountSettings(c *container.Config, hc *container.HostConfig) error {
+ // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 )
+
+ // Ensure all volumes and binds are valid.
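+ // Each spec is a raw string such as "/foo" or "/host:/container:ro";
+ // ParseMountRaw returns an error for malformed specs.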
+ for spec := range c.Volumes { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/runconfig/config_test.go b/runconfig/config_test.go new file mode 100644 index 0000000..83ec363 --- /dev/null +++ b/runconfig/config_test.go @@ -0,0 +1,139 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" +) + +type f struct { + file string + entrypoint strslice.StrSlice +} + +func TestDecodeContainerConfig(t *testing.T) { + + var ( + fixtures []f + image string + ) + + //TODO: Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + + if runtime.GOOS != "windows" { + image = "ubuntu" + fixtures = []f{ + {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, + {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, + {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, + } + } else { + image = "windows" + fixtures = []f{ + {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, + } + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != image { + t.Fatalf("Expected %s image, found %s\n", image, c.Image) + } + + if len(c.Entrypoint) != len(f.entrypoint) { + t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h != nil && h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } +} + +// TestDecodeContainerConfigIsolation validates isolation passed +// to the daemon in the hostConfig structure. Note this is platform specific +// as to what level of container isolation is supported. 
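+// On Windows, "process" and "hyperv" are accepted isolation values; on other
+// platforms they are rejected with an "Invalid isolation" error.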
+func TestDecodeContainerConfigIsolation(t *testing.T) { + + // An Invalid isolation level + if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { + if !strings.Contains(err.Error(), `Invalid isolation: "invalid"`) { + t.Fatal(err) + } + } + + // Blank isolation (== default) + if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { + t.Fatal("Blank isolation should have succeeded") + } + + // Default isolation + if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { + t.Fatal("default isolation should have succeeded") + } + + // Process isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + t.Fatal("process isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + if !strings.Contains(err.Error(), `Invalid isolation: "process"`) { + t.Fatal(err) + } + } + } + + // Hyper-V Containers isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + t.Fatal("hyperv isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + if !strings.Contains(err.Error(), `Invalid isolation: "hyperv"`) { + t.Fatal(err) + } + } + } +} + +// callDecodeContainerConfigIsolation is a utility function to call +// DecodeContainerConfig for validating isolation +func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + b []byte + err error + ) + w := ContainerConfigWrapper{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Isolation: container.Isolation(isolation)}, + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + return DecodeContainerConfig(bytes.NewReader(b)) +} diff --git a/runconfig/config_unix.go b/runconfig/config_unix.go new file mode 100644 index 0000000..b4fbfb2 --- /dev/null +++ b/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. 
+// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/runconfig/config_windows.go b/runconfig/config_windows.go new file mode 100644 index 0000000..f2361b5 --- /dev/null +++ b/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/runconfig/errors.go b/runconfig/errors.go new file mode 100644 index 0000000..c95a291 --- /dev/null +++ b/runconfig/errors.go @@ -0,0 +1,38 @@ +package runconfig + +import ( + "fmt" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
+ ErrConflictHostNetwork = fmt.Errorf("container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("conflicting options: host type networking can't be used with links. This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("user specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("user specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("conflicting options: hostname and the UTS mode") +) diff --git a/runconfig/fixtures/unix/container_config_1_14.json b/runconfig/fixtures/unix/container_config_1_14.json new file mode 100644 index 0000000..b08334c --- /dev/null +++ b/runconfig/fixtures/unix/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/runconfig/fixtures/unix/container_config_1_17.json b/runconfig/fixtures/unix/container_config_1_17.json new file mode 100644 index 0000000..0d78087 --- /dev/null +++ b/runconfig/fixtures/unix/container_config_1_17.json @@ -0,0 +1,50 @@ +{ + 
"Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/runconfig/fixtures/unix/container_config_1_19.json b/runconfig/fixtures/unix/container_config_1_19.json new file mode 100644 index 0000000..de49cf3 --- /dev/null +++ b/runconfig/fixtures/unix/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/runconfig/fixtures/unix/container_hostconfig_1_14.json b/runconfig/fixtures/unix/container_hostconfig_1_14.json new file mode 100644 index 0000000..c72ac91 --- /dev/null +++ b/runconfig/fixtures/unix/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git a/runconfig/fixtures/unix/container_hostconfig_1_19.json b/runconfig/fixtures/unix/container_hostconfig_1_19.json new file mode 100644 index 0000000..5ca8aa7 --- /dev/null +++ b/runconfig/fixtures/unix/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + 
"CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/runconfig/fixtures/windows/container_config_1_19.json b/runconfig/fixtures/windows/container_config_1_19.json new file mode 100644 index 0000000..724320c --- /dev/null +++ b/runconfig/fixtures/windows/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "cmd", + "Image": "windows", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "c:/windows": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["c:/windows:d:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "default", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go new file mode 100644 index 0000000..24aed19 --- /dev/null +++ b/runconfig/hostconfig.go @@ -0,0 +1,80 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } +} + +// validateNetContainerMode ensures that the various combinations of requested +// network settings wrt container mode are valid. 
+func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("Invalid network mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/runconfig/hostconfig_solaris.go b/runconfig/hostconfig_solaris.go new file mode 100644 index 0000000..5b6e13d --- /dev/null +++ b/runconfig/hostconfig_solaris.go @@ -0,0 +1,46 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation level in the hostconfig structure. +// This setting is currently discarded for Solaris so this is a no-op. +func validateIsolation(hc *container.HostConfig) error { + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} diff --git a/runconfig/hostconfig_test.go b/runconfig/hostconfig_test.go new file mode 100644 index 0000000..7f39cf5 --- /dev/null +++ b/runconfig/hostconfig_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package runconfig + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// TODO Windows: This will need addressing for a Windows daemon. 
+func TestNetworkModeTest(t *testing.T) { + networkModes := map[container.NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[container.NetworkMode]string{ + "": "", + "something:weird": "something:weird", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[container.IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + "container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[container.IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, 
ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[container.UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestUsernsModeTest(t *testing.T) { + usrensMode := map[container.UsernsMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for usernsMode, state := range usrensMode { + if usernsMode.IsPrivate() != state[0] { + t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) + } + if usernsMode.IsHost() != state[1] { + t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) + } + if usernsMode.Valid() != state[2] { + t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[container.PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[container.RestartPolicy][]bool{ + // none, always, failure + container.RestartPolicy{}: {true, false, false}, + container.RestartPolicy{Name: "something", MaximumRetryCount: 0}: {false, false, false}, + container.RestartPolicy{Name: "no", MaximumRetryCount: 0}: {true, false, false}, + container.RestartPolicy{Name: "always", MaximumRetryCount: 0}: {false, true, false}, + container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + 
{"fixtures/unix/container_hostconfig_1_14.json"}, + {"fixtures/unix/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if len(c.CapAdd) != 1 && c.CapAdd[0] != "NET_ADMIN" { + t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) + } + + if len(c.CapDrop) != 1 && c.CapDrop[0] != "NET_ADMIN" { + t.Fatalf("Expected CapDrop NET_ADMIN, got %v", c.CapDrop) + } + } +} + +func TestValidateResources(t *testing.T) { + type resourceTest struct { + ConfigCPURealtimePeriod int64 + ConfigCPURealtimeRuntime int64 + SysInfoCPURealtimePeriod bool + SysInfoCPURealtimeRuntime bool + ErrorExpected bool + FailureMsg string + } + + tests := []resourceTest{ + { + ConfigCPURealtimePeriod: 1000, + ConfigCPURealtimeRuntime: 1000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: false, + FailureMsg: "Expected valid configuration", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: false, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-period is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 10000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is greater than cpu-rt-period", + }, + } + + for _, rt := range tests { + var hc container.HostConfig + hc.Resources.CPURealtimePeriod = rt.ConfigCPURealtimePeriod + hc.Resources.CPURealtimeRuntime = rt.ConfigCPURealtimeRuntime + + var si sysinfo.SysInfo + si.CPURealtimePeriod = rt.SysInfoCPURealtimePeriod + si.CPURealtimeRuntime = rt.SysInfoCPURealtimeRuntime + + if err := validateResources(&hc, &si); (err != nil) != rt.ErrorExpected { + t.Fatal(rt.FailureMsg, err) + } + } +} diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go new file mode 100644 index 0000000..55df5da --- /dev/null +++ b/runconfig/hostconfig_unix.go @@ -0,0 +1,110 @@ +// +build !windows,!solaris + +package runconfig + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. 
+func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + return nil +} + +// validateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) + } + return nil +} + +// validateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/runconfig/hostconfig_windows.go b/runconfig/hostconfig_windows.go new file mode 100644 index 0000000..5eb956d --- /dev/null +++ b/runconfig/hostconfig_windows.go @@ -0,0 +1,96 @@ +package runconfig + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. 
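At the seam between the Unix validators above and the Windows validators that follow, it is worth seeing how the per-platform isolation rules surface through the exported API: `DecodeContainerConfig` runs whichever validators the daemon was built with, which is what `TestDecodeContainerConfigIsolation` earlier in this patch exercises. A hedged sketch with an invented payload:

```go
package main

import (
	"fmt"
	"runtime"
	"strings"

	"github.com/docker/docker/runconfig"
)

func main() {
	// "hyperv" is a valid isolation mode on Windows only.
	body := `{"Image": "some-image", "HostConfig": {"Isolation": "hyperv", "NetworkMode": "none"}}`

	_, _, _, err := runconfig.DecodeContainerConfig(strings.NewReader(body))
	if runtime.GOOS == "windows" {
		fmt.Println(err) // <nil>
	} else {
		// e.g. on Linux: Invalid isolation: "hyperv" - linux only supports 'default'
		fmt.Println(err)
	}
}
```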
+func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { + return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") + } + + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// validateQoS performs platform specific validation of the Qos settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("Windows does not support CPU real-time period") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("Windows does not support CPU real-time runtime") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Privileged { + return fmt.Errorf("Windows does not support privileged mode") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("Windows does not support root filesystem in read-only mode") + } + return nil +} diff --git a/runconfig/hostconfig_windows_test.go b/runconfig/hostconfig_windows_test.go new file mode 100644 index 0000000..b780dc0 --- /dev/null +++ b/runconfig/hostconfig_windows_test.go @@ -0,0 +1,17 @@ +// +build windows + +package runconfig + +import ( + "testing" + + "github.com/docker/docker/api/types/container" +) + +func TestValidatePrivileged(t *testing.T) { + expected := "Windows does not support privileged mode" + err := validatePrivileged(&container.HostConfig{Privileged: true}) + if err == nil || err.Error() != expected { + t.Fatalf("Expected %s", expected) + } +} diff --git a/runconfig/opts/parse.go b/runconfig/opts/parse.go new file mode 100644 index 0000000..a7f1b79 --- /dev/null +++ b/runconfig/opts/parse.go @@ -0,0 +1,20 @@ +package opts + +import ( + "strings" +) + +// ConvertKVStringsToMap 
converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} diff --git a/vendor.conf b/vendor.conf new file mode 100644 index 0000000..6017cef --- /dev/null +++ b/vendor.conf @@ -0,0 +1,160 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.25 +github.com/Microsoft/go-winio v0.4.2 +github.com/Sirupsen/logrus v0.11.0 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/jhowardmsft/opengcs v0.0.7 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.3 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +# forked golang.org/x/net package includes a patch for lazy loading trace templates +golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 +golang.org/x/sys 237befd9ffade4114d3e63795272b7e08ea55e83 https://github.com/resin-os/golang-sys +github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d +golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/pmezard/go-difflib v1.0.0 + +github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 +golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0 + +#get libnetwork packages +github.com/docker/libnetwork 1f195c9d178963adf9573b6d39445b3c168bd015 https://github.com/resin-os/balena-libnetwork +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist v0.1.0 +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969 +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd ea5389a79f40206170582c1ea076191b8622cb8e https://github.com/aaronlehmann/etcd # for https://github.com/coreos/etcd/pull/7830 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 +github.com/vbatts/tar-split v0.10.1 +github.com/resin-os/librsync-go b8cf684c55e4695256db7a149f179cb2f2a291c3 +# FIXME: make 
librsync not depend on this +github.com/resin-os/circbuf 56e73111d0b216a0157b0122dbba22a6bcaba395 +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +google.golang.org/grpc v1.3.0 + +# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly +github.com/opencontainers/runc 13e66eedaddfbfeda2a73d23701000e4e63b5471 https://github.com/resin-os/runc +github.com/opencontainers/image-spec f03dbe35d449c54915d235f1a3cf8f585a24babe +github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs + +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 + +# gelf logging driver deps +github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea + +github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972 +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf + +# gcplogs deps +golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0 +google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823 +cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525 +github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 + +# containerd +github.com/containerd/containerd dda9a739d629b9ae76d668fcaaa7190f3732fdb7 https://github.com/resin-os/containerd +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 +github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d +github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb + +# cluster +github.com/docker/swarmkit 79381d0840be27f8b3f5c667b348a4467d866eeb +github.com/gogo/protobuf v0.4 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 08a7dbd3d99261d9ae86ef1b3b8bdb0382fb82cd +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +github.com/matttproud/golang_protobuf_extensions v1.0.0 +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 +github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 + +# cli +github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git +github.com/spf13/pflag 
9ff6c6923cfffbcd502984b8e0c80539a94968b7 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty + +# metrics +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 + +github.com/opencontainers/selinux v1.0.0-rc1 + +# docker CLI +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c +github.com/docker/cli 9609891f575ae6c7763e01ec498ac1dbe741fba3 https://github.com/resin-os/docker-cli +github.com/docker/docker-credential-helpers v0.5.1 +github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 +github.com/docker/notary v0.4.2 +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff +github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a +github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6 + +# containerd dependencies +github.com/cloudfoundry/gosigar 3ed7c74352dae6dc00bdc8c74045375352e3ec05 +github.com/containerd/console a3863895279f5104533fd999c1babf80faffd98c +github.com/containerd/go-runc 5fe4d8cb7fdc0fae5f5a7f4f1d65a565032401b2 +github.com/cyberdelia/go-metrics-graphite 7e54b5c2aa6eaff4286c44129c3def899dff528c +github.com/rcrowley/go-metrics eeba7bd0dd01ace6e690fa833b3f22aaec29af43 +github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca + +# runc dependencies +github.com/coreos/go-systemd v14 +github.com/godbus/dbus v3 +github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 +github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08 +github.com/syndtr/gocapability e7cb7fa329f456b3855136a2642b197bad7366ba diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 0000000..a4c5efd --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 0000000..9b91f52 --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,452 @@ +# Google Cloud for Go + +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go) +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) + +``` go +import "cloud.google.com/go" +``` + +Go packages for Google Cloud Platform services. + +To install the packages on your system, + +``` +$ go get -u cloud.google.com/go/... +``` + +**NOTE:** These packages are under development, and may occasionally make +backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). + +## News + +_December 5, 2016_ + +More changes to BigQuery: + +* The `ValueList` type was removed. 
It is no longer necessary. Instead of + ```go + var v ValueList + ... it.Next(&v) .. + ``` + use + + ```go + var v []Value + ... it.Next(&v) ... + ``` + +* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or + `ValueList` would append to the slice. Now each call resets the size to zero first. + +* Schema inference will infer the SQL type BYTES for a struct field of + type []byte. Previously it inferred STRING. + +* The types `uint`, `uint64` and `uintptr` are no longer supported in schema + inference. BigQuery's integer type is INT64, and those types may hold values + that are not correctly represented in a 64-bit signed integer. + +* The SQL types DATE, TIME and DATETIME are now supported. They correspond to + the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` + package. + +_November 17, 2016_ + +Change to BigQuery: values from INTEGER columns will now be returned as int64, +not int. This will avoid errors arising from large values on 32-bit systems. + +_November 8, 2016_ + +New datastore feature: datastore now encodes your nested Go structs as Entity values, +instead of a flattened list of the embedded struct's fields. +This means that you may now have twice-nested slices, eg. +```go +type State struct { + Cities []struct{ + Populations []int + } +} +``` + +See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for +more details. + +_November 8, 2016_ + +Breaking changes to datastore: contexts no longer hold namespaces; instead you +must set a key's namespace explicitly. Also, key functions have been changed +and renamed. + +* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: + ```go + q := datastore.NewQuery("Kind").Namespace("ns") + ``` + +* All the fields of Key are exported. That means you can construct any Key with a struct literal: + ```go + k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} + ``` + +* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. + +* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace + ```go + NewIncompleteKey(ctx, kind, parent) + ``` + with + ```go + IncompleteKey(kind, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + +* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace + ```go + NewKey(ctx, kind, name, 0, parent) + NewKey(ctx, kind, "", id, parent) + ``` + with + ```go + NameKey(kind, name, parent) + IDKey(kind, id, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + +* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. + +* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. + +See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for +more details. + +_October 27, 2016_ + +Breaking change to bigquery: `NewGCSReference` is now a function, +not a method on `Client`. + +New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling +loading data into a table from a file or any `io.Reader`. + +_October 21, 2016_ + +Breaking change to pubsub: removed `pubsub.Done`. + +Use `iterator.Done` instead, where `iterator` is the package +`google.golang.org/api/iterator`. 
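For example, a pull loop that used to terminate on `pubsub.Done` now checks `iterator.Done` (a sketch in the style of the snippets above; `it` here stands for any of the package's message iterators):

```go
import "google.golang.org/api/iterator"

for {
	msg, err := it.Next()
	if err == iterator.Done {
		break
	}
	if err != nil {
		// TODO: Handle error.
	}
	// Use msg.
}
```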
+ + +[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) + +## Supported APIs + +Google API | Status | Package +-------------------------------|--------------|----------------------------------------------------------- +[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref] +[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref] +[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Logging][cloud-logging] | beta | [`cloud.google.com/go/logging`][cloud-logging-ref] +[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] +[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref] +[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref] +[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref] + + +> **Experimental status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. There may be minor backwards-incompatible +> changes where necessary. +> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at +https://godoc.org/cloud.google.com/go + +Visit or join the +[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) +for updates on these packages. + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. You can see which versions are +currently supported by looking at the lines following `go:` in +[`.travis.yml`](.travis.yml). + +## Authorization + +By default, each API will use [Google Application Default Credentials][default-creds] +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) +to the `NewClient` function of the desired package. For example: + +```go +client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) +``` + +You can exert more control over authorization by using the +[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. Then pass +[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) +to the `NewClient` function: +```go +tokenSource := ... 
+client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
+```
+
+## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
+
+- [About Cloud Datastore][cloud-datastore]
+- [Activating the API for your project][cloud-datastore-activation]
+- [API documentation][cloud-datastore-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
+- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)
+
+### Example Usage
+
+First create a `datastore.Client` to use throughout your application:
+
+```go
+client, err := datastore.NewClient(ctx, "my-project-id")
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+Then use that client to interact with the API:
+
+```go
+type Post struct {
+	Title       string
+	Body        string `datastore:",noindex"`
+	PublishedAt time.Time
+}
+keys := []*datastore.Key{
+	datastore.NameKey("Post", "post1", nil),
+	datastore.NameKey("Post", "post2", nil),
+}
+posts := []*Post{
+	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+	log.Fatal(err)
+}
+```
+
+## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
+
+- [About Cloud Storage][cloud-storage]
+- [API documentation][cloud-storage-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
+
+### Example Usage
+
+First create a `storage.Client` to use throughout your application:
+
+```go
+client, err := storage.NewClient(ctx)
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+```go
+// Read object1 from the bucket.
+rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
+if err != nil {
+	log.Fatal(err)
+}
+defer rc.Close()
+body, err := ioutil.ReadAll(rc)
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
+
+- [About Cloud Pub/Sub][cloud-pubsub]
+- [API documentation][cloud-pubsub-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
+
+### Example Usage
+
+First create a `pubsub.Client` to use throughout your application:
+
+```go
+client, err := pubsub.NewClient(ctx, "project-id")
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+```go
+// Publish "hello world" on topic1.
+topic := client.Topic("topic1")
+msgIDs, err := topic.Publish(ctx, &pubsub.Message{
+	Data: []byte("hello world"),
+})
+if err != nil {
+	log.Fatal(err)
+}
+
+// Create an iterator to pull messages via subscription1.
+it, err := client.Subscription("subscription1").Pull(ctx)
+if err != nil {
+	log.Fatal(err)
+}
+defer it.Stop()
+
+// Consume N messages from the iterator.
+for i := 0; i < N; i++ {
+	msg, err := it.Next()
+	if err == iterator.Done {
+		break
+	}
+	if err != nil {
+		log.Fatalf("Failed to retrieve message: %v", err)
+	}
+
+	fmt.Printf("Message %d: %s\n", i, msg.Data)
+	msg.Done(true) // Acknowledge that we've consumed the message.
+}
+```
+
+## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
+
+- [About Cloud BigQuery][cloud-bigquery]
+- [API documentation][cloud-bigquery-docs]
+- [Go client documentation][cloud-bigquery-ref]
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)
+
+### Example Usage
+
+First create a `bigquery.Client` to use throughout your application:
+```go
+c, err := bigquery.NewClient(ctx, "my-project-ID")
+if err != nil {
+	// TODO: Handle error.
+}
+```
+Then use that client to interact with the API:
+```go
+// Construct a query.
+q := c.Query(`
+	SELECT year, SUM(number)
+	FROM [bigquery-public-data:usa_names.usa_1910_2013]
+	WHERE name = "William"
+	GROUP BY year
+	ORDER BY year
+`)
+// Execute the query.
+it, err := q.Read(ctx)
+if err != nil {
+	// TODO: Handle error.
+}
+// Iterate through the results.
+for {
+	var values []bigquery.Value
+	err := it.Next(&values)
+	if err == iterator.Done {
+		break
+	}
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Println(values)
+}
+```
+
+
+## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
+
+- [About Stackdriver Logging][cloud-logging]
+- [API documentation][cloud-logging-docs]
+- [Go client documentation][cloud-logging-ref]
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
+
+### Example Usage
+
+First create a `logging.Client` to use throughout your application:
+
+```go
+ctx := context.Background()
+client, err := logging.NewClient(ctx, "my-project")
+if err != nil {
+	// TODO: Handle error.
+}
+```
+Usually, you'll want to add log entries to a buffer to be periodically flushed
+(automatically and asynchronously) to the Stackdriver Logging service.
+```go
+logger := client.Logger("my-log")
+logger.Log(logging.Entry{Payload: "something happened!"})
+```
+Close your client before your program exits, to flush any buffered log entries.
+```go
+err = client.Close()
+if err != nil {
+	// TODO: Handle error.
+}
+```
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
+document for details. We're using Gerrit for our code reviews. Please don't open pull
+requests against this repo; new pull requests will be closed automatically.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+ +[cloud-datastore]: https://cloud.google.com/datastore/ +[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore +[cloud-datastore-docs]: https://cloud.google.com/datastore/docs +[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate + +[cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub +[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs + +[cloud-storage]: https://cloud.google.com/storage/ +[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage +[cloud-storage-docs]: https://cloud.google.com/storage/docs +[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets + +[cloud-bigtable]: https://cloud.google.com/bigtable/ +[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable + +[cloud-bigquery]: https://cloud.google.com/bigquery/ +[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs +[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery + +[cloud-logging]: https://cloud.google.com/logging/ +[cloud-logging-docs]: https://cloud.google.com/logging/docs +[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging + +[cloud-vision]: https://cloud.google.com/vision/ +[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision + +[cloud-language]: https://cloud.google.com/natural-language +[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1beta1 + +[cloud-speech]: https://cloud.google.com/speech +[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1 + +[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 0000000..f9d2bef --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,438 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" + + "cloud.google.com/go/internal" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. 
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+				ResponseHeaderTimeout: 2 * time.Second,
+			},
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+			},
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. 
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance.
The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 0000000..8e0c8f8 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. 
+ Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 0000000..79995be --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,55 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with ctx.Err(). +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. + if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return fmt.Errorf("%v; last function err: %v", cerr, lastErr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md new file mode 100644 index 0000000..d2d9a17 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/README.md @@ -0,0 +1,11 @@ +Auto-generated logging v2 clients +================================= + +This package includes auto-generated clients for the logging v2 API. + +Use the handwritten logging client (in the parent directory, +cloud.google.com/go/logging) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. 
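+
+A minimal sketch of constructing one of these generated clients (assuming
+Application Default Credentials are available in the environment; the
+`example` function name is illustrative only):
+
+```go
+import (
+	"golang.org/x/net/context"
+
+	logging "cloud.google.com/go/logging/apiv2"
+)
+
+func example() {
+	ctx := context.Background()
+	// NewConfigClient dials the Stackdriver Logging API; Close releases
+	// the underlying gRPC connection when the client is no longer needed.
+	c, err := logging.NewConfigClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	defer c.Close()
+	_ = c // Use c to list, create, update or delete sinks.
+}
+```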
+ + diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go new file mode 100644 index 0000000..d3091be --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -0,0 +1,300 @@ +// Copyright 2016, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "fmt" + "math" + "runtime" + "strings" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + configParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}") +) + +// ConfigCallOptions contains the retry settings for each method of ConfigClient. +type ConfigCallOptions struct { + ListSinks []gax.CallOption + GetSink []gax.CallOption + CreateSink []gax.CallOption + UpdateSink []gax.CallOption + DeleteSink []gax.CallOption +} + +func defaultConfigClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultConfigCallOptions() *ConfigCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &ConfigCallOptions{ + ListSinks: retry[[2]string{"default", "idempotent"}], + GetSink: retry[[2]string{"default", "idempotent"}], + CreateSink: retry[[2]string{"default", "non_idempotent"}], + UpdateSink: retry[[2]string{"default", "non_idempotent"}], + DeleteSink: retry[[2]string{"default", "idempotent"}], + } +} + +// ConfigClient is a client for interacting with Stackdriver Logging API. +type ConfigClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + configClient loggingpb.ConfigServiceV2Client + + // The call options for this service. + CallOptions *ConfigCallOptions + + // The metadata to be sent with each request. + metadata metadata.MD +} + +// NewConfigClient creates a new config service v2 client. +// +// Service for configuring sinks used to export log entries outside of +// Stackdriver Logging. 
+func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ConfigClient{ + conn: conn, + CallOptions: defaultConfigCallOptions(), + + configClient: loggingpb.NewConfigServiceV2Client(conn), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ConfigClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ConfigClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ConfigClient) SetGoogleClientInfo(name, version string) { + goVersion := strings.Replace(runtime.Version(), " ", "_", -1) + v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) + c.metadata = metadata.Pairs("x-goog-api-client", v) +} + +// ConfigParentPath returns the path for the parent resource. +func ConfigParentPath(project string) string { + path, err := configParentPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// ConfigSinkPath returns the path for the sink resource. +func ConfigSinkPath(project, sink string) string { + path, err := configSinkPathTemplate.Render(map[string]string{ + "project": project, + "sink": sink, + }) + if err != nil { + panic(err) + } + return path +} + +// ListSinks lists sinks. +func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + it := &LogSinkIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { + var resp *loggingpb.ListSinksResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.ListSinks(ctx, req) + return err + }, c.CallOptions.ListSinks...) + if err != nil { + return nil, "", err + } + return resp.Sinks, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetSink gets a sink. +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.GetSink(ctx, req) + return err + }, c.CallOptions.GetSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateSink creates a sink. 
+func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.CreateSink(ctx, req) + return err + }, c.CallOptions.CreateSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSink updates or creates a sink. +func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.UpdateSink(ctx, req) + return err + }, c.CallOptions.UpdateSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSink deletes a sink. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.configClient.DeleteSink(ctx, req) + return err + }, c.CallOptions.DeleteSink...) + return err +} + +// LogSinkIterator manages a stream of *loggingpb.LogSink. +type LogSinkIterator struct { + items []*loggingpb.LogSink + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogSinkIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { + var item *loggingpb.LogSink + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogSinkIterator) bufLen() int { + return len(it.items) +} + +func (it *LogSinkIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go new file mode 100644 index 0000000..3a92278 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -0,0 +1,26 @@ +// Copyright 2016, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package logging is an experimental, auto-generated package for the +// logging API. +// +// The Stackdriver Logging API lets you write log entries and manage your +// logs, log sinks and logs-based metrics. +// +// Use the client at cloud.google.com/go/logging in preference to this. +package logging // import "cloud.google.com/go/logging/apiv2" + +const gapicNameVersion = "gapic/0.1.0" diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go new file mode 100644 index 0000000..321b1e2 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -0,0 +1,359 @@ +// Copyright 2016, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "fmt" + "math" + "runtime" + "strings" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + loggingParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}") +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + DeleteLog []gax.CallOption + WriteLogEntries []gax.CallOption + ListLogEntries []gax.CallOption + ListMonitoredResourceDescriptors []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"list", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + DeleteLog: retry[[2]string{"default", "idempotent"}], + WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], + ListLogEntries: retry[[2]string{"list", "idempotent"}], + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Logging API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.LoggingServiceV2Client + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + metadata metadata.MD +} + +// NewClient creates a new logging service v2 client. +// +// Service for ingesting and querying logs. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: loggingpb.NewLoggingServiceV2Client(conn), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(name, version string) { + goVersion := strings.Replace(runtime.Version(), " ", "_", -1) + v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) + c.metadata = metadata.Pairs("x-goog-api-client", v) +} + +// LoggingParentPath returns the path for the parent resource. +func LoggingParentPath(project string) string { + path, err := loggingParentPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// LoggingLogPath returns the path for the log resource. 
+func LoggingLogPath(project, log string) string { + path, err := loggingLogPathTemplate.Render(map[string]string{ + "project": project, + "log": log, + }) + if err != nil { + panic(err) + } + return path +} + +// DeleteLog deletes all the log entries in a log. +// The log reappears if it receives new entries. +func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteLog(ctx, req) + return err + }, c.CallOptions.DeleteLog...) + return err +} + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries are +// written by this method. +func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.WriteLogEntriesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.WriteLogEntries(ctx, req) + return err + }, c.CallOptions.WriteLogEntries...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries from Cloud +// Logging. For ways to export log entries, see +// [Exporting Logs](/logging/docs/export). +func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + it := &LogEntryIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { + var resp *loggingpb.ListLogEntriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.ListLogEntries(ctx, req) + return err + }, c.CallOptions.ListLogEntries...) + if err != nil { + return nil, "", err + } + return resp.Entries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListMonitoredResourceDescriptors lists the monitored resource descriptors used by Stackdriver Logging. 
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *loggingpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req) + return err + }, c.CallOptions.ListMonitoredResourceDescriptors...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// LogEntryIterator manages a stream of *loggingpb.LogEntry. +type LogEntryIterator struct { + items []*loggingpb.LogEntry + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogEntryIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { + var item *loggingpb.LogEntry + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogEntryIterator) bufLen() int { + return len(it.items) +} + +func (it *LogEntryIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go new file mode 100644 index 0000000..2345978 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -0,0 +1,299 @@ +// Copyright 2016, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "fmt" + "math" + "runtime" + "strings" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +var ( + metricsParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}") +) + +// MetricsCallOptions contains the retry settings for each method of MetricsClient. 
+type MetricsCallOptions struct { + ListLogMetrics []gax.CallOption + GetLogMetric []gax.CallOption + CreateLogMetric []gax.CallOption + UpdateLogMetric []gax.CallOption + DeleteLogMetric []gax.CallOption +} + +func defaultMetricsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultMetricsCallOptions() *MetricsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &MetricsCallOptions{ + ListLogMetrics: retry[[2]string{"default", "idempotent"}], + GetLogMetric: retry[[2]string{"default", "idempotent"}], + CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], + UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], + DeleteLogMetric: retry[[2]string{"default", "idempotent"}], + } +} + +// MetricsClient is a client for interacting with Stackdriver Logging API. +type MetricsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricsClient loggingpb.MetricsServiceV2Client + + // The call options for this service. + CallOptions *MetricsCallOptions + + // The metadata to be sent with each request. + metadata metadata.MD +} + +// NewMetricsClient creates a new metrics service v2 client. +// +// Service for configuring logs-based metrics. +func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricsClient{ + conn: conn, + CallOptions: defaultMetricsCallOptions(), + + metricsClient: loggingpb.NewMetricsServiceV2Client(conn), + } + c.SetGoogleClientInfo("gax", gax.Version) + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricsClient) SetGoogleClientInfo(name, version string) { + goVersion := strings.Replace(runtime.Version(), " ", "_", -1) + v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion) + c.metadata = metadata.Pairs("x-goog-api-client", v) +} + +// MetricsParentPath returns the path for the parent resource. +func MetricsParentPath(project string) string { + path, err := metricsParentPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// MetricsMetricPath returns the path for the metric resource. 
+func MetricsMetricPath(project, metric string) string { + path, err := metricsMetricPathTemplate.Render(map[string]string{ + "project": project, + "metric": metric, + }) + if err != nil { + panic(err) + } + return path +} + +// ListLogMetrics lists logs-based metrics. +func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + it := &LogMetricIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { + var resp *loggingpb.ListLogMetricsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.ListLogMetrics(ctx, req) + return err + }, c.CallOptions.ListLogMetrics...) + if err != nil { + return nil, "", err + } + return resp.Metrics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetLogMetric gets a logs-based metric. +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.GetLogMetric(ctx, req) + return err + }, c.CallOptions.GetLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateLogMetric creates a logs-based metric. +func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.CreateLogMetric(ctx, req) + return err + }, c.CallOptions.CreateLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateLogMetric creates or updates a logs-based metric. +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.UpdateLogMetric(ctx, req) + return err + }, c.CallOptions.UpdateLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteLogMetric deletes a logs-based metric. +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error { + md, _ := metadata.FromContext(ctx) + ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.metricsClient.DeleteLogMetric(ctx, req) + return err + }, c.CallOptions.DeleteLogMetric...) 
+ return err +} + +// LogMetricIterator manages a stream of *loggingpb.LogMetric. +type LogMetricIterator struct { + items []*loggingpb.LogMetric + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogMetricIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { + var item *loggingpb.LogMetric + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogMetricIterator) bufLen() int { + return len(it.items) +} + +func (it *LogMetricIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go new file mode 100644 index 0000000..6da3adf --- /dev/null +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package logging contains a Stackdriver Logging client suitable for writing logs. +For reading logs, and working with sinks, metrics and monitored resources, +see package cloud.google.com/go/logging/logadmin. + +This client uses Logging API v2. +See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. + +This package is experimental and subject to API changes. + + +Creating a Client + +Use a Client to interact with the Stackdriver Logging API. + + // Create a Client + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + + +Basic Usage + +For most use-cases, you'll want to add log entries to a buffer to be periodically +flushed (automatically and asynchronously) to the Stackdriver Logging service. + + // Initialize a logger + lg := client.Logger("my-log") + + // Add entry to log buffer + lg.Log(logging.Entry{Payload: "something happened!"}) + + +Closing your Client + +You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service. + + // Close the client when finished. + err = client.Close() + if err != nil { + // TODO: Handle error. 
+	}
+
+
+Synchronous Logging
+
+For critical errors, you may want to send your log entries immediately.
+LogSync is slow and will block until the log entry has been sent, so it is
+not recommended for routine use.
+
+	lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"})
+
+
+The Standard Logger Interface
+
+You may want to use a standard log.Logger in your program.
+
+	// stdlg is a *log.Logger
+	stdlg := lg.StandardLogger(logging.Info)
+	stdlg.Println("some info")
+
+
+Log Levels
+
+An Entry may have one of a number of severity levels associated with it.
+
+	logging.Entry{
+		Payload:  "something terrible happened!",
+		Severity: logging.Critical,
+	}
+
+*/
+package logging // import "cloud.google.com/go/logging"
diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go
new file mode 100644
index 0000000..7d8ece0
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/internal/common.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	ProdAddr = "logging.googleapis.com:443"
+	Version  = "0.2.0"
+)
+
+func LogPath(parent, logID string) string {
+	logID = strings.Replace(logID, "/", "%2F", -1)
+	return fmt.Sprintf("%s/logs/%s", parent, logID)
+}
diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go
new file mode 100644
index 0000000..8506800
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/logging.go
@@ -0,0 +1,674 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// API/gRPC features intentionally missing from this client:
+// - You cannot have the server pick the time of the entry. This client
+//   always sends a time.
+// - There is no way to provide a protocol buffer payload.
+// - No support for the "partial success" feature when writing log entries.
+
+// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded.
+// These features are missing now, but will likely be added:
+// - There is no way to specify CallOptions.
+ +package logging + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +const ( + // Scope for reading from the logging service. + ReadScope = "https://www.googleapis.com/auth/logging.read" + + // Scope for writing to the logging service. + WriteScope = "https://www.googleapis.com/auth/logging.write" + + // Scope for administrative actions on the logging service. + AdminScope = "https://www.googleapis.com/auth/logging.admin" +) + +const ( + // defaultErrorCapacity is the capacity of the channel used to deliver + // errors to the OnError function. + defaultErrorCapacity = 10 + + // DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption. + DefaultDelayThreshold = time.Second + + // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption. + DefaultEntryCountThreshold = 1000 + + // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. + DefaultEntryByteThreshold = 1 << 20 // 1MiB + + // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. + DefaultBufferedByteLimit = 1 << 30 // 1GiB +) + +// For testing: +var now = time.Now + +// ErrOverflow signals that the number of buffered entries for a Logger +// exceeds its BufferLimit. +var ErrOverflow = errors.New("logging: log entry overflowed buffer limits") + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + client *vkit.Client // client for the logging service + projectID string + errc chan error // should be buffered to minimize dropped errors + donec chan struct{} // closed on Client.Close to close Logger bundlers + loggers sync.WaitGroup // so we can wait for loggers to close + closed bool + + // OnError is called when an error occurs in a call to Log or Flush. The + // error may be due to an invalid Entry, an overflow because BufferLimit + // was reached (in which case the error will be ErrOverflow) or an error + // communicating with the logging service. OnError is called with errors + // from all Loggers. It is never called concurrently. OnError is expected + // to return quickly; if errors occur while OnError is running, some may + // not be reported. The default behavior is to call log.Printf. + // + // This field should be set only once, before any method of Client is called. + OnError func(err error) +} + +// NewClient returns a new logging client associated with the provided project ID. +// +// By default NewClient uses WriteScope. To use a different scope, call +// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + // Check for '/' in project ID to reserve the ability to support various owning resources, + // in the form "{Collection}/{Name}", for instance "organizations/my-org". 
+ if strings.ContainsRune(projectID, '/') { + return nil, errors.New("logging: project ID contains '/'") + } + opts = append([]option.ClientOption{ + option.WithEndpoint(internal.ProdAddr), + option.WithScopes(WriteScope), + }, opts...) + c, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + c.SetGoogleClientInfo("logging", internal.Version) + client := &Client{ + client: c, + projectID: projectID, + errc: make(chan error, defaultErrorCapacity), // create a small buffer for errors + donec: make(chan struct{}), + OnError: func(e error) { log.Printf("logging client: %v", e) }, + } + // Call the user's function synchronously, to make life easier for them. + go func() { + for err := range client.errc { + // This reference to OnError is memory-safe if the user sets OnError before + // calling any client methods. The reference happens before the first read from + // client.errc, which happens before the first write to client.errc, which + // happens before any call, which happens before the user sets OnError. + if fn := client.OnError; fn != nil { + fn(err) + } else { + log.Printf("logging (project ID %q): %v", projectID, err) + } + } + }() + return client, nil +} + +// parent returns the string used in many RPCs to denote the parent resource of the log. +func (c *Client) parent() string { + return "projects/" + c.projectID +} + +var unixZeroTimestamp *tspb.Timestamp + +func init() { + var err error + unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0)) + if err != nil { + panic(err) + } +} + +// Ping reports whether the client's connection to the logging service and the +// authentication configuration are valid. To accomplish this, Ping writes a +// log entry "ping" to a log named "ping". +func (c *Client) Ping(ctx context.Context) error { + ent := &logpb.LogEntry{ + Payload: &logpb.LogEntry_TextPayload{"ping"}, + Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both + InsertId: "ping", // necessary for the service to dedup these entries. + } + _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: internal.LogPath(c.parent(), "ping"), + Resource: &mrpb.MonitoredResource{Type: "global"}, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// A Logger is used to write log messages to a single log. It can be configured +// with a log ID, common monitored resource, and a set of common labels. +type Logger struct { + client *Client + logName string // "projects/{projectID}/logs/{logID}" + stdLoggers map[Severity]*log.Logger + bundler *bundler.Bundler + + // Options + commonResource *mrpb.MonitoredResource + commonLabels map[string]string +} + +// A LoggerOption is a configuration option for a Logger. +type LoggerOption interface { + set(*Logger) +} + +// CommonResource sets the monitored resource associated with all log entries +// written from a Logger. If not provided, a resource of type "global" is used. +// This value can be overridden by setting an Entry's Resource field. +func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } + +type commonResource struct{ *mrpb.MonitoredResource } + +func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } + +// CommonLabels are labels that apply to all log entries written from a Logger, +// so that you don't have to repeat them in each log entry's Labels field. 
If +// any of the log entries contains a (key, value) with the same key that is in +// CommonLabels, then the entry's (key, value) overrides the one in +// CommonLabels. +func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } + +type commonLabels map[string]string + +func (c commonLabels) set(l *Logger) { l.commonLabels = c } + +// DelayThreshold is the maximum amount of time that an entry should remain +// buffered in memory before a call to the logging service is triggered. Larger +// values of DelayThreshold will generally result in fewer calls to the logging +// service, while increasing the risk that log entries will be lost if the +// process crashes. +// The default is DefaultDelayThreshold. +func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } + +type delayThreshold time.Duration + +func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } + +// EntryCountThreshold is the maximum number of entries that will be buffered +// in memory before a call to the logging service is triggered. Larger values +// will generally result in fewer calls to the logging service, while +// increasing both memory consumption and the risk that log entries will be +// lost if the process crashes. +// The default is DefaultEntryCountThreshold. +func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } + +type entryCountThreshold int + +func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } + +// EntryByteThreshold is the maximum number of bytes of entries that will be +// buffered in memory before a call to the logging service is triggered. See +// EntryCountThreshold for a discussion of the tradeoffs involved in setting +// this option. +// The default is DefaultEntryByteThreshold. +func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } + +type entryByteThreshold int + +func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } + +// EntryByteLimit is the maximum number of bytes of entries that will be sent +// in a single call to the logging service. This option limits the size of a +// single RPC payload, to account for network or service issues with large +// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has +// no effect. +// The default is zero, meaning there is no limit. +func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } + +type entryByteLimit int + +func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } + +// BufferedByteLimit is the maximum number of bytes that the Logger will keep +// in memory before returning ErrOverflow. This option limits the total memory +// consumption of the Logger (but note that each Logger has its own, separate +// limit). It is possible to reach BufferedByteLimit even if it is larger than +// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter +// two options may be enqueued (and hence occupying memory) while new log +// entries are being added. +// The default is DefaultBufferedByteLimit. +func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } + +type bufferedByteLimit int + +func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } + +// Logger returns a Logger that will write entries with the given log ID, such as +// "syslog". 
A log ID must be less than 512 characters long and can only +// include the following characters: upper and lower case alphanumeric +// characters: [A-Za-z0-9]; and punctuation characters: forward-slash, +// underscore, hyphen, and period. +func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { + l := &Logger{ + client: c, + logName: internal.LogPath(c.parent(), logID), + commonResource: &mrpb.MonitoredResource{Type: "global"}, + } + // TODO(jba): determine the right context for the bundle handler. + ctx := context.TODO() + l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { + l.writeLogEntries(ctx, entries.([]*logpb.LogEntry)) + }) + l.bundler.DelayThreshold = DefaultDelayThreshold + l.bundler.BundleCountThreshold = DefaultEntryCountThreshold + l.bundler.BundleByteThreshold = DefaultEntryByteThreshold + l.bundler.BufferedByteLimit = DefaultBufferedByteLimit + for _, opt := range opts { + opt.set(l) + } + + l.stdLoggers = map[Severity]*log.Logger{} + for s := range severityName { + l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) + } + c.loggers.Add(1) + go func() { + defer c.loggers.Done() + <-c.donec + l.bundler.Close() + }() + return l +} + +type severityWriter struct { + l *Logger + s Severity +} + +func (w severityWriter) Write(p []byte) (n int, err error) { + w.l.Log(Entry{ + Severity: w.s, + Payload: string(p), + }) + return len(p), nil +} + +// Close closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + close(c.donec) // close Logger bundlers + c.loggers.Wait() // wait for all bundlers to flush and close + // Now there can be no more errors. + close(c.errc) // terminate error goroutine + // Return only the first error. Since all clients share an underlying connection, + // Closes after the first always report a "connection is closing" error. + err := c.client.Close() + c.closed = true + return err +} + +// Severity is the severity of the event described in a log entry. These +// guideline severity levels are ordered, with numerically smaller levels +// treated as less severe than numerically larger levels. +type Severity int + +const ( + // Default means the log entry has no assigned severity level. + Default = Severity(logtypepb.LogSeverity_DEFAULT) + // Debug means debug or trace information. + Debug = Severity(logtypepb.LogSeverity_DEBUG) + // Info means routine information, such as ongoing status or performance. + Info = Severity(logtypepb.LogSeverity_INFO) + // Notice means normal but significant events, such as start up, shut down, or configuration. + Notice = Severity(logtypepb.LogSeverity_NOTICE) + // Warning means events that might cause problems. + Warning = Severity(logtypepb.LogSeverity_WARNING) + // Error means events that are likely to cause problems. + Error = Severity(logtypepb.LogSeverity_ERROR) + // Critical means events that cause more severe problems or brief outages. + Critical = Severity(logtypepb.LogSeverity_CRITICAL) + // Alert means a person must take an action immediately. + Alert = Severity(logtypepb.LogSeverity_ALERT) + // Emergency means one or more systems are unusable. + Emergency = Severity(logtypepb.LogSeverity_EMERGENCY) +) + +var severityName = map[Severity]string{ + Default: "Default", + Debug: "Debug", + Info: "Info", + Notice: "Notice", + Warning: "Warning", + Error: "Error", + Critical: "Critical", + Alert: "Alert", + Emergency: "Emergency", +} + +// String converts a severity level to a string. 
+func (v Severity) String() string { + // same as proto.EnumName + s, ok := severityName[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// ParseSeverity returns the Severity whose name equals s, ignoring case. It +// returns Default if no Severity matches. +func ParseSeverity(s string) Severity { + sl := strings.ToLower(s) + for sev, name := range severityName { + if strings.ToLower(name) == sl { + return sev + } + } + return Default +} + +// Entry is a log entry. +// See https://cloud.google.com/logging/docs/view/logs_index for more about entries. +type Entry struct { + // Timestamp is the time of the entry. If zero, the current time is used. + Timestamp time.Time + + // Severity is the entry's severity level. + // The zero value is Default. + Severity Severity + + // Payload must be either a string or something that + // marshals via the encoding/json package to a JSON object + // (and not any other type of JSON value). + Payload interface{} + + // Labels optionally specifies key/value labels for the log entry. + // The Logger.Log method takes ownership of this map. See Logger.CommonLabels + // for more about labels. + Labels map[string]string + + // InsertID is a unique ID for the log entry. If you provide this field, + // the logging service considers other log entries in the same log with the + // same ID as duplicates which can be removed. If omitted, the logging + // service will generate a unique ID for this log entry. Note that because + // this client retries RPCs automatically, it is possible (though unlikely) + // that an Entry without an InsertID will be written more than once. + InsertID string + + // HTTPRequest optionally specifies metadata about the HTTP request + // associated with this log entry, if applicable. It is optional. + HTTPRequest *HTTPRequest + + // Operation optionally provides information about an operation associated + // with the log entry, if applicable. + Operation *logpb.LogEntryOperation + + // LogName is the full log name, in the form + // "projects/{ProjectID}/logs/{LogID}". It is set by the client when + // reading entries. It is an error to set it when writing entries. + LogName string + + // Resource is the monitored resource associated with the entry. It is set + // by the client when reading entries. It is an error to set it when + // writing entries. + Resource *mrpb.MonitoredResource +} + +// HTTPRequest contains an http.Request as well as additional +// information about the request and its response. +type HTTPRequest struct { + // Request is the http.Request passed to the handler. + Request *http.Request + + // RequestSize is the size of the HTTP request message in bytes, including + // the request headers and the request body. + RequestSize int64 + + // Status is the response code indicating the status of the response. + // Examples: 200, 404. + Status int + + // ResponseSize is the size of the HTTP response message sent back to the client, in bytes, + // including the response headers and the response body. + ResponseSize int64 + + // Latency is the request processing latency on the server, from the time the request was + // received until the response was sent. + Latency time.Duration + + // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the + // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". + RemoteIP string + + // CacheHit reports whether an entity was served from cache (with or without + // validation). 
+ CacheHit bool + + // CacheValidatedWithOriginServer reports whether the response was + // validated with the origin server before being served from cache. This + // field is only meaningful if CacheHit is true. + CacheValidatedWithOriginServer bool +} + +func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { + if r == nil { + return nil + } + if r.Request == nil { + panic("HTTPRequest must have a non-nil Request") + } + u := *r.Request.URL + u.Fragment = "" + return &logtypepb.HttpRequest{ + RequestMethod: r.Request.Method, + RequestUrl: u.String(), + RequestSize: r.RequestSize, + Status: int32(r.Status), + ResponseSize: r.ResponseSize, + Latency: ptypes.DurationProto(r.Latency), + UserAgent: r.Request.UserAgent(), + RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? + Referer: r.Request.Referer(), + CacheHit: r.CacheHit, + CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, + } +} + +// toProtoStruct converts v, which must marshal into a JSON object, +// into a Google Struct proto. +func toProtoStruct(v interface{}) (*structpb.Struct, error) { + // Fast path: if v is already a *structpb.Struct, nothing to do. + if s, ok := v.(*structpb.Struct); ok { + return s, nil + } + // v is a Go struct that supports JSON marshalling. We want a Struct + // protobuf. Some day we may have a more direct way to get there, but right + // now the only way is to marshal the Go struct to JSON, unmarshal into a + // map, and then build the Struct proto from the map. + jb, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("logging: json.Marshal: %v", err) + } + var m map[string]interface{} + err = json.Unmarshal(jb, &m) + if err != nil { + return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) + } + return jsonMapToProtoStruct(m), nil +} + +func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { + fields := map[string]*structpb.Value{} + for k, v := range m { + fields[k] = jsonValueToStructValue(v) + } + return &structpb.Struct{Fields: fields} +} + +func jsonValueToStructValue(v interface{}) *structpb.Value { + switch x := v.(type) { + case bool: + return &structpb.Value{Kind: &structpb.Value_BoolValue{x}} + case float64: + return &structpb.Value{Kind: &structpb.Value_NumberValue{x}} + case string: + return &structpb.Value{Kind: &structpb.Value_StringValue{x}} + case nil: + return &structpb.Value{Kind: &structpb.Value_NullValue{}} + case map[string]interface{}: + return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}} + case []interface{}: + var vals []*structpb.Value + for _, e := range x { + vals = append(vals, jsonValueToStructValue(e)) + } + return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}} + default: + panic(fmt.Sprintf("bad type %T for JSON value", v)) + } +} + +// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow +// and will block, it is intended primarily for debugging or critical errors. +// Prefer Log for most uses. +// TODO(jba): come up with a better name (LogNow?) or eliminate. +func (l *Logger) LogSync(ctx context.Context, e Entry) error { + ent, err := toLogEntry(e) + if err != nil { + return err + } + _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// Log buffers the Entry for output to the logging service. It never blocks. 
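+// Errors (for example, a Payload that cannot be marshaled to JSON, or an
+// overflowing buffer, reported as ErrOverflow) are delivered asynchronously
+// to the Client's OnError function rather than returned to the caller.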
+func (l *Logger) Log(e Entry) {
+	ent, err := toLogEntry(e)
+	if err != nil {
+		l.error(err)
+		return
+	}
+	if err := l.bundler.Add(ent, proto.Size(ent)); err != nil {
+		l.error(err)
+	}
+}
+
+// Flush blocks until all currently buffered log entries are sent.
+func (l *Logger) Flush() {
+	l.bundler.Flush()
+}
+
+func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) {
+	req := &logpb.WriteLogEntriesRequest{
+		LogName:  l.logName,
+		Resource: l.commonResource,
+		Labels:   l.commonLabels,
+		Entries:  entries,
+	}
+	_, err := l.client.client.WriteLogEntries(ctx, req)
+	if err != nil {
+		l.error(err)
+	}
+}
+
+// error puts the error on the client's error channel
+// without blocking.
+func (l *Logger) error(err error) {
+	select {
+	case l.client.errc <- err:
+	default:
+	}
+}
+
+// StandardLogger returns a *log.Logger for the provided severity.
+//
+// This method is cheap. A single log.Logger is pre-allocated for each
+// severity level in each Logger. Callers may mutate the returned log.Logger
+// (for example by calling SetFlags or SetPrefix).
+func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
+
+func trunc32(i int) int32 {
+	if i > math.MaxInt32 {
+		i = math.MaxInt32
+	}
+	return int32(i)
+}
+
+func toLogEntry(e Entry) (*logpb.LogEntry, error) {
+	if e.LogName != "" {
+		return nil, errors.New("logging: Entry.LogName should not be set when writing")
+	}
+	t := e.Timestamp
+	if t.IsZero() {
+		t = now()
+	}
+	ts, err := ptypes.TimestampProto(t)
+	if err != nil {
+		return nil, err
+	}
+	ent := &logpb.LogEntry{
+		Timestamp:   ts,
+		Severity:    logtypepb.LogSeverity(e.Severity),
+		InsertId:    e.InsertID,
+		HttpRequest: fromHTTPRequest(e.HTTPRequest),
+		Operation:   e.Operation,
+		Labels:      e.Labels,
+	}
+
+	switch p := e.Payload.(type) {
+	case string:
+		ent.Payload = &logpb.LogEntry_TextPayload{p}
+	default:
+		s, err := toProtoStruct(p)
+		if err != nil {
+			return nil, err
+		}
+		ent.Payload = &logpb.LogEntry_JsonPayload{s}
+	}
+	return ent, nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 0000000..e3d9a64
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
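
To see how the vendored logging API above fits together (NewClient, the
LoggerOption values, Logger.Log, Flush, and Close), here is a minimal usage
sketch. It exercises only calls defined in the files above; "my-project" and
"my-log" are placeholder IDs, and the option values are arbitrary.

	package main

	import (
		"time"

		"cloud.google.com/go/logging"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		client, err := logging.NewClient(ctx, "my-project")
		if err != nil {
			// TODO: Handle error.
		}
		// Send a batch after 500 buffered entries or 5 seconds, whichever comes first.
		lg := client.Logger("my-log",
			logging.EntryCountThreshold(500),
			logging.DelayThreshold(5*time.Second),
		)
		lg.Log(logging.Entry{Payload: "something happened", Severity: logging.Info})
		lg.Flush()     // send anything still buffered
		client.Close() // flush remaining entries and close the connection
	}
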
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 0000000..e25e382
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,9 @@
+# go-ansiterm
+
+This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform dependent.
+
+For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor-up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go), which validates that the expected events are produced and called, and a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
new file mode 100644
index 0000000..96504a3
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/constants.go
@@ -0,0 +1,188 @@
+package ansiterm
+
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+	// ECMA-48 Set Graphics Rendition
+	// Note:
+	// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+	// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+	// -- Windows does not expose the per-window cursor (i.e., caret) blink times
+	ANSI_SGR_RESET              = 0
+	ANSI_SGR_BOLD               = 1
+	ANSI_SGR_DIM                = 2
+	_ANSI_SGR_ITALIC            = 3
+	ANSI_SGR_UNDERLINE          = 4
+	_ANSI_SGR_BLINKSLOW         = 5
+	_ANSI_SGR_BLINKFAST         = 6
+	ANSI_SGR_REVERSE            = 7
+	_ANSI_SGR_INVISIBLE         = 8
+	_ANSI_SGR_LINETHROUGH       = 9
+	_ANSI_SGR_FONT_00           = 10
+	_ANSI_SGR_FONT_01           = 11
+	_ANSI_SGR_FONT_02           = 12
+	_ANSI_SGR_FONT_03           = 13
+	_ANSI_SGR_FONT_04           = 14
+	_ANSI_SGR_FONT_05           = 15
+	_ANSI_SGR_FONT_06           = 16
+	_ANSI_SGR_FONT_07           = 17
+	_ANSI_SGR_FONT_08           = 18
+	_ANSI_SGR_FONT_09           = 19
+	_ANSI_SGR_FONT_10           = 20
+	_ANSI_SGR_DOUBLEUNDERLINE   = 21
+	ANSI_SGR_BOLD_DIM_OFF       = 22
+	_ANSI_SGR_ITALIC_OFF        = 23
+	ANSI_SGR_UNDERLINE_OFF      = 24
+	_ANSI_SGR_BLINK_OFF         = 25
+	_ANSI_SGR_RESERVED_00       = 26
+	ANSI_SGR_REVERSE_OFF        = 27
+	_ANSI_SGR_INVISIBLE_OFF     = 28
+	_ANSI_SGR_LINETHROUGH_OFF   = 29
+	ANSI_SGR_FOREGROUND_BLACK   = 30
+	ANSI_SGR_FOREGROUND_RED     = 31
+	ANSI_SGR_FOREGROUND_GREEN   = 32
+	ANSI_SGR_FOREGROUND_YELLOW  = 33
+	ANSI_SGR_FOREGROUND_BLUE    = 34
+	ANSI_SGR_FOREGROUND_MAGENTA = 35
+	ANSI_SGR_FOREGROUND_CYAN    = 36
+	ANSI_SGR_FOREGROUND_WHITE   = 37
+	_ANSI_SGR_RESERVED_01       = 38
+	ANSI_SGR_FOREGROUND_DEFAULT = 39
+	ANSI_SGR_BACKGROUND_BLACK   = 40
+	ANSI_SGR_BACKGROUND_RED     = 41
+	ANSI_SGR_BACKGROUND_GREEN   = 42
+	ANSI_SGR_BACKGROUND_YELLOW  = 43
+	ANSI_SGR_BACKGROUND_BLUE    = 44
+	ANSI_SGR_BACKGROUND_MAGENTA = 45
+	ANSI_SGR_BACKGROUND_CYAN    = 46
+	ANSI_SGR_BACKGROUND_WHITE   = 47
+	_ANSI_SGR_RESERVED_02       = 48
+	ANSI_SGR_BACKGROUND_DEFAULT = 49
+	// 50 - 65: Unsupported
+
+	ANSI_MAX_CMD_LENGTH = 4096
+
+	MAX_INPUT_EVENTS = 128
+	DEFAULT_WIDTH    = 80
+	DEFAULT_HEIGHT   = 24
+
+	ANSI_BEL              = 0x07
+	ANSI_BACKSPACE        = 0x08
+	ANSI_TAB              = 0x09
+	ANSI_LINE_FEED        = 0x0A
+	ANSI_VERTICAL_TAB     = 0x0B
+	ANSI_FORM_FEED        = 0x0C
+	ANSI_CARRIAGE_RETURN  = 0x0D
+	ANSI_ESCAPE_PRIMARY   = 0x1B
+	ANSI_ESCAPE_SECONDARY = 0x5B
+	ANSI_OSC_STRING_ENTRY = 0x5D
+	ANSI_COMMAND_FIRST    = 0x40
+	ANSI_COMMAND_LAST     = 0x7E
+	DCS_ENTRY             = 0x90
+	CSI_ENTRY             = 0x9B
+	OSC_STRING            = 0x9D
+	ANSI_PARAMETER_SEP    = ";"
+	ANSI_CMD_G0           = '('
+	ANSI_CMD_G1           = ')'
+	ANSI_CMD_G2           = '*'
+	ANSI_CMD_G3           = '+'
+	ANSI_CMD_DECPNM       = '>'
+	ANSI_CMD_DECPAM       = '='
+	ANSI_CMD_OSC          = ']'
+	ANSI_CMD_STR_TERM     = '\\'
+
+	KEY_CONTROL_PARAM_2 = ";2"
+	KEY_CONTROL_PARAM_3 = ";3"
+	KEY_CONTROL_PARAM_4 = ";4"
+	KEY_CONTROL_PARAM_5 = ";5"
+	KEY_CONTROL_PARAM_6 = ";6"
+	KEY_CONTROL_PARAM_7 = ";7"
+	KEY_CONTROL_PARAM_8 = ";8"
+	KEY_ESC_CSI         = "\x1B["
+	KEY_ESC_N           = "\x1BN"
+	KEY_ESC_O           = "\x1BO"
+
+	FILL_CHARACTER = ' '
+)
+
+func getByteRange(start byte, end byte) []byte {
+	bytes := make([]byte, 0, 32)
+	for i := start; i <= end; i++ {
+		bytes = append(bytes, byte(i))
+	}
+
+	return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE		  20+A0 hex  Always and everywhere a blank space
+// Intermediate	  20-2F hex   !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters	  30-3F hex  0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex  0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase	  40-5F hex  @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase	  60-7E hex  `abcdefghijklmnopqrstuvwxyz{|}~
var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics	  40-7E hex  (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+	escapeToGroundBytes := getByteRange(0x30, 0x4F)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+	return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+	executeBytes := getByteRange(0x00, 0x17)
+	executeBytes = append(executeBytes, 0x19)
+	executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+	return executeBytes
+}
+
+func getToGroundBytes() []byte {
+	groundBytes := []byte{0x18}
+	groundBytes = append(groundBytes, 0x1A)
+	groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+	groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 0000000..8d66e77 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 0000000..1bd6057 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + logger.Infof("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 0000000..4be35c5 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + logger.Infof("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 0000000..2189eb6 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + logger.Infof("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 0000000..7b1b9ad --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + logger.Infof("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 0000000..98087b3 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) error + + // Erase in Line + EL(int) error + + // Insert 
Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 0000000..52451e9 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 0000000..24062d4 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + logger.Infof("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 0000000..169f68d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,136 @@ +package ansiterm + +import ( + "errors" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiParser.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.InfoLevel, + } + + parser := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + + parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} + parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} + parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} + parser.escape = escapeState{baseState{name: "Escape", parser: parser}} + parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} + 
parser.error = errorState{baseState{name: "Error", parser: parser}}
+	parser.ground = groundState{baseState{name: "Ground", parser: parser}}
+	parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
+
+	parser.stateMap = []state{
+		parser.csiEntry,
+		parser.csiParam,
+		parser.dcsEntry,
+		parser.escape,
+		parser.escapeIntermediate,
+		parser.error,
+		parser.ground,
+		parser.oscString,
+	}
+
+	parser.currState = getState(initialState, parser.stateMap)
+
+	logger.Infof("CreateParser: parser %p", parser)
+	return parser
+}
+
+func getState(name string, states []state) state {
+	for _, el := range states {
+		if el.Name() == name {
+			return el
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+	for i, b := range bytes {
+		if err := ap.handle(b); err != nil {
+			return i, err
+		}
+	}
+
+	return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+	ap.context.currentChar = b
+	newState, err := ap.currState.Handle(b)
+	if err != nil {
+		return err
+	}
+
+	if newState == nil {
+		logger.Warning("newState is nil")
+		return errors.New("new state of 'nil' is invalid")
+	}
+
+	if newState != ap.currState {
+		if err := ap.changeState(newState); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+	logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+	// Exit old state
+	if err := ap.currState.Exit(); err != nil {
+		logger.Infof("Exit state '%s' failed with: '%v'", ap.currState.Name(), err)
+		return err
+	}
+
+	// Perform transition action
+	if err := ap.currState.Transition(newState); err != nil {
+		logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+		return err
+	}
+
+	// Enter new state
+	if err := newState.Enter(); err != nil {
+		logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		return err
+	}
+
+	ap.currState = newState
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 0000000..8b69a67
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,103 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+	paramBuff := make([]byte, 0, 0)
+	params := []string{}
+
+	for _, v := range bytes {
+		if v == ';' {
+			if len(paramBuff) > 0 {
+				// Completed parameter, append it to the list
+				s := string(paramBuff)
+				params = append(params, s)
+				paramBuff = make([]byte, 0, 0)
+			}
+		} else {
+			paramBuff = append(paramBuff, v)
+		}
+	}
+
+	// Last parameter may not be terminated with ';'
+	if len(paramBuff) > 0 {
+		s := string(paramBuff)
+		params = append(params, s)
+	}
+
+	logger.Infof("Parsed params: %v with length: %d", params, len(params))
+	return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+	return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+	i := getInts(params, 1, dflt)[0]
+	logger.Infof("getInt: %v", i)
+	return i
+}
+
+func getInts(params []string, minCount int, dflt int) []int {
+	ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
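+		// For example, an explicit "0" in CSI 0 A (cursor up) is replaced by
+		// the caller-supplied default of 1, matching VT100 behavior.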
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	logger.Infof("getInts: %v", ints)
+
+	return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 0000000..58750a2
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,122 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	logger.Infof("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	logger.Infof("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
+	logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+
+	logger.Infof("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return ap.eventHandler.ED(param)
+	case "K":
+		param := getEraseParam(params)
+		return ap.eventHandler.EL(param)
+	case "L":
+		return ap.eventHandler.IL(getInt(params, 1))
+	case "M":
+		return ap.eventHandler.DL(getInt(params, 1))
+	case "P":
+		return ap.eventHandler.DCH(getInt(params, 1))
+	case "S":
+		return ap.eventHandler.SU(getInt(params, 1))
+	case "T":
+		return ap.eventHandler.SD(getInt(params, 1))
+	case "c":
+		return ap.eventHandler.DA(params)
+	case "d":
+		return ap.eventHandler.VPA(getInt(params, 1))
+	case "f":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.HVP(x, y)
+	case "h":
+		return ap.hDispatch(params)
+	case "l":
+		return ap.lDispatch(params)
+	case "m":
+		return ap.eventHandler.SGR(getInts(params, 1, 0))
+	case "r":
+		ints := getInts(params, 2, 1)
+		top, bottom := ints[0], ints[1]
+		return ap.eventHandler.DECSTBM(top, bottom)
+	default:
+		logger.Errorf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
+		return nil
+	}
+}
+
+func (ap *AnsiParser) print() error {
+	return ap.eventHandler.Print(ap.context.currentChar)
+}
+
+func (ap *AnsiParser) clear() error {
+	ap.context = &ansiContext{}
+	return nil
+}
+
+func (ap *AnsiParser) execute() error {
+	return ap.eventHandler.Execute(ap.context.currentChar)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
new file mode 100644
index 0000000..f2ea1fc
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/states.go
@@ -0,0 +1,71 @@
+package ansiterm
+
+type stateID int
+
+type state interface {
+	Enter() error
+	Exit() error
+	Handle(byte) (state, error)
+	Name() string
+	Transition(state) error
+}
+
+type baseState struct {
+	name   string
+	parser *AnsiParser
+}
+
+func (base baseState) Enter() error {
+	return nil
+}
+
+func (base baseState) Exit() error {
+	return nil
+}
+
+func (base baseState) Handle(b byte) (s state, e error) {
+
+	switch {
+	case b == CSI_ENTRY:
+		return base.parser.csiEntry, nil
+	case b == DCS_ENTRY:
+		return base.parser.dcsEntry, nil
+	case b == ANSI_ESCAPE_PRIMARY:
+		return base.parser.escape, nil
+	case b == OSC_STRING:
+		return base.parser.oscString, nil
+	case sliceContains(toGroundBytes, b):
+		return base.parser.ground, nil
+	}
+
+	return nil, nil
+}
+
+func (base baseState) Name() string {
+	return base.name
+}
+
+func (base baseState) Transition(s state) error {
+	if s == base.parser.ground {
+		execBytes := []byte{0x18}
+		execBytes = append(execBytes, 0x1A)
+		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
+		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
+		execBytes = append(execBytes, 0x99)
+		execBytes = append(execBytes, 0x9A)
+
+		if sliceContains(execBytes, base.parser.context.currentChar) {
+			return base.parser.execute()
+		}
+	}
+
+	return nil
+}
+
+type dcsEntryState struct {
+	baseState
+}
+
+type errorState struct {
+	baseState
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
new file mode 100644
index 0000000..3921144
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/utilities.go
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func sliceContains(bytes []byte, b byte) bool {
+	for _, v := range bytes {
+		if v == b {
+			return true
+		}
+	}
+
+	return false
+}
+
+func convertBytesToInteger(bytes []byte) int {
+	s := string(bytes)
+	i, _ := strconv.Atoi(s)
+	return i
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
new file mode 100644
index 0000000..daf2f06
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -0,0 +1,182 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/Azure/go-ansiterm"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. 
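+//
+// For example, 'H' (CUP) and 'm' (SGR) fall within the command range, while
+// '[' (ANSI_ESCAPE_SECONDARY) is explicitly excluded because it introduces a
+// CSI sequence rather than terminating one.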
+func isAnsiCommandChar(b byte) bool {
+	switch {
+	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+		return true
+	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+		// non-CSI escape sequence terminator
+		return true
+	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+		// String escape sequence terminator
+		return true
+	}
+	return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+	hex := make([]string, len(b))
+	for i, ch := range b {
+		hex[i] = fmt.Sprintf("%X", ch)
+	}
+	return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+	if n < min {
+		return min
+	} else if n > max {
+		return max
+	} else {
+		return n
+	}
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+	var file *os.File
+	switch nFile {
+	case syscall.STD_INPUT_HANDLE:
+		file = os.Stdin
+	case syscall.STD_OUTPUT_HANDLE:
+		file = os.Stdout
+	case syscall.STD_ERROR_HANDLE:
+		file = os.Stderr
+	default:
+		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+	}
+
+	fd, err := syscall.GetStdHandle(nFile)
+	if err != nil {
+		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
+	}
+
+	return file, uintptr(fd)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 0000000..462d92f
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,322 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+// variables) that the pointers reference *before* the API completes.
+//
+// As a result, in those cases, the code must hint that the variables remain active by invoking the
+// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+// require unsafe pointers.
+//
+// If you add or modify methods, ENSURE protection of local variables through the "use" function to inform
+// the garbage collector that the variables remain in use if:
+//
+// -- The value is not a pointer (e.g., int32, struct)
+// -- The value is not referenced by the method after passing the pointer to Windows
+//
+// See http://golang.org/doc/go1.3.
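+//
+// A minimal sketch of the pattern (someProc and value are illustrative, not
+// real members of this package):
+//
+//	r1, r2, err := someProc.Call(uintptr(unsafe.Pointer(&value)))
+//	use(value) // hint that value must stay live until the call returns
+//	return checkError(r1, r2, err)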
+//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
+	KEY_EVENT                = 0x0001
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+	MENU_EVENT               = 0x0008
+	FOCUS_EVENT              = 0x0010
+
+	// WaitForSingleObject return codes
+	WAIT_ABANDONED = 0x00000080
+	WAIT_FAILED    = 0xFFFFFFFF
+	WAIT_SIGNALED  = 0x00000000
+	WAIT_TIMEOUT   = 0x00000102
+
+	// WaitForSingleObject wait duration
+	WAIT_INFINITE       = 0xFFFFFFFF
+	WAIT_ONE_SECOND     = 1000
+	WAIT_HALF_SECOND    = 500
+	WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+	CHAR_INFO struct {
+		UnicodeChar uint16
+		Attributes  uint16
+	}
+
+	CONSOLE_CURSOR_INFO struct {
+		Size    uint32
+		Visible int32
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        uint16
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+
+	COORD struct {
+		X int16
+		Y int16
+	}
+
+	SMALL_RECT struct {
+		Left   int16
+		Top    int16
+		Right  int16
+		Bottom int16
+	}
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case; it is also the largest.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	INPUT_RECORD struct {
+		EventType uint16
+		KeyEvent  KEY_EVENT_RECORD
+	}
+
+	KEY_EVENT_RECORD struct {
+		KeyDown         int32
+		RepeatCount     uint16
+		VirtualKeyCode  uint16
+		VirtualScanCode uint16
+		UnicodeChar     uint16
+		ControlKeyState uint32
+	}
+
+	WINDOW_BUFFER_SIZE struct {
+		Size COORD
+	}
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+	if f {
+		return int32(1)
+	} else {
+		return int32(0)
+	}
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
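+//
+// A typical use is to set or clear individual mode flags, e.g. (sketch,
+// assuming fd is a valid console handle):
+//
+//	mode, _ := GetConsoleMode(fd)
+//	_ = SetConsoleMode(fd, mode&^ENABLE_ECHO_INPUT) // disable input echo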
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
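+//
+// Callers typically pass a small INPUT_RECORD buffer and dispatch on
+// EventType, e.g. (sketch):
+//
+//	records := make([]INPUT_RECORD, 16)
+//	var read uint32
+//	if err := ReadConsoleInput(fd, records, &read); err == nil {
+//		for _, rec := range records[:read] {
+//			if rec.EventType == KEY_EVENT {
+//				// handle rec.KeyEvent
+//			}
+//		}
+//	}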
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 0000000..cbec8f7 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
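+//
+// For example, ANSI_SGR_FOREGROUND_RED clears the foreground color bits and
+// sets FOREGROUND_RED, while ANSI_SGR_REVERSE only flips the returned
+// inverted flag and leaves the attribute bits untouched.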
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 0000000..f015723 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 0000000..244b5fa --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, 
coordEnd.Y = xEnd, yEnd-1
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent = yEnd
+	}
+
+	// Clear remaining partial ending line
+	coordStart.X, coordStart.Y = xCurrent, yCurrent
+	coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+	err = h.clearRect(attributes, coordStart, coordEnd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+	width := toCoord.X - fromCoord.X + 1
+	height := toCoord.Y - fromCoord.Y + 1
+	size := uint32(width) * uint32(height)
+
+	if size <= 0 {
+		return nil
+	}
+
+	buffer := make([]CHAR_INFO, size)
+
+	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+	for i := 0; i < int(size); i++ {
+		buffer[i] = char
+	}
+
+	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 0000000..706d270
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
+	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+	if top >= bottom {
+		top = window.Top
+		bottom = window.Bottom
+	}
+	return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+	return h.scrollUp(-param)
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	start := info.CursorPosition.Y
+	sr := h.effectiveSr(info.Window)
+	// Lines cannot be inserted or deleted outside the scrolling region.
+	if start >= sr.top && start <= sr.bottom {
+		sr.top = start
+		return h.scroll(param, sr, info)
+	} else {
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+	return h.deleteLines(-param)
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
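+// A positive param scrolls the region's contents up (toward the top of the
+// buffer); a negative param scrolls down, which is how scrollDown and
+// insertLines above are expressed in terms of scrollUp and deleteLines.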
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. +func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 0000000..afa7635 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
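+//
+// For example, addInRange(5, 10, 0, 12) returns 12: the incremented value 15
+// exceeds the supplied max and is clamped.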
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 0000000..4d858ed --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,726 @@ +// +build windows + +package winterm + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD +} + +func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("winEventHandler.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + return &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + logger.Info("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. + logger.Info("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. 
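+// Windows consoles reset the column to zero when a line feed is written, so
+// after emitting the LF the handler restores the cursor column if it was
+// non-zero (see the in-line comments below).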
+func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + logger.Info("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + logger.Info("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + 
h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + logger.Info("set buffer failed:", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + logger.Info("set window failed:", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + logger.Info("set buffer failed:", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + logger.Infof("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + logger.Infof("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Info("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	logger.Info("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		logger.Infof("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 0000000..5a8e332
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+                    Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 0000000..e861c0c
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages.
This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/mojombo/toml + +Compatible with TOML version +[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) + +Documentation: http://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) + + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. + +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. + diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000..6c7d398 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,492 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +var e = fmt.Errorf + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) 
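+//
+// A short sketch (the Server field and serverConfig type are illustrative):
+//
+//	var cfg struct{ Server Primitive }
+//	md, _ := Decode(blob, &cfg)
+//	var srv serverConfig
+//	_ = md.PrimitiveDecode(cfg.Server, &srv) // decoded later, on demand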
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, rvalue(v)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. 
+ if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("Unsupported type '%s'.", rv.Kind()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("Unsupported type '%s'.", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + return mismatch(rv, "map", mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return e("Type mismatch for '%s.%s': %s", + rv.Type().String(), f.name, err) + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
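+				// reflect cannot Set unexported struct fields, so rather
+				// than panic deep inside the reflect package, surface a
+				// descriptive error here.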
+ return e("Field '%s.%s' is unexported, and therefore cannot "+ + "be loaded with reflection.", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + return badtype("slice", data) + } + sliceLen := datav.Len() + if rv.IsNil() { + rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen)) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("Value '%d' is out of range for int8.", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("Value '%d' is out of range for int16.", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("Value '%d' is out of range for int32.", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+			case reflect.Uint8:
+				if num < 0 || unum > math.MaxUint8 {
+					return e("Value '%d' is out of range for uint8.", num)
+				}
+			case reflect.Uint16:
+				if num < 0 || unum > math.MaxUint16 {
+					return e("Value '%d' is out of range for uint16.", num)
+				}
+			case reflect.Uint32:
+				if num < 0 || unum > math.MaxUint32 {
+					return e("Value '%d' is out of range for uint32.", num)
+				}
+			}
+			rv.SetUint(unum)
+		} else {
+			panic("unreachable")
+		}
+		return nil
+	}
+	return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+	if b, ok := data.(bool); ok {
+		rv.SetBool(b)
+		return nil
+	}
+	return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+	rv.Set(reflect.ValueOf(data))
+	return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+	var s string
+	switch sdata := data.(type) {
+	case TextMarshaler:
+		text, err := sdata.MarshalText()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case fmt.Stringer:
+		s = sdata.String()
+	case string:
+		s = sdata
+	case bool:
+		s = fmt.Sprintf("%v", sdata)
+	case int64:
+		s = fmt.Sprintf("%d", sdata)
+	case float64:
+		s = fmt.Sprintf("%f", sdata)
+	default:
+		return badtype("primitive (string-like)", data)
+	}
+	if err := v.UnmarshalText([]byte(s)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+	return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		if v.CanAddr() {
+			pv := v.Addr()
+			if _, ok := pv.Interface().(TextUnmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	if _, ok := rv.Interface().(TextUnmarshaler); ok {
+		return true
+	}
+	return false
+}
+
+func badtype(expected string, data interface{}) error {
+	return e("Expected %s but found '%T'.", expected, data)
+}
+
+func mismatch(user reflect.Value, expected string, data interface{}) error {
+	return e("Type mismatch for %s. Expected %s but found '%T'.",
+		user.Type().String(), expected, data)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 0000000..ef6f545
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,122 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	keys    []Key
+	decoded map[string]bool
+	context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchically. e.g.,
+//
+//	// access the TOML key 'a.b.c'
+//	IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case
+// sensitive.
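+//
+// A small illustrative sketch (the TOML keys here are hypothetical):
+//
+//	md, _ := Decode(blob, &cfg)
+//	if md.IsDefined("servers", "alpha", "ip") {
+//		// the key 'servers.alpha.ip' was present in the document
+//	}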
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } else { + return k[i] + } +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 0000000..fe26800 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/mojombo/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 0000000..64e8c47 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,496 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "can't encode array with mixed element types") + errArrayNilElement = errors.New( + "can't encode array with nil element") + errNonString = errors.New( + "can't encode a map with non-string key type") + errAnonNonStruct = errors.New( + "can't encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "TOML array element can't contain a table") + errNoKey = errors.New( + "top-level values must be a Go map or struct") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
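+//
+// A minimal, illustrative sketch of typical use (the value being encoded
+// and the destination writer are assumptions, not part of this package):
+//
+//	var buf bytes.Buffer
+//	if err := NewEncoder(&buf).Encode(myConfig); err != nil {
+//		/* handle error */
+//	}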
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case. Time needs to be in ISO8601 format.
+	// Special case. If we can marshal the type to text, then we use that.
+	// Basically, this prevents the encoder from handling these types as
+	// generic structs (or whatever the underlying type of a TextMarshaler is).
+	switch rv.Interface().(type) {
+	case time.Time, TextMarshaler:
+		enc.keyEqElement(key, rv)
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.keyEqElement(key, rv)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.keyEqElement(key, rv)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		panic(e("Unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time:
+		// Special case time.Time as a primitive. Has to come before
+		// TextMarshaler below because time.Time implements
+		// encoding.TextMarshaler, but we need to always use UTC.
+		enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+		return
+	case TextMarshaler:
+		// Special case. Use text marshaler if it's available for this value.
+		if s, err := v.MarshalText(); err != nil {
+			encPanic(err)
+		} else {
+			enc.writeQuoted(string(s))
+		}
+		return
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16,
+		reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+	case reflect.Float64:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	default:
+		panic(e("Unexpected primitive type: %s", rv.Kind()))
+	}
+}
+
+// By the TOML spec, all floats must have a decimal with at least one
+// number on either side.
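+// For example, strconv renders the float64 value 3 as "3", which this
+// helper rewrites to "3.0"; "3.14" is passed through unchanged.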
+func floatAddDecimal(fstr string) string {
+	if !strings.Contains(fstr, ".") {
+		return fstr + ".0"
+	}
+	return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	length := rv.Len()
+	enc.wf("[")
+	for i := 0; i < length; i++ {
+		elem := rv.Index(i)
+		enc.eElement(elem)
+		if i != length-1 {
+			enc.wf(", ")
+		}
+	}
+	enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		if isNil(trv) {
+			continue
+		}
+		panicIfInvalidKey(key)
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+		enc.eMapOrStruct(key, trv)
+	}
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	panicIfInvalidKey(key)
+	if len(key) == 1 {
+		// Output an extra new line between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv)
+	case reflect.Struct:
+		enc.eStruct(key, rv)
+	default:
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string) {
+		sort.Strings(mapKeys)
+		for _, mapKey := range mapKeys {
+			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(mrv) {
+				// Don't write anything for nil fields.
+				continue
+			}
+			enc.encode(key.add(mapKey), mrv)
+		}
+	}
+	writeMapKeys(mapKeysDirect)
+	writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table, then all keys under it will be in that
+	// table (not the one we're writing here).
+	rt := rv.Type()
+	var fieldsDirect, fieldsSub [][]int
+	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			// skip unexported fields
+			if f.PkgPath != "" {
+				continue
+			}
+			frv := rv.Field(i)
+			if f.Anonymous {
+				frv := eindirect(frv)
+				t := frv.Type()
+				if t.Kind() != reflect.Struct {
+					encPanic(errAnonNonStruct)
+				}
+				addFields(t, frv, f.Index)
+			} else if typeIsHash(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	var writeFields = func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			sft := rt.FieldByIndex(fieldIndex)
+			sf := rv.FieldByIndex(fieldIndex)
+			if isNil(sf) {
+				// Don't write anything for nil fields.
+				continue
+			}
+
+			keyName := sft.Tag.Get("toml")
+			if keyName == "-" {
+				continue
+			}
+			if keyName == "" {
+				keyName = sft.Name
+			}
+			enc.encode(key.add(keyName), sf)
+		}
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found (for example, when the
+// value is nil). Among other things, it is used to determine whether the
+// types of array elements are mixed (which is forbidden).
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		} else {
+			return tomlArray
+		}
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case TextMarshaler:
+			return tomlString
+		default:
+			return tomlHash
+		}
+	default:
+		panic("unexpected reflect.Kind: " + rv.Kind().String())
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		elem := rv.Index(i)
+		switch elemType := tomlTypeOfGo(elem); {
+		case elemType == nil:
+			encPanic(errArrayNilElement)
+		case !typeEqual(firstType, elemType):
+			encPanic(errArrayMixedElementTypes)
+		}
+	}
+	// If we have a nested array, then we must make sure that the nested
+	// array contains ONLY primitives.
+	// This checks arbitrarily nested arrays.
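+	// For example, a [][]map[string]string value shows up here as a nested
+	// array whose inner element type is a hash; TOML has no way to express
+	// a table inside a plain array, so we panic with errArrayNoTable.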
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 0000000..d36e1dd --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 0000000..e8d503d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
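+//
+// A type opts in by implementing UnmarshalText. A hypothetical sketch for
+// decoding TOML strings such as "5m" into a duration:
+//
+//	type duration struct{ time.Duration }
+//
+//	func (d *duration) UnmarshalText(text []byte) error {
+//		var err error
+//		d.Duration, err = time.ParseDuration(string(text))
+//		return err
+//	}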
+type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000..2191228 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,874 @@ +package toml + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart +) + +const ( + eof = 0 + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + arrayValTerm = ',' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + width int + line int + state stateFn + items chan item + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input + "\n", + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop.") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.pos >= len(lx.input) { + lx.width = 0 + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.pos += lx.width + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only once per call of next. +func (lx *lexer) backup() { + lx.pos -= lx.width + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. 
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (new lines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	lx.items <- item{
+		itemError,
+		fmt.Sprintf(format, values...),
+		lx.line,
+	}
+	return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+
+	switch r {
+	case commentStart:
+		lx.push(lexTop)
+		return lexCommentStart
+	case tableStart:
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("Unexpected EOF.")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a new line. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == commentStart:
+		// a comment will read to a new line for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.ignore()
+		return lexTop
+	}
+	return lx.errorf("Expected a top-level item to end with a new line, "+
+		"comment or EOF, but got %q instead.", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == arrayTableStart {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != arrayTableEnd {
+		return lx.errorf("Expected end of table array name delimiter %q, "+
+			"but got %q instead.", arrayTableEnd, r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	switch r := lx.peek(); {
+	case r == tableEnd || r == eof:
+		return lx.errorf("Unexpected end of table name. (Table names cannot " +
+			"be empty.)")
+	case r == tableSep:
+		return lx.errorf("Unexpected table separator. (Table names cannot " +
+			"be empty.)")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexValue // reuse string lexing
+	case isWhitespace(r):
+		return lexTableNameStart
+	default:
+		return lexBareTableName
+	}
+}
+
+// lexBareTableName lexes the name of a bare table. It assumes that at least
+// one valid character for the table has already been read.
+func lexBareTableName(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareTableName
+	case r == tableSep || r == tableEnd:
+		lx.backup()
+		lx.emitTrim(itemText)
+		return lexTableNameEnd
+	default:
+		return lx.errorf("Bare keys cannot contain %q.", r)
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == tableSep:
+		lx.ignore()
+		return lexTableNameStart
+	case r == tableEnd:
+		return lx.pop()
+	default:
+		return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
+			"instead.", r)
+	}
+}
+
+// lexKeyStart consumes a key name up until the first non-whitespace character.
+// lexKeyStart will ignore whitespace.
+func lexKeyStart(lx *lexer) stateFn {
+	r := lx.peek()
+	switch {
+	case r == keySep:
+		return lx.errorf("Unexpected key separator %q.", keySep)
+	case isWhitespace(r) || isNL(r):
+		lx.next()
+		return lexSkip(lx, lexKeyStart)
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		lx.push(lexKeyEnd)
+		return lexValue // reuse string lexing
+	default:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		return lexBareKey
+	}
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareKey
+	case isWhitespace(r):
+		lx.emitTrim(itemText)
+		return lexKeyEnd
+	case r == keySep:
+		lx.backup()
+		lx.emitTrim(itemText)
+		return lexKeyEnd
+	default:
+		return lx.errorf("Bare keys cannot contain %q.", r)
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case r == keySep:
+		return lexSkip(lx, lexValue)
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	default:
+		return lx.errorf("Expected key separator %q, but got %q instead.",
+			keySep, r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT new lines.
+	// In array syntax, the array states are responsible for ignoring new
+	// lines.
+ r := lx.next() + if isWhitespace(r) { + return lexSkip(lx, lexValue) + } + + switch { + case r == arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case r == stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case r == rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case r == 't': + return lexTrue + case r == 'f': + return lexFalse + case r == '-': + return lexNumberStart + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + case r == '.': // special error case, be kind to users + return lx.errorf("Floats must start with a digit, not '.'.") + } + return lx.errorf("Expected value but found %q instead.", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and new lines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == arrayValTerm: + return lx.errorf("Unexpected array value terminator %q.", + arrayValTerm) + case r == arrayEnd: + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes the cruft between values of an array. Namely, +// it ignores whitespace and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == arrayValTerm: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf("Expected an array value terminator %q or an array "+ + "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) +} + +// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has +// just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case isNL(r): + return lx.errorf("Strings cannot contain new lines.") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '\\': + return lexMultilineStringEscape + case r == stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. 
+// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case isNL(r): + return lx.errorf("Strings cannot contain new lines.") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + lx.next() + return lexMultilineString + } else { + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) + } +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("Invalid escape character %q. Only the following "+ + "escape characters are allowed: "+ + "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ + "\\uXXXX and \\UXXXXXXXX.", r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf("Expected four hexadecimal digits after '\\u', "+ + "but got '%s' instead.", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ + "but got '%s' instead.", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either a (positive) integer, float or +// datetime. It assumes that NO negative sign has been consumed. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("Floats must start with a digit, not '.'.") + } else { + return lx.errorf("Expected a digit but got %q.", r) + } + } + return lexNumberOrDate +} + +// lexNumberOrDate consumes either a (positive) integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '-': + if lx.pos-lx.start != 5 { + return lx.errorf("All ISO8601 dates must be in full Zulu form.") + } + return lexDateAfterYear + case isDigit(r): + return lexNumberOrDate + case r == '.': + return lexFloatStart + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. +// It assumes that "YYYY-" has already been consumed. +func lexDateAfterYear(lx *lexer) stateFn { + formats := []rune{ + // digits are '0'. + // everything else is direct equality. 
+ '0', '0', '-', '0', '0', + 'T', + '0', '0', ':', '0', '0', ':', '0', '0', + 'Z', + } + for _, f := range formats { + r := lx.next() + if f == '0' { + if !isDigit(r) { + return lx.errorf("Expected digit in ISO8601 datetime, "+ + "but found %q instead.", r) + } + } else if f != r { + return lx.errorf("Expected %q in ISO8601 datetime, "+ + "but found %q instead.", f, r) + } + } + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. It assumes that +// a negative sign has already been read, but that *no* digits have been +// consumed. lexNumberStart will move to the appropriate integer or float +// states. +func lexNumberStart(lx *lexer) stateFn { + // we MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("Floats must start with a digit, not '.'.") + } else { + return lx.errorf("Expected a digit but got %q.", r) + } + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + switch { + case isDigit(r): + return lexNumber + case r == '.': + return lexFloatStart + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloatStart starts the consumption of digits of a float after a '.'. +// Namely, at least one digit is required. +func lexFloatStart(lx *lexer) stateFn { + r := lx.next() + if !isDigit(r) { + return lx.errorf("Floats must have a digit after the '.', but got "+ + "%q instead.", r) + } + return lexFloat +} + +// lexFloat consumes the digits of a float after a '.'. +// Assumes that one digit has been consumed after a '.' already. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexConst consumes the s[1:] in s. It assumes that s[0] has already been +// consumed. +func lexConst(lx *lexer, s string) stateFn { + for i := range s[1:] { + if r := lx.next(); r != rune(s[i+1]) { + return lx.errorf("Expected %q, but found %q instead.", s[:i+1], + s[:i]+string(r)) + } + } + return nil +} + +// lexTrue consumes the "rue" in "true". It assumes that 't' has already +// been consumed. +func lexTrue(lx *lexer) stateFn { + if fn := lexConst(lx, "true"); fn != nil { + return fn + } + lx.emit(itemBool) + return lx.pop() +} + +// lexFalse consumes the "alse" in "false". It assumes that 'f' has already +// been consumed. +func lexFalse(lx *lexer) stateFn { + if fn := lexConst(lx, "false"); fn != nil { + return fn + } + lx.emit(itemBool) + return lx.pop() +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first new line character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString: + return "String" + case itemRawString: + return "String" + case itemMultilineString: + return "String" + case itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 0000000..c6069be --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,498 @@ +package toml + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
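+// For example, an itemInteger token becomes (int64, tomlInteger) and an
+// itemDatetime token becomes (time.Time, tomlDatetime).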
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + num, err := strconv.ParseInt(it.val, 10, 64) + if err != nil { + // See comment below for floats describing why we make a + // distinction between a bug and a user error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + num, err := strconv.ParseFloat(it.val, 64) + if err != nil { + // Distinguish float values. Normally, it'd be a bug if the lexer + // provides an invalid float, but it's possible that the float is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + // + // This is also true for integers. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.bug("Expected float value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + t, err := time.Parse("2006-01-02T15:04:05Z", it.val) + if err != nil { + p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). 
+ switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. 
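+// For example, the table header [a.b] implicitly creates table "a"; setValue
+// above then permits one later explicit definition of "a" before flagging
+// duplicates.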
+func (p *parser) addImplicit(key Key) {
+	p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+	p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+	return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+	if len(s) == 0 || s[0] != '\n' {
+		return s
+	}
+	return s[1:len(s)]
+}
+
+func stripEscapedWhitespace(s string) string {
+	esc := strings.Split(s, "\\\n")
+	if len(esc) > 1 {
+		for i := 1; i < len(esc); i++ {
+			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+		}
+	}
+	return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+	var replaced []rune
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+			"lexer claims it's OK: %s", s, err)
+	}
+
+	// BUG(burntsushi)
+	// I honestly don't understand how this works. I can't seem
+	// to find a way to make this fail. I figured this would fail on invalid
+	// UTF-8 characters like U+DCFF, but it doesn't.
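+	//
+	// One plausible explanation: converting an invalid rune such as a
+	// surrogate half to a string in Go yields the replacement character
+	// U+FFFD, which is valid UTF-8, so the check below may be unreachable.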
+ if !utf8.ValidString(string(rune(hex))) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 0000000..c73f8af --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. + if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000..7592f87 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,241 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + name := sf.Tag.Get("toml") + if name == "-" { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
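+				// (Mirrors encoding/json: each embedded type is queued at
+				// most once per level, and nextCount records repeat
+				// sightings so the next pass can emit the duplicate fields
+				// that dominantField later annihilates.)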
+ nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
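+	// (A typical read-mostly cache: two goroutines may both compute
+	// typeFields for the same type, but the result is deterministic, so
+	// whichever write lands last leaves the cache correct.)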
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
diff --git a/vendor/github.com/Graylog2/go-gelf/LICENSE b/vendor/github.com/Graylog2/go-gelf/LICENSE
new file mode 100644
index 0000000..bc756ae
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/LICENSE
@@ -0,0 +1,21 @@
+Copyright 2012 SocialCode
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/Graylog2/go-gelf/README.md b/vendor/github.com/Graylog2/go-gelf/README.md
new file mode 100644
index 0000000..4900a35
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/README.md
@@ -0,0 +1,76 @@
+go-gelf - GELF library and writer for Go
+========================================
+
+GELF is graylog2's UDP logging format. This library provides an API
+that applications can use to log messages directly to a graylog2
+server, along with an `io.Writer` that can be used to redirect the
+standard library's log messages (or `os.Stdout`) to a graylog2 server.
+
+Installing
+----------
+
+go-gelf is go get-able:
+
+    go get github.com/Graylog2/go-gelf/gelf
+
+Usage
+-----
+
+The easiest way to integrate graylog logging into your go app is by
+having your `main` function (or even `init`) call `log.SetOutput()`.
+By using an `io.MultiWriter`, we can log to both stderr and graylog -
+giving us both centralized and local logs. (Redundancy is nice.)
+
+    package main
+
+    import (
+        "flag"
+        "github.com/Graylog2/go-gelf/gelf"
+        "io"
+        "log"
+        "os"
+    )
+
+    func main() {
+        var graylogAddr string
+
+        flag.StringVar(&graylogAddr, "graylog", "", "graylog server addr")
+        flag.Parse()
+
+        if graylogAddr != "" {
+            gelfWriter, err := gelf.NewWriter(graylogAddr)
+            if err != nil {
+                log.Fatalf("gelf.NewWriter: %s", err)
+            }
+            // log to both stderr and graylog2
+            log.SetOutput(io.MultiWriter(os.Stderr, gelfWriter))
+            log.Printf("logging to stderr & graylog2@'%s'", graylogAddr)
+        }
+
+        // From here on out, any calls to log.Print* functions
+        // will appear on stderr, and be sent over UDP to the
+        // specified Graylog2 server.
+
+        log.Printf("Hello gray World")
+
+        // ...
+    }
+
+The above program can be invoked as:
+
+    go run test.go -graylog=localhost:12201
+
+Because GELF messages are sent over UDP, graylog server availability
+doesn't impact application performance or response time. There is a
+small, fixed overhead per log call, regardless of whether the target
+server is reachable or not.
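+
+Discrete messages can also be sent with `WriteMessage`, bypassing the
+`io.Writer` interface. A minimal sketch, reusing `gelfWriter` from the
+example above (the field values here are illustrative only; `Message`
+and its fields are defined in gelf/writer.go, and `time` must also be
+imported):
+
+    m := &gelf.Message{
+        Version:  "1.1",
+        Host:     "myhost",          // illustrative value
+        Short:    "short message",   // illustrative value
+        Full:     "full message",    // illustrative value
+        TimeUnix: float64(time.Now().Unix()),
+        Level:    gelf.LOG_INFO,
+        Facility: "example",         // illustrative value
+    }
+    if err := gelfWriter.WriteMessage(m); err != nil {
+        log.Fatalf("WriteMessage: %s", err)
+    }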
+ +To Do +----- + +- WriteMessage example + +License +------- + +go-gelf is offered under the MIT license, see LICENSE for details. diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/reader.go b/vendor/github.com/Graylog2/go-gelf/gelf/reader.go new file mode 100644 index 0000000..ff719fc --- /dev/null +++ b/vendor/github.com/Graylog2/go-gelf/gelf/reader.go @@ -0,0 +1,140 @@ +// Copyright 2012 SocialCode. All rights reserved. +// Use of this source code is governed by the MIT +// license that can be found in the LICENSE file. + +package gelf + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "encoding/json" + "fmt" + "io" + "net" + "strings" + "sync" +) + +type Reader struct { + mu sync.Mutex + conn net.Conn +} + +func NewReader(addr string) (*Reader, error) { + var err error + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) + } + + conn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return nil, fmt.Errorf("ListenUDP: %s", err) + } + + r := new(Reader) + r.conn = conn + return r, nil +} + +func (r *Reader) Addr() string { + return r.conn.LocalAddr().String() +} + +// FIXME: this will discard data if p isn't big enough to hold the +// full message. +func (r *Reader) Read(p []byte) (int, error) { + msg, err := r.ReadMessage() + if err != nil { + return -1, err + } + + var data string + + if msg.Full == "" { + data = msg.Short + } else { + data = msg.Full + } + + return strings.NewReader(data).Read(p) +} + +func (r *Reader) ReadMessage() (*Message, error) { + cBuf := make([]byte, ChunkSize) + var ( + err error + n, length int + cid, ocid []byte + seq, total uint8 + cHead []byte + cReader io.Reader + chunks [][]byte + ) + + for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { + if n, err = r.conn.Read(cBuf); err != nil { + return nil, fmt.Errorf("Read: %s", err) + } + cHead, cBuf = cBuf[:2], cBuf[:n] + + if bytes.Equal(cHead, magicChunked) { + //fmt.Printf("chunked %v\n", cBuf[:14]) + cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] + if ocid != nil && !bytes.Equal(cid, ocid) { + return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) + } else if ocid == nil { + ocid = cid + chunks = make([][]byte, total) + } + n = len(cBuf) - chunkedHeaderLen + //fmt.Printf("setting chunks[%d]: %d\n", seq, n) + chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) + length += n + } else { //not chunked + if total > 0 { + return nil, fmt.Errorf("out-of-band message (not chunked)") + } + break + } + } + //fmt.Printf("\nchunks: %v\n", chunks) + + if length > 0 { + if cap(cBuf) < length { + cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) + } + cBuf = cBuf[:0] + for i := range chunks { + //fmt.Printf("appending %d %v\n", i, chunks[i]) + cBuf = append(cBuf, chunks[i]...) 
+ } + cHead = cBuf[:2] + } + + // the data we get from the wire is compressed + if bytes.Equal(cHead, magicGzip) { + cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) + } else if cHead[0] == magicZlib[0] && + (int(cHead[0])*256+int(cHead[1]))%31 == 0 { + // zlib is slightly more complicated, but correct + cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) + } else { + // compliance with https://github.com/Graylog2/graylog2-server + // treating all messages as uncompressed if they are not gzip, zlib or + // chunked + cReader = bytes.NewReader(cBuf) + } + + if err != nil { + return nil, fmt.Errorf("NewReader: %s", err) + } + + msg := new(Message) + if err := json.NewDecoder(cReader).Decode(&msg); err != nil { + return nil, fmt.Errorf("json.Unmarshal: %s", err) + } + + return msg, nil +} diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go new file mode 100644 index 0000000..38e6944 --- /dev/null +++ b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go @@ -0,0 +1,418 @@ +// Copyright 2012 SocialCode. All rights reserved. +// Use of this source code is governed by the MIT +// license that can be found in the LICENSE file. + +package gelf + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "compress/zlib" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "net" + "os" + "path" + "runtime" + "strings" + "sync" + "time" +) + +// Writer implements io.Writer and is used to send both discrete +// messages to a graylog2 server, or data from a stream-oriented +// interface (like the functions in log). +type Writer struct { + mu sync.Mutex + conn net.Conn + hostname string + Facility string // defaults to current process name + CompressionLevel int // one of the consts from compress/flate + CompressionType CompressType +} + +// What compression type the writer should use when sending messages +// to the graylog2 server +type CompressType int + +const ( + CompressGzip CompressType = iota + CompressZlib + CompressNone +) + +// Message represents the contents of the GELF message. It is gzipped +// before sending. +type Message struct { + Version string `json:"version"` + Host string `json:"host"` + Short string `json:"short_message"` + Full string `json:"full_message,omitempty"` + TimeUnix float64 `json:"timestamp"` + Level int32 `json:"level,omitempty"` + Facility string `json:"facility,omitempty"` + Extra map[string]interface{} `json:"-"` + RawExtra json.RawMessage `json:"-"` +} + +// Used to control GELF chunking. Should be less than (MTU - len(UDP +// header)). +// +// TODO: generate dynamically using Path MTU Discovery? +const ( + ChunkSize = 1420 + chunkedHeaderLen = 12 + chunkedDataLen = ChunkSize - chunkedHeaderLen +) + +var ( + magicChunked = []byte{0x1e, 0x0f} + magicZlib = []byte{0x78} + magicGzip = []byte{0x1f, 0x8b} +) + +// Syslog severity levels +const ( + LOG_EMERG = int32(0) + LOG_ALERT = int32(1) + LOG_CRIT = int32(2) + LOG_ERR = int32(3) + LOG_WARNING = int32(4) + LOG_NOTICE = int32(5) + LOG_INFO = int32(6) + LOG_DEBUG = int32(7) +) + +// numChunks returns the number of GELF chunks necessary to transmit +// the given compressed buffer. +func numChunks(b []byte) int { + lenB := len(b) + if lenB <= ChunkSize { + return 1 + } + return len(b)/chunkedDataLen + 1 +} + +// New returns a new GELF Writer. 
This writer can be used to send the +// output of the standard Go log functions to a central GELF server by +// passing it to log.SetOutput() +func NewWriter(addr string) (*Writer, error) { + var err error + w := new(Writer) + w.CompressionLevel = flate.BestSpeed + + if w.conn, err = net.Dial("udp", addr); err != nil { + return nil, err + } + if w.hostname, err = os.Hostname(); err != nil { + return nil, err + } + + w.Facility = path.Base(os.Args[0]) + + return w, nil +} + +// writes the gzip compressed byte array to the connection as a series +// of GELF chunked messages. The format is documented at +// http://docs.graylog.org/en/2.1/pages/gelf.html as: +// +// 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte +// total, chunk-data +func (w *Writer) writeChunked(zBytes []byte) (err error) { + b := make([]byte, 0, ChunkSize) + buf := bytes.NewBuffer(b) + nChunksI := numChunks(zBytes) + if nChunksI > 128 { + return fmt.Errorf("msg too large, would need %d chunks", nChunksI) + } + nChunks := uint8(nChunksI) + // use urandom to get a unique message id + msgId := make([]byte, 8) + n, err := io.ReadFull(rand.Reader, msgId) + if err != nil || n != 8 { + return fmt.Errorf("rand.Reader: %d/%s", n, err) + } + + bytesLeft := len(zBytes) + for i := uint8(0); i < nChunks; i++ { + buf.Reset() + // manually write header. Don't care about + // host/network byte order, because the spec only + // deals in individual bytes. + buf.Write(magicChunked) //magic + buf.Write(msgId) + buf.WriteByte(i) + buf.WriteByte(nChunks) + // slice out our chunk from zBytes + chunkLen := chunkedDataLen + if chunkLen > bytesLeft { + chunkLen = bytesLeft + } + off := int(i) * chunkedDataLen + chunk := zBytes[off : off+chunkLen] + buf.Write(chunk) + + // write this chunk, and make sure the write was good + n, err := w.conn.Write(buf.Bytes()) + if err != nil { + return fmt.Errorf("Write (chunk %d/%d): %s", i, + nChunks, err) + } + if n != len(buf.Bytes()) { + return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", + i, nChunks, n, len(buf.Bytes())) + } + + bytesLeft -= chunkLen + } + + if bytesLeft != 0 { + return fmt.Errorf("error: %d bytes left after sending", bytesLeft) + } + return nil +} + +// 1k bytes buffer by default +var bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 1024)) + }, +} + +func newBuffer() *bytes.Buffer { + b := bufPool.Get().(*bytes.Buffer) + if b != nil { + b.Reset() + return b + } + return bytes.NewBuffer(nil) +} + +// WriteMessage sends the specified message to the GELF server +// specified in the call to New(). It assumes all the fields are +// filled out appropriately. In general, clients will want to use +// Write, rather than WriteMessage. 
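+//
+// Note that the zero value of CompressType is CompressGzip, so messages
+// are gzip-compressed unless CompressionType is set explicitly.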
+func (w *Writer) WriteMessage(m *Message) (err error) { + mBuf := newBuffer() + defer bufPool.Put(mBuf) + if err = m.MarshalJSONBuf(mBuf); err != nil { + return err + } + mBytes := mBuf.Bytes() + + var ( + zBuf *bytes.Buffer + zBytes []byte + ) + + var zw io.WriteCloser + switch w.CompressionType { + case CompressGzip: + zBuf = newBuffer() + defer bufPool.Put(zBuf) + zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel) + case CompressZlib: + zBuf = newBuffer() + defer bufPool.Put(zBuf) + zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel) + case CompressNone: + zBytes = mBytes + default: + panic(fmt.Sprintf("unknown compression type %d", + w.CompressionType)) + } + if zw != nil { + if err != nil { + return + } + if _, err = zw.Write(mBytes); err != nil { + zw.Close() + return + } + zw.Close() + zBytes = zBuf.Bytes() + } + + if numChunks(zBytes) > 1 { + return w.writeChunked(zBytes) + } + n, err := w.conn.Write(zBytes) + if err != nil { + return + } + if n != len(zBytes) { + return fmt.Errorf("bad write (%d/%d)", n, len(zBytes)) + } + + return nil +} + +// Close connection and interrupt blocked Read or Write operations +func (w *Writer) Close() error { + return w.conn.Close() +} + +/* +func (w *Writer) Alert(m string) (err error) +func (w *Writer) Close() error +func (w *Writer) Crit(m string) (err error) +func (w *Writer) Debug(m string) (err error) +func (w *Writer) Emerg(m string) (err error) +func (w *Writer) Err(m string) (err error) +func (w *Writer) Info(m string) (err error) +func (w *Writer) Notice(m string) (err error) +func (w *Writer) Warning(m string) (err error) +*/ + +// getCaller returns the filename and the line info of a function +// further down in the call stack. Passing 0 in as callDepth would +// return info on the function calling getCallerIgnoringLog, 1 the +// parent function, and so on. Any suffixes passed to getCaller are +// path fragments like "/pkg/log/log.go", and functions in the call +// stack from that file are ignored. +func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) { + // bump by 1 to ignore the getCaller (this) stackframe + callDepth++ +outer: + for { + var ok bool + _, file, line, ok = runtime.Caller(callDepth) + if !ok { + file = "???" + line = 0 + break + } + + for _, s := range suffixesToIgnore { + if strings.HasSuffix(file, s) { + callDepth++ + continue outer + } + } + break + } + return +} + +func getCallerIgnoringLogMulti(callDepth int) (string, int) { + // the +1 is to ignore this (getCallerIgnoringLogMulti) frame + return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go") +} + +// Write encodes the given string in a GELF message and sends it to +// the server specified in New(). +func (w *Writer) Write(p []byte) (n int, err error) { + + // 1 for the function that called us. + file, line := getCallerIgnoringLogMulti(1) + + // remove trailing and leading whitespace + p = bytes.TrimSpace(p) + + // If there are newlines in the message, use the first line + // for the short message and set the full message to the + // original input. If the input has no newlines, stick the + // whole thing in Short. 
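+	// For example, "line1\nline2" yields Short "line1" and Full
+	// "line1\nline2", while "single line" is stored in Short only.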
+	short := p
+	full := []byte("")
+	if i := bytes.IndexRune(p, '\n'); i > 0 {
+		short = p[:i]
+		full = p
+	}
+
+	m := Message{
+		Version:  "1.1",
+		Host:     w.hostname,
+		Short:    string(short),
+		Full:     string(full),
+		TimeUnix: float64(time.Now().Unix()),
+		Level:    6, // info
+		Facility: w.Facility,
+		Extra: map[string]interface{}{
+			"_file": file,
+			"_line": line,
+		},
+	}
+
+	if err = w.WriteMessage(&m); err != nil {
+		return 0, err
+	}
+
+	return len(p), nil
+}
+
+func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error {
+	b, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+	// write up until the final }
+	if _, err = buf.Write(b[:len(b)-1]); err != nil {
+		return err
+	}
+	if len(m.Extra) > 0 {
+		eb, err := json.Marshal(m.Extra)
+		if err != nil {
+			return err
+		}
+		// merge serialized message + serialized extra map
+		if err = buf.WriteByte(','); err != nil {
+			return err
+		}
+		// write serialized extra bytes, without enclosing braces
+		if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil {
+			return err
+		}
+	}
+
+	if len(m.RawExtra) > 0 {
+		if err := buf.WriteByte(','); err != nil {
+			return err
+		}
+
+		// write serialized extra bytes, without enclosing braces
+		if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil {
+			return err
+		}
+	}
+
+	// write final closing brace
+	return buf.WriteByte('}')
+}
+
+func (m *Message) UnmarshalJSON(data []byte) error {
+	i := make(map[string]interface{}, 16)
+	if err := json.Unmarshal(data, &i); err != nil {
+		return err
+	}
+	for k, v := range i {
+		if k[0] == '_' {
+			if m.Extra == nil {
+				m.Extra = make(map[string]interface{}, 1)
+			}
+			m.Extra[k] = v
+			continue
+		}
+		switch k {
+		case "version":
+			m.Version = v.(string)
+		case "host":
+			m.Host = v.(string)
+		case "short_message":
+			m.Short = v.(string)
+		case "full_message":
+			m.Full = v.(string)
+		case "timestamp":
+			m.TimeUnix = v.(float64)
+		case "level":
+			m.Level = int32(v.(float64))
+		case "facility":
+			m.Facility = v.(string)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 0000000..b8b569d
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+ diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 0000000..5680010 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,22 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go new file mode 100644 index 0000000..0378401 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. 
+// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "time" +) + +const ( + blockSize = 512 + + // Types + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + CreationTime time.Time // creation time + Xattrs map[string]string + Winheaders map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. 
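+	// (&^ 07777 keeps only the file-type bits, so m can be compared
+	// against the c_IS* format constants below.)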
+ m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxCreationTime = "LIBARCHIVE.creationtime" + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxWindows = "MSWINDOWS." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. 
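+	// (This also makes FileInfoHeader round-trip friendly: a FileInfo
+	// obtained from Header.FileInfo carries the original *Header in Sys(),
+	// so the branch below restores fields os.FileInfo cannot express.)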
+ if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. + unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go new file mode 100644 index 0000000..e210c61 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go @@ -0,0 +1,1002 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "math" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + hdrBuff [blockSize]byte // buffer to use in readHeader +} + +type parser struct { + err error // Last error seen +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. 
+type regFileReader struct {
+	r  io.Reader // underlying reader
+	nb int64     // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a
+// tar archive.
+type sparseFileReader struct {
+	rfr   numBytesReader // Reads the sparse-encoded file data
+	sp    []sparseEntry  // The sparse map for the file
+	pos   int64          // Keeps track of file position
+	total int64          // Total size of the file
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+//
+// Sparse files are represented using a series of sparseEntry values.
+// Despite the name, a sparseEntry represents an actual data fragment that
+// references data found in the underlying archive stream. All regions not
+// covered by a sparseEntry are logically filled with zeros.
+//
+// For example, if the underlying raw file contains the 8-byte data:
+//	var compactData = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var sp = []sparseEntry{
+//		{offset: 2, numBytes: 5} // Data fragment for bytes [2, 7)
+//		{offset: 18, numBytes: 3} // Data fragment for bytes [18, 21)
+//	}
+//
+// Then the content of the resulting sparse file with a "real" size of 25 is:
+//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type sparseEntry struct {
+	offset   int64 // Starting position of the fragment
+	numBytes int64 // Length of the fragment
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+	paxGNUSparseOffset    = "GNU.sparse.offset"
+	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
+	paxGNUSparseMap       = "GNU.sparse.map"
+	paxGNUSparseName      = "GNU.sparse.name"
+	paxGNUSparseMajor     = "GNU.sparse.major"
+	paxGNUSparseMinor     = "GNU.sparse.minor"
+	paxGNUSparseSize      = "GNU.sparse.size"
+	paxGNUSparseRealSize  = "GNU.sparse.realsize"
+)
+
+// Keywords for old GNU sparse headers
+const (
+	oldGNUSparseMainHeaderOffset               = 386
+	oldGNUSparseMainHeaderIsExtendedOffset     = 482
+	oldGNUSparseMainHeaderNumEntries           = 4
+	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+	oldGNUSparseExtendedHeaderNumEntries       = 21
+	oldGNUSparseOffsetSize                     = 12
+	oldGNUSparseNumBytesSize                   = 12
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+	if tr.err != nil {
+		return nil, tr.err
+	}
+
+	var hdr *Header
+	var extHdrs map[string]string
+
+	// Externally, Next iterates through the tar archive as if it is a series of
+	// files. Internally, the tar format often uses fake "files" to add meta
+	// data that describes the next file. These meta data "files" should not
+	// normally be visible to the outside. As such, this loop iterates through
+	// one or more "header files" until it finds a "normal file".
+loop:
+	for {
+		tr.err = tr.skipUnread()
+		if tr.err != nil {
+			return nil, tr.err
+		}
+
+		hdr = tr.readHeader()
+		if tr.err != nil {
+			return nil, tr.err
+		}
+
+		// Check for PAX/GNU special headers and files.
+		switch hdr.Typeflag {
+		case TypeXHeader:
+			extHdrs, tr.err = parsePAX(tr)
+			if tr.err != nil {
+				return nil, tr.err
+			}
+			continue loop // This is a meta header affecting the next header
+		case TypeGNULongName, TypeGNULongLink:
+			var realname []byte
+			realname, tr.err = ioutil.ReadAll(tr)
+			if tr.err != nil {
+				return nil, tr.err
+			}
+
+			// Convert GNU extensions to use PAX headers.
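+			// ('L' supplies the real name of the next entry and 'K' its
+			// real link target; rewriting them as the PAX "path" and
+			// "linkpath" records lets mergePAX apply both uniformly.)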
+ if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + tr.err = p.err + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + default: + mergePAX(hdr, extHdrs) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil, tr.err + } + } + break loop // This is a file, so stop + } + } + return hdr, nil +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. 
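+//
+// For example, a PAX record "path=some/very/long/name" replaces the Name
+// parsed from the 100-byte ustar name field.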
+func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxCreationTime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.CreationTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } else if strings.HasPrefix(k, paxWindows) { + if hdr.Winheaders == nil { + hdr.Winheaders = make(map[string]string) + } + hdr.Winheaders[k[len(paxWindows):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.WriteString(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. 
+ headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() + } + return headers, nil +} + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. + if len(b) > 0 && b[0]&0x80 != 0 { + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. + var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff + } + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) + } + + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +func (p *parser) parseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. 
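+// The padding exists because each entry's data section is rounded up to a
+// multiple of blockSize (512 bytes) in the archive.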
+// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip + tr.curr, tr.pad = nil, 0 + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, os.SEEK_CUR) + if err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) + if err != nil { + tr.err = err + return tr.err + } + seekSkipped = pos2 - pos1 + } + } + + var copySkipped int64 // Number of bytes skipped via CopyN + copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { + tr.err = io.ErrUnexpectedEOF + } + return tr.err +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + var p parser + given := p.parseOctal(header[148:156]) + unsigned, signed := checksum(header) + return p.err == nil && (given == unsigned || given == signed) +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() *Header { + header := tr.hdrBuff[:] + copy(header, zeroBlock) + + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil // io.EOF is okay here + } + + // Two blocks of zero bytes marks the end of the archive. + if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil // io.EOF is okay here + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + var p parser + hdr := new(Header) + s := slicer(header) + + hdr.Name = p.parseString(s.next(100)) + hdr.Mode = p.parseNumeric(s.next(8)) + hdr.Uid = int(p.parseNumeric(s.next(8))) + hdr.Gid = int(p.parseNumeric(s.next(8))) + hdr.Size = p.parseNumeric(s.next(12)) + hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = p.parseString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. 
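+	// "ustar\x00" marks the POSIX (and star) layouts, while "ustar  \x00"
+	// marks old GNU tar; an all-NUL magic indicates the original v7 format.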
+	var format string
+	switch {
+	case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
+		if string(header[508:512]) == "tar\x00" {
+			format = "star"
+		} else {
+			format = "posix"
+		}
+	case magic == "ustar  \x00": // old GNU tar
+		format = "gnu"
+	}
+
+	switch format {
+	case "posix", "gnu", "star":
+		hdr.Uname = p.parseString(s.next(32))
+		hdr.Gname = p.parseString(s.next(32))
+		devmajor := s.next(8)
+		devminor := s.next(8)
+		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+			hdr.Devmajor = p.parseNumeric(devmajor)
+			hdr.Devminor = p.parseNumeric(devminor)
+		}
+		var prefix string
+		switch format {
+		case "posix", "gnu":
+			prefix = p.parseString(s.next(155))
+		case "star":
+			prefix = p.parseString(s.next(131))
+			hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+			hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
+		}
+		if len(prefix) > 0 {
+			hdr.Name = prefix + "/" + hdr.Name
+		}
+	}
+
+	if p.err != nil {
+		tr.err = p.err
+		return nil
+	}
+
+	nb := hdr.Size
+	if isHeaderOnlyType(hdr.Typeflag) {
+		nb = 0
+	}
+	if nb < 0 {
+		tr.err = ErrHeader
+		return nil
+	}
+
+	// Set the current file reader.
+	tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+	tr.curr = &regFileReader{r: tr.r, nb: nb}
+
+	// Check for old GNU sparse format entry.
+	if hdr.Typeflag == TypeGNUSparse {
+		// Get the real size of the file.
+		hdr.Size = p.parseNumeric(header[483:495])
+		if p.err != nil {
+			tr.err = p.err
+			return nil
+		}
+
+		// Read the sparse map.
+		sp := tr.readOldGNUSparseMap(header)
+		if tr.err != nil {
+			return nil
+		}
+
+		// Current file is a GNU sparse file. Update the current file reader.
+		tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
+		if tr.err != nil {
+			return nil
+		}
+	}
+
+	return hdr
+}
+
+// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
+// then one or more extension headers are used to store the rest of the sparse map.
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
+	var p parser
+	isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
+	spCap := oldGNUSparseMainHeaderNumEntries
+	if isExtended {
+		spCap += oldGNUSparseExtendedHeaderNumEntries
+	}
+	sp := make([]sparseEntry, 0, spCap)
+	s := slicer(header[oldGNUSparseMainHeaderOffset:])
+
+	// Read the four entries from the main tar header
+	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
+		offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+		numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+		if p.err != nil {
+			tr.err = p.err
+			return nil
+		}
+		if offset == 0 && numBytes == 0 {
+			break
+		}
+		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+	}
+
+	for isExtended {
+		// There are more entries. Read an extension header and parse its entries.
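+		// Each extension header is a full block holding up to another 21
+		// (offset, numBytes) pairs and its own isExtended flag, so the map
+		// may chain across several blocks.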
+ sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, numBytes). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + var cntNewline int64 + var buf bytes.Buffer + var blk = make([]byte, blockSize) + + // feedTokens copies data in numBlock chunks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + var feedTokens = func(cnt int64) error { + for cntNewline < cnt { + if _, err := io.ReadFull(r, blk); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + buf.Write(blk) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + var nextToken = func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return tok[:len(tok)-1] // Cut off newline + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. 
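+	// If 2*numEntries wraps around the native int size, the product becomes
+	// smaller than numEntries itself, which the check below rejects.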
+ numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +// +// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.err != nil { + return 0, tr.err + } + if tr.curr == nil { + return 0, io.EOF + } + + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// newSparseFileReader creates a new sparseFileReader, but validates all of the +// sparse entries before doing so. +func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { + if total < 0 { + return nil, ErrHeader // Total size cannot be negative + } + + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + for i, s := range sp { + switch { + case s.offset < 0 || s.numBytes < 0: + return nil, ErrHeader // Negative values are never okay + case s.offset > math.MaxInt64-s.numBytes: + return nil, ErrHeader // Integer overflow with large length + case s.offset+s.numBytes > total: + return nil, ErrHeader // Region extends beyond the "real" size + case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: + return nil, ErrHeader // Regions can't overlap and must be in order + } + } + return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil +} + +// readHole reads a sparse hole ending at endOffset. 
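+// It fills b with zeros, since a sparse hole carries no data in the archive.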
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { + n64 := endOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + // Skip past all empty fragments. + for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { + sfr.sp = sfr.sp[1:] + } + + // If there are no more fragments, then it is possible that there + // is one last sparse hole. + if len(sfr.sp) == 0 { + // This behavior matches the BSD tar utility. + // However, GNU tar stops returning data even if sfr.total is unmet. + if sfr.pos < sfr.total { + return sfr.readHole(b, sfr.total), nil + } + return 0, io.EOF + } + + // In front of a data fragment, so read a hole. + if sfr.pos < sfr.sp[0].offset { + return sfr.readHole(b, sfr.sp[0].offset), nil + } + + // In a data fragment, so read from it. + // This math is overflow free since we verify that offset and numBytes can + // be safely added when creating the sparseFileReader. + endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment + bytesLeft := endPos - sfr.pos // Bytes left in fragment + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + if err == io.EOF { + if sfr.pos < endPos { + err = io.ErrUnexpectedEOF // There was supposed to be more data + } else if sfr.pos < sfr.total { + err = nil // There is still an implicit sparse hole at the end + } + } + + if sfr.pos == endPos { + sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + } + return n, err +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.numBytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go new file mode 100644 index 0000000..cf9cc79 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go new file mode 100644 index 0000000..6f17dbe --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go new file mode 100644 index 0000000..cb843db --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go new file mode 100644 index 0000000..30d7e60 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go @@ -0,0 +1,444 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. +type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header + hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header + paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header +} + +type formatter struct { + err error // Last error seen +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} } + +// Flush finishes writing the current file (optional). 
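+// It pads the current entry's data out to the next 512-byte block boundary;
+// WriteHeader invokes it implicitly before starting the next entry.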
+func (tw *Writer) Flush() error {
+	if tw.nb > 0 {
+		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+		return tw.err
+	}
+
+	n := tw.nb + tw.pad
+	for n > 0 && tw.err == nil {
+		nr := n
+		if nr > blockSize {
+			nr = blockSize
+		}
+		var nw int
+		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+		n -= int64(nw)
+	}
+	tw.nb = 0
+	tw.pad = 0
+	return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+func (f *formatter) formatString(b []byte, s string) {
+	if len(s) > len(b) {
+		f.err = ErrFieldTooLong
+		return
+	}
+	ascii := toASCII(s)
+	copy(b, ascii)
+	if len(ascii) < len(b) {
+		b[len(ascii)] = 0
+	}
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (f *formatter) formatOctal(b []byte, x int64) {
+	s := strconv.FormatInt(x, 8)
+	// leading zeros, but leave room for a NUL.
+	for len(s)+1 < len(b) {
+		s = "0" + s
+	}
+	f.formatString(b, s)
+}
+
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode; which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+	var binBits = uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// Write x into b, as binary (GNUtar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}
+
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}
+
+var (
+	minTime = time.Unix(0, 0)
+	// There is room for 11 octal digits (33 bits) of mtime.
+	maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	return tw.writeHeader(hdr, true)
+}
+
+// writeHeader writes hdr and prepares to accept the file's contents.
+// It calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// writePAXHeader calls this method with allowPax set to false to
+// suppress writing a PAX header for the PAX header entry itself.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+	if tw.closed {
+		return ErrWriteAfterClose
+	}
+	if tw.err == nil {
+		tw.Flush()
+	}
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// a map to hold pax header records, if any are needed
+	paxHeaders := make(map[string]string)
+
+	// TODO(shanemhansen): we might want to use PAX headers for
+	// subsecond time resolution, but for now let's just capture
+	// too long fields or non ascii characters
+
+	var f formatter
+	var header []byte
+
+	// We need to select which scratch buffer to use carefully,
+	// since this method is called recursively to write PAX headers.
+	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+	// already being used by the non-recursive call, so we must use paxHdrBuff.
+ header = tw.hdrBuff[:] + if !allowPax { + header = tw.paxHdrBuff[:] + } + copy(header, zeroBlock) + s := slicer(header) + + // Wrappers around formatter that automatically sets paxHeaders if the + // argument extends beyond the capacity of the input byte slice. + var formatString = func(b []byte, s string, paxKeyword string) { + needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + f.formatString(b, s) + } + var formatNumeric = func(b []byte, x int64, paxKeyword string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + f.formatOctal(b, x) + return + } + + // If it is too long for octal, and PAX is preferred, use a PAX header. + if paxKeyword != paxNone && tw.preferPax { + f.formatOctal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + tw.usedBinary = true + f.formatNumeric(b, x) + } + var formatTime = func(b []byte, t time.Time, paxKeyword string) { + var unixTime int64 + if !t.Before(minTime) && !t.After(maxTime) { + unixTime = t.Unix() + } + formatNumeric(b, unixTime, paxNone) + + // Write a PAX header if the time didn't fit precisely. + if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) { + paxHeaders[paxKeyword] = formatPAXTime(t) + } + } + + // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + pathHeaderBytes := s.next(fileNameSize) + + formatString(pathHeaderBytes, hdr.Name, paxPath) + + f.formatOctal(s.next(8), hdr.Mode) // 100:108 + formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116 + formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124 + formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136 + formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148 + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + formatString(s.next(100), hdr.Linkname, paxLinkpath) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + formatString(s.next(32), hdr.Uname, paxUname) // 265:297 + formatString(s.next(32), hdr.Gname, paxGname) // 297:329 + formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337 + formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345 + + // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + prefixHeaderBytes := s.next(155) + formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. + if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + prefix, suffix, ok := splitUSTARPath(hdr.Name) + if ok { + // Since we can encode in USTAR format, disable PAX header. + delete(paxHeaders, paxPath) + + // Update the path fields + formatString(pathHeaderBytes, suffix, paxNone) + formatString(prefixHeaderBytes, prefix, paxNone) + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + f.formatOctal(header[148:155], chksum) // Never fails + header[155] = ' ' + + // Check if there were any formatting errors. 
+ if f.err != nil { + tw.err = f.err + return tw.err + } + + if allowPax { + if !hdr.AccessTime.IsZero() { + paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime) + } + if !hdr.ChangeTime.IsZero() { + paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime) + } + if !hdr.CreationTime.IsZero() { + paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime) + } + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + for k, v := range hdr.Winheaders { + paxHeaders[paxWindows+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +func formatPAXTime(t time.Time) string { + sec := t.Unix() + usec := t.Nanosecond() + s := strconv.FormatInt(sec, 10) + if usec != 0 { + s = fmt.Sprintf("%s.%09d", s, usec) + } + return s +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= fileNameSize || !isASCII(name) { + return "", "", false + } else if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// writePaxHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. However, this results in differing outputs + // for identical inputs. As such, the constant 0 is now used instead. + // golang.org/issue/12358 + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, "PaxHeaders.0", file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + // Keys are sorted before writing to body to allow deterministic output. + var keys []string + for k := range paxHeaders { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) string { + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s=%s\n", size, k, v) + + // Final adjustment if adding size field increased the record size. 
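+	// For example, k="mtime" and v="1350244992.023960108" give a 28-byte
+	// payload; adding the 2-digit size field yields size 30, so the final
+	// record is the 30-byte string "30 mtime=1350244992.023960108\n".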
+	if len(record) != size {
+		size = len(record)
+		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+	}
+	return record
+}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+	if tw.closed {
+		err = ErrWriteAfterClose
+		return
+	}
+	overwrite := false
+	if int64(len(b)) > tw.nb {
+		b = b[0:tw.nb]
+		overwrite = true
+	}
+	n, err = tw.w.Write(b)
+	tw.nb -= int64(n)
+	if err == nil && overwrite {
+		err = ErrWriteTooLong
+		return
+	}
+	tw.err = err
+	return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+	if tw.err != nil || tw.closed {
+		return tw.err
+	}
+	tw.Flush()
+	tw.closed = true
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// trailer: two zero blocks
+	for i := 0; i < 2; i++ {
+		_, tw.err = tw.w.Write(zeroBlock)
+		if tw.err != nil {
+			break
+		}
+	}
+	return tw.err
+}
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
new file mode 100644
index 0000000..27d6ace
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -0,0 +1,270 @@
+// +build windows
+
+package winio
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"syscall"
+	"unicode/utf16"
+)
+
+//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+
+const (
+	BackupData = uint32(iota + 1)
+	BackupEaData
+	BackupSecurity
+	BackupAlternateData
+	BackupLink
+	BackupPropertyData
+	BackupObjectId
+	BackupReparseData
+	BackupSparseBlock
+	BackupTxfsData
+)
+
+const (
+	StreamSparseAttributes = uint32(8)
+)
+
+const (
+	WRITE_DAC              = 0x40000
+	WRITE_OWNER            = 0x80000
+	ACCESS_SYSTEM_SECURITY = 0x1000000
+)
+
+// BackupHeader represents a backup stream of a file.
+type BackupHeader struct {
+	Id         uint32 // The backup stream ID
+	Attributes uint32 // Stream attributes
+	Size       int64  // The size of the stream in bytes
+	Name       string // The name of the stream (for BackupAlternateData only).
+	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamId struct {
+	StreamId   uint32
+	Attributes uint32
+	Size       uint64
+	NameSize   uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+	r         io.Reader
+	bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+	return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
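+// A stream with a nonzero NameSize carries a UTF-16 name (used for alternate
+// data streams), and BackupSparseBlock streams carry an extra 64-bit offset.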
+func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
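+// A minimal usage sketch (f is assumed opened via OpenForBackup; dst is any
+// io.Writer):
+//
+//	br := NewBackupFileReader(f, true)
+//	defer br.Close()
+//	_, err := io.Copy(dst, br) // copies the raw backup stream to dst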
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+	var bytesRead uint32
+	err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+	if err != nil {
+		return 0, &os.PathError{"BackupRead", r.f.Name(), err}
+	}
+	runtime.KeepAlive(r.f)
+	if bytesRead == 0 {
+		return 0, io.EOF
+	}
+	return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+	if r.ctx != 0 {
+		backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+		runtime.KeepAlive(r.f)
+		r.ctx = 0
+	}
+	return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+	f               *os.File
+	includeSecurity bool
+	ctx             uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+	w := &BackupFileWriter{f, includeSecurity, 0}
+	return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+	var bytesWritten uint32
+	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+	if err != nil {
+		return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
+	}
+	runtime.KeepAlive(w.f)
+	if int(bytesWritten) != len(b) {
+		return int(bytesWritten), errors.New("not all bytes could be written")
+	}
+	return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+	if w.ctx != 0 {
+		backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+		runtime.KeepAlive(w.f)
+		w.ctx = 0
+	}
+	return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+	winPath, err := syscall.UTF16FromString(path)
+	if err != nil {
+		return nil, err
+	}
+	h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
+	if err != nil {
+		err = &os.PathError{Op: "open", Path: path, Err: err}
+		return nil, err
+	}
+	return os.NewFile(uintptr(h), path), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
new file mode 100644
index 0000000..d39eccf
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
@@ -0,0 +1,4 @@
+// +build !windows
+// This file only exists to allow go get on non-Windows platforms.
+
+package backuptar
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
new file mode 100644
index 0000000..cfbcedf
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
@@ -0,0 +1,353 @@
+// +build windows
+
+package backuptar
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Microsoft/go-winio"
+	"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
+)
+
+const (
+	c_ISUID  = 04000   // Set uid
+	c_ISGID  = 02000   // Set gid
+	c_ISVTX  = 01000   // Save text (sticky bit)
+	c_ISDIR  = 040000  // Directory
+	c_ISFIFO = 010000  // FIFO
+	c_ISREG  = 0100000 // Regular file
+	c_ISLNK  = 0120000 // Symbolic link
+	c_ISBLK  = 060000  // Block special file
+	c_ISCHR  = 020000  // Character special file
+	c_ISSOCK = 0140000 // Socket
+)
+
+const (
+	hdrFileAttributes        = "fileattr"
+	hdrSecurityDescriptor    = "sd"
+	hdrRawSecurityDescriptor = "rawsd"
+	hdrMountPoint            = "mountpoint"
+)
+
+func writeZeroes(w io.Writer, count int64) error {
+	buf := make([]byte, 8192)
+	c := len(buf)
+	for i := int64(0); i < count; i += int64(c) {
+		if int64(c) > count-i {
+			c = int(count - i)
+		}
+		_, err := w.Write(buf[:c])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
+	curOffset := int64(0)
+	for {
+		bhdr, err := br.Next()
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		if err != nil {
+			return err
+		}
+		if bhdr.Id != winio.BackupSparseBlock {
+			return fmt.Errorf("unexpected stream %d", bhdr.Id)
+		}
+
+		// archive/tar does not support writing sparse files
+		// so just write zeroes to catch up to the current offset.
+		if err = writeZeroes(t, bhdr.Offset-curOffset); err != nil {
+			return err
+		}
+		if bhdr.Size == 0 {
+			break
+		}
+		n, err := io.Copy(t, br)
+		if err != nil {
+			return err
+		}
+		curOffset = bhdr.Offset + n
+	}
+	return nil
+}
+
+// BasicInfoHeader creates a tar header from basic file information.
+func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
+	hdr := &tar.Header{
+		Name:         filepath.ToSlash(name),
+		Size:         size,
+		Typeflag:     tar.TypeReg,
+		ModTime:      time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
+		ChangeTime:   time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
+		AccessTime:   time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
+		CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
+		Winheaders:   make(map[string]string),
+	}
+	hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
+
+	if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+		hdr.Mode |= c_ISDIR
+		hdr.Size = 0
+		hdr.Typeflag = tar.TypeDir
+	}
+	return hdr
+}
+
+// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
+//
+// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
+//
+// The additional Win32 metadata is:
+//
+// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
+//
+// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
+//
+// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
+func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
+	name = filepath.ToSlash(name)
+	hdr := BasicInfoHeader(name, size, fileInfo)
+	br := winio.NewBackupStreamReader(r)
+	var dataHdr *winio.BackupHeader
+	for dataHdr == nil {
+		bhdr, err := br.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		switch bhdr.Id {
+		case winio.BackupData:
+			hdr.Mode |= c_ISREG
+			dataHdr = bhdr
+		case winio.BackupSecurity:
+			sd, err := ioutil.ReadAll(br)
+			if err != nil {
+				return err
+			}
+			hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
+
+		case winio.BackupReparseData:
+			hdr.Mode |= c_ISLNK
+			hdr.Typeflag = tar.TypeSymlink
+			reparseBuffer, err := ioutil.ReadAll(br)
+			if err != nil {
+				return err
+			}
+			rp, err := winio.DecodeReparsePoint(reparseBuffer)
+			if err != nil {
+				return err
+			}
+			if rp.IsMountPoint {
+				hdr.Winheaders[hdrMountPoint] = "1"
+			}
+			hdr.Linkname = rp.Target
+		case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+			// ignore these streams
+		default:
+			return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
+		}
+	}
+
+	err := t.WriteHeader(hdr)
+	if err != nil {
+		return err
+	}
+
+	if dataHdr != nil {
+		// A data stream was found. Copy the data.
+		if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
+			if size != dataHdr.Size {
+				return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
+			}
+			_, err = io.Copy(t, br)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = copySparse(t, br)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Look for streams after the data stream. The only ones we handle are alternate data streams.
+	// Other streams may have metadata that could be serialized, but the tar header has already
+	// been written. In practice, this means that we don't get EA or TXF metadata.
+	for {
+		bhdr, err := br.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		switch bhdr.Id {
+		case winio.BackupAlternateData:
+			altName := bhdr.Name
+			if strings.HasSuffix(altName, ":$DATA") {
+				altName = altName[:len(altName)-len(":$DATA")]
+			}
+			if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
+				hdr = &tar.Header{
+					Name:       name + altName,
+					Mode:       hdr.Mode,
+					Typeflag:   tar.TypeReg,
+					Size:       bhdr.Size,
+					ModTime:    hdr.ModTime,
+					AccessTime: hdr.AccessTime,
+					ChangeTime: hdr.ChangeTime,
+				}
+				err = t.WriteHeader(hdr)
+				if err != nil {
+					return err
+				}
+				_, err = io.Copy(t, br)
+				if err != nil {
+					return err
+				}
+
+			} else {
+				// Unsupported for now, since the size of the alternate stream is not present
+				// in the backup stream until after the data has been read.
+				return errors.New("tar of sparse alternate data streams is unsupported")
+			}
+		case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+			// ignore these streams
+		default:
+			return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
+		}
+	}
+	return nil
+}
+
+// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
+// WriteTarFileFromBackupStream.
+func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+	name = hdr.Name
+	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+		size = hdr.Size
+	}
+	fileInfo = &winio.FileBasicInfo{
+		LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
+		LastWriteTime:  syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
+		ChangeTime:     syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
+		CreationTime:   syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
+	}
+	if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
+		attr, err := strconv.ParseUint(attrStr, 10, 32)
+		if err != nil {
+			return "", 0, nil, err
+		}
+		fileInfo.FileAttributes = uintptr(attr)
+	} else {
+		if hdr.Typeflag == tar.TypeDir {
+			fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+		}
+	}
+	return
+}
+
+// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
+// tar file entries in order to collect all the alternate data streams for the file, it returns the next
+// tar file entry that was not processed, or io.EOF if there are no more.
+func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
+	bw := winio.NewBackupStreamWriter(w)
+	var sd []byte
+	var err error
+	// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
+	// by this library will have raw binary for the security descriptor.
+	if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
+		sd, err = winio.SddlToSecurityDescriptor(sddl)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
+		sd, err = base64.StdEncoding.DecodeString(sdraw)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if len(sd) != 0 {
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupSecurity,
+			Size: int64(len(sd)),
+		}
+		err := bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = bw.Write(sd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if hdr.Typeflag == tar.TypeSymlink {
+		_, isMountPoint := hdr.Winheaders[hdrMountPoint]
+		rp := winio.ReparsePoint{
+			Target:       filepath.FromSlash(hdr.Linkname),
+			IsMountPoint: isMountPoint,
+		}
+		reparse := winio.EncodeReparsePoint(&rp)
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupReparseData,
+			Size: int64(len(reparse)),
+		}
+		err := bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = bw.Write(reparse)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupData,
+			Size: hdr.Size,
+		}
+		err := bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = io.Copy(bw, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Copy all the alternate data streams and return the next non-ADS header.
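+	// ADS entries are recognized by the "<name>:<stream>" naming convention
+	// used by WriteTarFileFromBackupStream above.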
+ for { + ahdr, err := t.Next() + if err != nil { + return nil, err + } + if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { + return ahdr, nil + } + bhdr := winio.BackupHeader{ + Id: winio.BackupAlternateData, + Size: ahdr.Size, + Name: ahdr.Name[len(hdr.Name):] + ":$DATA", + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 0000000..613f31b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,295 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. 
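+// All IO is dispatched through a shared IO completion port (serviced by
+// ioCompletionProcessor), which is what allows deadlines and cancellation
+// to work without blocking OS threads inside ReadFile/WriteFile.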
+type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + closing bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + if !f.closing { + // cancel all IO and wait for it to complete + f.closing = true + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wg.Add(1) + if f.closing { + return nil, ErrFileClosed + } + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + // Set the timer resolution to 1. This fixes a performance regression in golang 1.6. + timeBeginPeriod(1) + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing { + err = ErrFileClosed + } + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. 
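+	// A successful zero-byte read into a nonempty buffer, like
+	// ERROR_BROKEN_PIPE, means the other end closed the pipe.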
+ if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 0000000..b1d60ab --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,60 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +const ( + fileBasicInfo = 0 + fileIDInfo = 0x12 +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uintptr // includes padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. 
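+// The layout matches the Win32 FILE_ID_INFO structure returned for the
+// FileIdInfo information class (0x12 above).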
+type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 0000000..da706cc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,404 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. 
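+// It flushes pending data and then issues a zero-byte write, which the remote
+// reader observes as io.EOF.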
+func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then the timeout +// is the default timeout established by the pipe server. +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + now := time.Now() + var ms uint32 + if absTimeout.IsZero() { + ms = cNMPWAIT_USE_DEFAULT_WAIT + } else if now.After(absTimeout) { + ms = cNMPWAIT_NOWAIT + } else { + ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) + } + err = waitNamedPipe(path, ms) + if err != nil { + if err == cERROR_SEM_TIMEOUT { + return nil, ErrTimeout + } + break + } + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + var state uint32 + err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) + if err != nil { + return nil, err + } + + if state&cPIPE_READMODE_MESSAGE != 0 { + return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). 
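+	// (Message read mode handles were already rejected above, so this matches
+	// message-type pipes that are being read in byte mode.)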
+	if flags&cPIPE_TYPE_MESSAGE != 0 {
+		return &win32MessageBytePipe{
+			win32Pipe: win32Pipe{win32File: f, path: path},
+		}, nil
+	}
+	return &win32Pipe{win32File: f, path: path}, nil
+}
+
+type acceptResponse struct {
+	f   *win32File
+	err error
+}
+
+type win32PipeListener struct {
+	firstHandle        syscall.Handle
+	path               string
+	securityDescriptor []byte
+	config             PipeConfig
+	acceptCh           chan (chan acceptResponse)
+	closeCh            chan int
+	doneCh             chan int
+}
+
+func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
+	var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
+	if first {
+		flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
+	}
+
+	var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
+	if c.MessageMode {
+		mode |= cPIPE_TYPE_MESSAGE
+	}
+
+	sa := &syscall.SecurityAttributes{}
+	sa.Length = uint32(unsafe.Sizeof(*sa))
+	if securityDescriptor != nil {
+		len := uint32(len(securityDescriptor))
+		sa.SecurityDescriptor = localAlloc(0, len)
+		defer localFree(sa.SecurityDescriptor)
+		copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
+	}
+	h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
+	if err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+	return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+	h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
+	if err != nil {
+		return nil, err
+	}
+	f, err := makeWin32File(h)
+	if err != nil {
+		syscall.Close(h)
+		return nil, err
+	}
+	return f, nil
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+	closed := false
+	for !closed {
+		select {
+		case <-l.closeCh:
+			closed = true
+		case responseCh := <-l.acceptCh:
+			p, err := l.makeServerPipe()
+			if err == nil {
+				// Wait for the client to connect.
+				ch := make(chan error)
+				go func() {
+					ch <- connectPipe(p)
+				}()
+				select {
+				case err = <-ch:
+					if err != nil {
+						p.Close()
+						p = nil
+					}
+				case <-l.closeCh:
+					// Abort the connect request by closing the handle.
+					p.Close()
+					p = nil
+					err = <-ch
+					if err == nil || err == ErrFileClosed {
+						err = ErrPipeListenerClosed
+					}
+					closed = true
+				}
+			}
+			responseCh <- acceptResponse{p, err}
+		}
+	}
+	syscall.Close(l.firstHandle)
+	l.firstHandle = 0
+	// Notify Close() and Accept() callers that the handle has been closed.
+	close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct {
+	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
+	SecurityDescriptor string
+
+	// MessageMode determines whether the pipe is in byte or message mode. In either
+	// case the pipe is read in byte mode by default. The only practical difference in
+	// this implementation is that CloseWrite() is only supported for message mode pipes;
+	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+	// transferred to the reader (and returned as io.EOF in this implementation)
+	// when the pipe is in message mode.
+	MessageMode bool
+
+	// InputBufferSize specifies the size of the input buffer, in bytes.
+	InputBufferSize int32
+
+	// OutputBufferSize specifies the size of the output buffer, in bytes.
+	OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
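+//
+// A minimal usage sketch (the pipe name is illustrative):
+//
+//	l, err := ListenPipe(`\\.\pipe\mypipe`, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer l.Close()
+//	conn, err := l.Accept() // conn is a net.Conn backed by the pipe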
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + // Immediately open and then close a client handle so that the named pipe is + // created but not currently accepting connections. + h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 0000000..9c83d36 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,202 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + 
securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. 
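+//
+// A hedged usage sketch for these helpers (SeBackupPrivilege is the constant
+// defined above; the callback body is illustrative):
+//
+//	err := RunWithPrivilege(SeBackupPrivilege, func() error {
+//		// perform work that requires the privilege here
+//		return nil
+//	})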
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p, _ := windows.GetCurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		binary.Write(&b, binary.LittleEndian, p)
+		binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED {
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err := impersonateSelf(securityImpersonation)
+	if err != nil {
+		return 0, err
+	}
+
+	var token windows.Token
+	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	if err != nil {
+		rerr := revertToSelf()
+		if rerr != nil {
+			panic(rerr)
+		}
+		return 0, err
+	}
+	return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+	err := revertToSelf()
+	if err != nil {
+		panic(err)
+	}
+	h.Close()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 0000000..fc1ee4d
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,128 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	reparseTagMountPoint = 0xA0000003
+	reparseTagSymlink    = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+	ReparseTag           uint32
+	ReparseDataLength    uint16
+	Reserved             uint16
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+	Target       string
+	IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+	Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
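+// The buffer is expected to begin with the 4-byte reparse tag and the 2-byte
+// data length; the tag-specific payload starts at offset 8.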
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. + target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 0000000..db1b370 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = 
advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
+//sys localFree(mem uintptr) = LocalFree
+//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
+
+const (
+	cERROR_NONE_MAPPED = syscall.Errno(1332)
+)
+
+type AccountLookupError struct {
+	Name string
+	Err  error
+}
+
+func (e *AccountLookupError) Error() string {
+	if e.Name == "" {
+		return "lookup account: empty account name specified"
+	}
+	var s string
+	switch e.Err {
+	case cERROR_NONE_MAPPED:
+		s = "not found"
+	default:
+		s = e.Err.Error()
+	}
+	return "lookup account " + e.Name + ": " + s
+}
+
+type SddlConversionError struct {
+	Sddl string
+	Err  error
+}
+
+func (e *SddlConversionError) Error() string {
+	return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+// LookupSidByName looks up the SID of an account by name
+func LookupSidByName(name string) (sid string, err error) {
+	if name == "" {
+		return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+	}
+
+	var sidSize, sidNameUse, refDomainSize uint32
+	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
+	if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+		return "", &AccountLookupError{name, err}
+	}
+	sidBuffer := make([]byte, sidSize)
+	refDomainBuffer := make([]uint16, refDomainSize)
+	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+	if err != nil {
+		return "", &AccountLookupError{name, err}
+	}
+	var strBuffer *uint16
+	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
+	if err != nil {
+		return "", &AccountLookupError{name, err}
+	}
+	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+	localFree(uintptr(unsafe.Pointer(strBuffer)))
+	return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+	var sdBuffer uintptr
+	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+	if err != nil {
+		return nil, &SddlConversionError{sddl, err}
+	}
+	defer localFree(sdBuffer)
+	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+	return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+	var sddl *uint16
+	// The returned string length seems to include an arbitrary number of terminating NULs.
+	// Don't use it.
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 0000000..20d64cf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 0000000..4f7a52e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,528 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modwinmm = windows.NewLazySystemDLL("winmm.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + 
procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func timeBeginPeriod(period uint32) (n int32) { + r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + n = int32(r0) + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa 
*syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { 
+ err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 
0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func 
backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	var _p1 uint32
+	if abort {
+		_p1 = 1
+	} else {
+		_p1 = 0
+	}
+	var _p2 uint32
+	if processSecurity {
+		_p2 = 1
+	} else {
+		_p2 = 0
+	}
+	r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE
new file mode 100644
index 0000000..49d2166
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md
new file mode 100644
index 0000000..30991a1
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/README.md
@@ -0,0 +1,12 @@
+# hcsshim
+
+This package supports launching Windows Server containers from Go. It is
+primarily used in the [Docker Engine](https://github.com/docker/docker) project,
+but it can be freely used by other projects as well.
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
diff --git a/vendor/github.com/Microsoft/hcsshim/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/activatelayer.go
new file mode 100644
index 0000000..efc4d80
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/activatelayer.go
@@ -0,0 +1,28 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// ActivateLayer will find the layer with the given id and mount its filesystem.
+// For a read/write layer, the mounted filesystem will appear as a volume on the
+// host, while a read-only layer is generally expected to be a no-op.
+// An activated layer must later be deactivated via DeactivateLayer.
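+//
+// A hedged usage sketch (the DriverInfo value and layer id are assumed to be
+// supplied by the caller):
+//
+//	if err := ActivateLayer(info, layerID); err != nil {
+//		// handle the error
+//	}
+//	defer DeactivateLayer(info, layerID)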
+func ActivateLayer(info DriverInfo, id string) error { + title := "hcsshim::ActivateLayer " + logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id) + + infop, err := convertDriverInfo(info) + if err != nil { + logrus.Error(err) + return err + } + + err = activateLayer(&infop, id) + if err != nil { + err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour) + logrus.Error(err) + return err + } + + logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour) + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/baselayer.go b/vendor/github.com/Microsoft/hcsshim/baselayer.go new file mode 100644 index 0000000..9babd4e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/baselayer.go @@ -0,0 +1,183 @@ +package hcsshim + +import ( + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio" +) + +type baseLayerWriter struct { + root string + f *os.File + bw *winio.BackupFileWriter + err error + hasUtilityVM bool + dirInfo []dirInfo +} + +type dirInfo struct { + path string + fileInfo winio.FileBasicInfo +} + +// reapplyDirectoryTimes reapplies directory modification, creation, etc. times +// after processing of the directory tree has completed. The times are expected +// to be ordered such that parent directories come before child directories. +func reapplyDirectoryTimes(dis []dirInfo) error { + for i := range dis { + di := &dis[len(dis)-i-1] // reverse order: process child directories first + f, err := winio.OpenForBackup(di.path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING) + if err != nil { + return err + } + + err = winio.SetFileBasicInfo(f, &di.fileInfo) + f.Close() + if err != nil { + return err + } + } + return nil +} + +func (w *baseLayerWriter) closeCurrentFile() error { + if w.f != nil { + err := w.bw.Close() + err2 := w.f.Close() + w.f = nil + w.bw = nil + if err != nil { + return err + } + if err2 != nil { + return err2 + } + } + return nil +} + +func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + if filepath.ToSlash(name) == `UtilityVM/Files` { + w.hasUtilityVM = true + } + + path := filepath.Join(w.root, name) + path, err = makeLongAbsPath(path) + if err != nil { + return err + } + + var f *os.File + defer func() { + if f != nil { + f.Close() + } + }() + + createmode := uint32(syscall.CREATE_NEW) + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + err := os.Mkdir(path, 0) + if err != nil && !os.IsExist(err) { + return err + } + createmode = syscall.OPEN_EXISTING + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo}) + } + } + + mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) + f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode) + if err != nil { + return makeError(err, "Failed to OpenForBackup", path) + } + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return makeError(err, "Failed to SetFileBasicInfo", path) + } + + w.f = f + w.bw = winio.NewBackupFileWriter(f, true) + f = nil + return nil +} + +func (w *baseLayerWriter) AddLink(name string, target string) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + 
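+	// Resolve both the link name and its target to long absolute paths under
+	// the layer root before creating the hard link.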
+ linkpath, err := makeLongAbsPath(filepath.Join(w.root, name)) + if err != nil { + return err + } + + linktarget, err := makeLongAbsPath(filepath.Join(w.root, target)) + if err != nil { + return err + } + + return os.Link(linktarget, linkpath) +} + +func (w *baseLayerWriter) Remove(name string) error { + return errors.New("base layer cannot have tombstones") +} + +func (w *baseLayerWriter) Write(b []byte) (int, error) { + n, err := w.bw.Write(b) + if err != nil { + w.err = err + } + return n, err +} + +func (w *baseLayerWriter) Close() error { + err := w.closeCurrentFile() + if err != nil { + return err + } + if w.err == nil { + // Restore the file times of all the directories, since they may have + // been modified by creating child directories. + err = reapplyDirectoryTimes(w.dirInfo) + if err != nil { + return err + } + + err = ProcessBaseLayer(w.root) + if err != nil { + return err + } + + if w.hasUtilityVM { + err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM")) + if err != nil { + return err + } + } + } + return w.err +} diff --git a/vendor/github.com/Microsoft/hcsshim/callback.go b/vendor/github.com/Microsoft/hcsshim/callback.go new file mode 100644 index 0000000..e8c2b00 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/callback.go @@ -0,0 +1,79 @@ +package hcsshim + +import ( + "sync" + "syscall" +) + +var ( + nextCallback uintptr + callbackMap = map[uintptr]*notifcationWatcherContext{} + callbackMapLock = sync.RWMutex{} + + notificationWatcherCallback = syscall.NewCallback(notificationWatcher) + + // Notifications for HCS_SYSTEM handles + hcsNotificationSystemExited hcsNotification = 0x00000001 + hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 + hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 + hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 + hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 + + // Notifications for HCS_PROCESS handles + hcsNotificationProcessExited hcsNotification = 0x00010000 + + // Common notifications + hcsNotificationInvalid hcsNotification = 0x00000000 + hcsNotificationServiceDisconnect hcsNotification = 0x01000000 +) + +type hcsNotification uint32 +type notificationChannel chan error + +type notifcationWatcherContext struct { + channels notificationChannels + handle hcsCallback +} + +type notificationChannels map[hcsNotification]notificationChannel + +func newChannels() notificationChannels { + channels := make(notificationChannels) + + channels[hcsNotificationSystemExited] = make(notificationChannel, 1) + channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1) + channels[hcsNotificationProcessExited] = make(notificationChannel, 1) + channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1) + return channels +} +func closeChannels(channels notificationChannels) { + close(channels[hcsNotificationSystemExited]) + close(channels[hcsNotificationSystemCreateCompleted]) + close(channels[hcsNotificationSystemStartCompleted]) + close(channels[hcsNotificationSystemPauseCompleted]) + close(channels[hcsNotificationSystemResumeCompleted]) + close(channels[hcsNotificationProcessExited]) + close(channels[hcsNotificationServiceDisconnect]) +} + +func notificationWatcher(notificationType hcsNotification, 
callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
+	var result error
+	if int32(notificationStatus) < 0 {
+		result = syscall.Errno(win32FromHresult(notificationStatus))
+	}
+
+	callbackMapLock.RLock()
+	context := callbackMap[callbackNumber]
+	callbackMapLock.RUnlock()
+
+	if context == nil {
+		return 0
+	}
+
+	context.channels[notificationType] <- result
+
+	return 0
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/cgo.go b/vendor/github.com/Microsoft/hcsshim/cgo.go
new file mode 100644
index 0000000..2003332
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/cgo.go
@@ -0,0 +1,7 @@
+package hcsshim
+
+import "C"
+
+// This import is needed to make the library compile as CGO because HCSSHIM
+// only works with CGO due to callbacks from HCS coming back from a C thread,
+// which is not supported without CGO. See https://github.com/golang/go/issues/10973
diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go
new file mode 100644
index 0000000..8bc7d6d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/container.go
@@ -0,0 +1,738 @@
+package hcsshim
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+var (
+	defaultTimeout = time.Minute * 4
+)
+
+const (
+	pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
+	statisticsQuery     = `{ "PropertyTypes" : ["Statistics"]}`
+	processListQuery    = `{ "PropertyTypes" : ["ProcessList"]}`
+)
+
+type container struct {
+	handleLock     sync.RWMutex
+	handle         hcsSystem
+	id             string
+	callbackNumber uintptr
+}
+
+// ContainerProperties holds the properties for a container and the processes running in that container
+type ContainerProperties struct {
+	ID                string `json:"Id"`
+	Name              string
+	SystemType        string
+	Owner             string
+	SiloGUID          string `json:"SiloGuid,omitempty"`
+	RuntimeID         string `json:"RuntimeId,omitempty"`
+	IsRuntimeTemplate bool   `json:",omitempty"`
+	RuntimeImagePath  string `json:",omitempty"`
+	Stopped           bool   `json:",omitempty"`
+	ExitType          string `json:",omitempty"`
+	AreUpdatesPending bool   `json:",omitempty"`
+	ObRoot            string `json:",omitempty"`
+	Statistics        Statistics        `json:",omitempty"`
+	ProcessList       []ProcessListItem `json:",omitempty"`
+}
+
+// MemoryStats holds the memory statistics for a container
+type MemoryStats struct {
+	UsageCommitBytes            uint64 `json:"MemoryUsageCommitBytes,omitempty"`
+	UsageCommitPeakBytes        uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
+	UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
+}
+
+// ProcessorStats holds the processor statistics for a container
+type ProcessorStats struct {
+	TotalRuntime100ns  uint64 `json:",omitempty"`
+	RuntimeUser100ns   uint64 `json:",omitempty"`
+	RuntimeKernel100ns uint64 `json:",omitempty"`
+}
+
+// StorageStats holds the storage statistics for a container
+type StorageStats struct {
+	ReadCountNormalized  uint64 `json:",omitempty"`
+	ReadSizeBytes        uint64 `json:",omitempty"`
+	WriteCountNormalized uint64 `json:",omitempty"`
+	WriteSizeBytes       uint64 `json:",omitempty"`
+}
+
+// NetworkStats holds the network statistics for a container
+type NetworkStats struct {
+	BytesReceived          uint64 `json:",omitempty"`
+	BytesSent              uint64 `json:",omitempty"`
+	PacketsReceived        uint64 `json:",omitempty"`
+	PacketsSent            uint64 `json:",omitempty"`
+	DroppedPacketsIncoming uint64 `json:",omitempty"`
+	DroppedPacketsOutgoing uint64 `json:",omitempty"`
+	EndpointId             string `json:",omitempty"`
+
+    InstanceId             string `json:",omitempty"`
+}
+
+// Statistics is the structure returned by a statistics call on a container
+type Statistics struct {
+    Timestamp          time.Time      `json:",omitempty"`
+    ContainerStartTime time.Time      `json:",omitempty"`
+    Uptime100ns        uint64         `json:",omitempty"`
+    Memory             MemoryStats    `json:",omitempty"`
+    Processor          ProcessorStats `json:",omitempty"`
+    Storage            StorageStats   `json:",omitempty"`
+    Network            []NetworkStats `json:",omitempty"`
+}
+
+// ProcessListItem is the structure of an item returned by a ProcessList call on a container
+type ProcessListItem struct {
+    CreateTimestamp              time.Time `json:",omitempty"`
+    ImageName                    string    `json:",omitempty"`
+    KernelTime100ns              uint64    `json:",omitempty"`
+    MemoryCommitBytes            uint64    `json:",omitempty"`
+    MemoryWorkingSetPrivateBytes uint64    `json:",omitempty"`
+    MemoryWorkingSetSharedBytes  uint64    `json:",omitempty"`
+    ProcessId                    uint32    `json:",omitempty"`
+    UserTime100ns                uint64    `json:",omitempty"`
+}
+
+// RequestType is the type of request supported in ModifySystem
+type RequestType string
+
+// ResourceType is the type of resource supported in ModifySystem
+type ResourceType string
+
+// RequestType const
+const (
+    Add     RequestType  = "Add"
+    Remove  RequestType  = "Remove"
+    Network ResourceType = "Network"
+)
+
+// ResourceModificationRequestResponse is the structure used to send a request to the container to modify the system
+// Supported resource types are Network and request types are Add/Remove
+type ResourceModificationRequestResponse struct {
+    Resource ResourceType `json:"ResourceType"`
+    Data     interface{}  `json:"Settings"`
+    Request  RequestType  `json:"RequestType,omitempty"`
+}
+
+// createContainerAdditionalJSON is read from the environment at initialisation
+// time. It allows an environment variable to define additional JSON which
+// is merged in the CreateContainer call to HCS.
+var createContainerAdditionalJSON string
+
+func init() {
+    createContainerAdditionalJSON = os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")
+}
+
+// CreateContainer creates a new container with the given configuration but does not start it.
+func CreateContainer(id string, c *ContainerConfig) (Container, error) {
+    return createContainerWithJSON(id, c, "")
+}
+
+// CreateContainerWithJSON creates a new container with the given configuration but does not start it.
+// It is identical to CreateContainer except that optional additional JSON can be merged before passing to HCS.
+func CreateContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
+    return createContainerWithJSON(id, c, additionalJSON)
+}
+
+func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
+    operation := "CreateContainer"
+    title := "HCSShim::" + operation
+
+    container := &container{
+        id: id,
+    }
+
+    configurationb, err := json.Marshal(c)
+    if err != nil {
+        return nil, err
+    }
+
+    configuration := string(configurationb)
+    logrus.Debugf(title+" id=%s config=%s", id, configuration)
+
+    // Merge any additional JSON. Priority is given to what is passed in explicitly,
+    // falling back to what's set in the environment.
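+    // For illustration (hypothetical values): merging a base configuration of
+    // {"SystemType":"Container","Name":"base"} with additional JSON of
+    // {"Name":"override"} yields {"SystemType":"Container","Name":"override"};
+    // mergeMaps below gives the additional JSON priority on conflicts while
+    // keeping any keys it does not set.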
+ if additionalJSON == "" && createContainerAdditionalJSON != "" { + additionalJSON = createContainerAdditionalJSON + } + if additionalJSON != "" { + configurationMap := map[string]interface{}{} + if err := json.Unmarshal([]byte(configuration), &configurationMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s: %s", configuration, err) + } + + additionalMap := map[string]interface{}{} + if err := json.Unmarshal([]byte(additionalJSON), &additionalMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s: %s", additionalJSON, err) + } + + mergedMap := mergeMaps(additionalMap, configurationMap) + mergedJSON, err := json.Marshal(mergedMap) + if err != nil { + return nil, fmt.Errorf("failed to marshal merged configuration map %+v: %s", mergedMap, err) + } + + configuration = string(mergedJSON) + logrus.Debugf(title+" id=%s merged config=%s", id, configuration) + } + + var ( + resultp *uint16 + identity syscall.Handle + ) + createError := hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp) + + if createError == nil || IsPending(createError) { + if err := container.registerCallback(); err != nil { + return nil, makeContainerError(container, operation, "", err) + } + } + + err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout) + if err != nil { + return nil, makeContainerError(container, operation, configuration, err) + } + + logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle) + return container, nil +} + +// mergeMaps recursively merges map `fromMap` into map `ToMap`. Any pre-existing values +// in ToMap are overwritten. Values in fromMap are added to ToMap. +// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang +func mergeMaps(fromMap, ToMap interface{}) interface{} { + switch fromMap := fromMap.(type) { + case map[string]interface{}: + ToMap, ok := ToMap.(map[string]interface{}) + if !ok { + return fromMap + } + for keyToMap, valueToMap := range ToMap { + if valueFromMap, ok := fromMap[keyToMap]; ok { + fromMap[keyToMap] = mergeMaps(valueFromMap, valueToMap) + } else { + fromMap[keyToMap] = valueToMap + } + } + case nil: + // merge(nil, map[string]interface{...}) -> map[string]interface{...} + ToMap, ok := ToMap.(map[string]interface{}) + if ok { + return ToMap + } + } + return fromMap +} + +// OpenContainer opens an existing container by ID. 
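+// A minimal usage sketch (hypothetical ID):
+//
+//    c, err := hcsshim.OpenContainer("container-id")
+//    if err != nil {
+//        return err
+//    }
+//    defer c.Close()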
+func OpenContainer(id string) (Container, error) {
+    operation := "OpenContainer"
+    title := "HCSShim::" + operation
+    logrus.Debugf(title+" id=%s", id)
+
+    container := &container{
+        id: id,
+    }
+
+    var (
+        handle  hcsSystem
+        resultp *uint16
+    )
+    err := hcsOpenComputeSystem(id, &handle, &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return nil, makeContainerError(container, operation, "", err)
+    }
+
+    container.handle = handle
+
+    if err := container.registerCallback(); err != nil {
+        return nil, makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
+    return container, nil
+}
+
+// GetContainers gets a list of the containers on the system that match the query
+func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
+    operation := "GetContainers"
+    title := "HCSShim::" + operation
+
+    queryb, err := json.Marshal(q)
+    if err != nil {
+        return nil, err
+    }
+
+    query := string(queryb)
+    logrus.Debugf(title+" query=%s", query)
+
+    var (
+        resultp         *uint16
+        computeSystemsp *uint16
+    )
+    err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return nil, err
+    }
+
+    if computeSystemsp == nil {
+        return nil, ErrUnexpectedValue
+    }
+    computeSystemsRaw := convertAndFreeCoTaskMemBytes(computeSystemsp)
+    computeSystems := []ContainerProperties{}
+    if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
+        return nil, err
+    }
+
+    logrus.Debugf(title + " succeeded")
+    return computeSystems, nil
+}
+
+// Start synchronously starts the container.
+func (container *container) Start() error {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "Start"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    if container.handle == 0 {
+        return makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    var resultp *uint16
+    err := hcsStartComputeSystem(container.handle, "", &resultp)
+    err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
+    if err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
+
+// Shutdown requests a container shutdown. If IsPending() on the returned error is
+// true, the container may not actually be shut down until Wait() succeeds.
+func (container *container) Shutdown() error {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "Shutdown"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    if container.handle == 0 {
+        return makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    var resultp *uint16
+    err := hcsShutdownComputeSystem(container.handle, "", &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
+
+// Terminate requests that the container terminate. If IsPending() on the returned
+// error is true, it may not actually be terminated until Wait() succeeds.
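+// A minimal terminate-and-wait sketch from a client of this package (hypothetical
+// container value c):
+//
+//    if err := c.Terminate(); err != nil && !hcsshim.IsPending(err) {
+//        return err
+//    }
+//    return c.Wait()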
+func (container *container) Terminate() error {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "Terminate"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    if container.handle == 0 {
+        return makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    var resultp *uint16
+    err := hcsTerminateComputeSystem(container.handle, "", &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
+
+// Wait synchronously waits for the container to shut down or terminate.
+func (container *container) Wait() error {
+    operation := "Wait"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
+    if err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
+
+// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
+// If the timeout expires, IsTimeout(err) == true
+func (container *container) WaitTimeout(timeout time.Duration) error {
+    operation := "WaitTimeout"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
+    if err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
+
+func (container *container) properties(query string) (*ContainerProperties, error) {
+    var (
+        resultp     *uint16
+        propertiesp *uint16
+    )
+    err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return nil, err
+    }
+
+    if propertiesp == nil {
+        return nil, ErrUnexpectedValue
+    }
+    propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
+    properties := &ContainerProperties{}
+    if err := json.Unmarshal(propertiesRaw, properties); err != nil {
+        return nil, err
+    }
+    return properties, nil
+}
+
+// HasPendingUpdates returns true if the container has updates pending to install
+func (container *container) HasPendingUpdates() (bool, error) {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "HasPendingUpdates"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    if container.handle == 0 {
+        return false, makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    properties, err := container.properties(pendingUpdatesQuery)
+    if err != nil {
+        return false, makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return properties.AreUpdatesPending, nil
+}
+
+// Statistics returns statistics for the container
+func (container *container) Statistics() (Statistics, error) {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "Statistics"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    if container.handle == 0 {
+        return Statistics{}, makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    properties, err := container.properties(statisticsQuery)
+    if err != nil {
+        return Statistics{},
makeContainerError(container, operation, "", err) + } + + logrus.Debugf(title+" succeeded id=%s", container.id) + return properties.Statistics, nil +} + +// ProcessList returns an array of ProcessListItems for the container +func (container *container) ProcessList() ([]ProcessListItem, error) { + container.handleLock.RLock() + defer container.handleLock.RUnlock() + operation := "ProcessList" + title := "HCSShim::Container::" + operation + logrus.Debugf(title+" id=%s", container.id) + + if container.handle == 0 { + return nil, makeContainerError(container, operation, "", ErrAlreadyClosed) + } + + properties, err := container.properties(processListQuery) + if err != nil { + return nil, makeContainerError(container, operation, "", err) + } + + logrus.Debugf(title+" succeeded id=%s", container.id) + return properties.ProcessList, nil +} + +// Pause pauses the execution of the container. This feature is not enabled in TP5. +func (container *container) Pause() error { + container.handleLock.RLock() + defer container.handleLock.RUnlock() + operation := "Pause" + title := "HCSShim::Container::" + operation + logrus.Debugf(title+" id=%s", container.id) + + if container.handle == 0 { + return makeContainerError(container, operation, "", ErrAlreadyClosed) + } + + var resultp *uint16 + err := hcsPauseComputeSystem(container.handle, "", &resultp) + err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout) + if err != nil { + return makeContainerError(container, operation, "", err) + } + + logrus.Debugf(title+" succeeded id=%s", container.id) + return nil +} + +// Resume resumes the execution of the container. This feature is not enabled in TP5. +func (container *container) Resume() error { + container.handleLock.RLock() + defer container.handleLock.RUnlock() + operation := "Resume" + title := "HCSShim::Container::" + operation + logrus.Debugf(title+" id=%s", container.id) + + if container.handle == 0 { + return makeContainerError(container, operation, "", ErrAlreadyClosed) + } + + var resultp *uint16 + err := hcsResumeComputeSystem(container.handle, "", &resultp) + err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout) + if err != nil { + return makeContainerError(container, operation, "", err) + } + + logrus.Debugf(title+" succeeded id=%s", container.id) + return nil +} + +// CreateProcess launches a new process within the container. 
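+// A minimal usage sketch (hypothetical; the ProcessConfig fields and the methods on
+// the returned Process, such as Wait and Close, are defined elsewhere in this package):
+//
+//    p, err := c.CreateProcess(&hcsshim.ProcessConfig{ /* command line, pipe options, ... */ })
+//    if err != nil {
+//        return err
+//    }
+//    defer p.Close()
+//    return p.Wait()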
+func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "CreateProcess"
+    title := "HCSShim::Container::" + operation
+    var (
+        processInfo   hcsProcessInformation
+        processHandle hcsProcess
+        resultp       *uint16
+    )
+
+    if container.handle == 0 {
+        return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    // If we are not emulating a console, ignore any console size passed to us
+    if !c.EmulateConsole {
+        c.ConsoleSize[0] = 0
+        c.ConsoleSize[1] = 0
+    }
+
+    configurationb, err := json.Marshal(c)
+    if err != nil {
+        return nil, makeContainerError(container, operation, "", err)
+    }
+
+    configuration := string(configurationb)
+    logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
+
+    err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return nil, makeContainerError(container, operation, configuration, err)
+    }
+
+    process := &process{
+        handle:    processHandle,
+        processID: int(processInfo.ProcessId),
+        container: container,
+        cachedPipes: &cachedPipes{
+            stdIn:  processInfo.StdInput,
+            stdOut: processInfo.StdOutput,
+            stdErr: processInfo.StdError,
+        },
+    }
+
+    if err := process.registerCallback(); err != nil {
+        return nil, makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
+    return process, nil
+}
+
+// OpenProcess gets an interface to an existing process within the container.
+func (container *container) OpenProcess(pid int) (Process, error) {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "OpenProcess"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
+    var (
+        processHandle hcsProcess
+        resultp       *uint16
+    )
+
+    if container.handle == 0 {
+        return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return nil, makeContainerError(container, operation, "", err)
+    }
+
+    process := &process{
+        handle:    processHandle,
+        processID: pid,
+        container: container,
+    }
+
+    if err := process.registerCallback(); err != nil {
+        return nil, makeContainerError(container, operation, "", err)
+    }
+
+    logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
+    return process, nil
+}
+
+// Close cleans up any state associated with the container but does not terminate or wait for it.
+func (container *container) Close() error {
+    container.handleLock.Lock()
+    defer container.handleLock.Unlock()
+    operation := "Close"
+    title := "HCSShim::Container::" + operation
+    logrus.Debugf(title+" id=%s", container.id)
+
+    // Don't double free this
+    if container.handle == 0 {
+        return nil
+    }
+
+    if err := container.unregisterCallback(); err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    if err := hcsCloseComputeSystem(container.handle); err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+
+    container.handle = 0
+
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
+
+func (container *container) registerCallback() error {
+    context := &notifcationWatcherContext{
+        channels: newChannels(),
+    }
+
+    callbackMapLock.Lock()
+    callbackNumber := nextCallback
+    nextCallback++
+    callbackMap[callbackNumber] = context
+    callbackMapLock.Unlock()
+
+    var callbackHandle hcsCallback
+    err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
+    if err != nil {
+        return err
+    }
+    context.handle = callbackHandle
+    container.callbackNumber = callbackNumber
+
+    return nil
+}
+
+func (container *container) unregisterCallback() error {
+    callbackNumber := container.callbackNumber
+
+    callbackMapLock.RLock()
+    context := callbackMap[callbackNumber]
+    callbackMapLock.RUnlock()
+
+    if context == nil {
+        return nil
+    }
+
+    handle := context.handle
+
+    if handle == 0 {
+        return nil
+    }
+
+    // hcsUnregisterComputeSystemCallback has its own synchronization
+    // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
+    err := hcsUnregisterComputeSystemCallback(handle)
+    if err != nil {
+        return err
+    }
+
+    closeChannels(context.channels)
+
+    callbackMapLock.Lock()
+    callbackMap[callbackNumber] = nil
+    callbackMapLock.Unlock()
+
+    handle = 0
+
+    return nil
+}
+
+// Modify sends a request to HCS to modify the system.
+func (container *container) Modify(config *ResourceModificationRequestResponse) error {
+    container.handleLock.RLock()
+    defer container.handleLock.RUnlock()
+    operation := "Modify"
+    title := "HCSShim::Container::" + operation
+
+    if container.handle == 0 {
+        return makeContainerError(container, operation, "", ErrAlreadyClosed)
+    }
+
+    requestJSON, err := json.Marshal(config)
+    if err != nil {
+        return err
+    }
+
+    requestString := string(requestJSON)
+    logrus.Debugf(title+" id=%s request=%s", container.id, requestString)
+
+    var resultp *uint16
+    err = hcsModifyComputeSystem(container.handle, requestString, &resultp)
+    err = processHcsResult(err, resultp)
+    if err != nil {
+        return makeContainerError(container, operation, "", err)
+    }
+    logrus.Debugf(title+" succeeded id=%s", container.id)
+    return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/createlayer.go b/vendor/github.com/Microsoft/hcsshim/createlayer.go
new file mode 100644
index 0000000..9ecffb1
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/createlayer.go
@@ -0,0 +1,27 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// CreateLayer creates a new, empty, read-only layer on the filesystem based on
+// the parent layer provided.
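+// A minimal usage sketch (hypothetical IDs; di is a DriverInfo populated by the caller):
+//
+//    if err := hcsshim.CreateLayer(di, "new-layer-id", "parent-layer-id"); err != nil {
+//        // handle error
+//    }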
+func CreateLayer(info DriverInfo, id, parent string) error {
+    title := "hcsshim::CreateLayer "
+    logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)
+
+    // Convert info to API calling convention
+    infop, err := convertDriverInfo(info)
+    if err != nil {
+        logrus.Error(err)
+        return err
+    }
+
+    err = createLayer(&infop, id, parent)
+    if err != nil {
+        err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
+        logrus.Error(err)
+        return err
+    }
+
+    logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
+    return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go b/vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go
new file mode 100644
index 0000000..b69c3da
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go
@@ -0,0 +1,35 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// CreateSandboxLayer creates and populates a new read-write layer for use by a container.
+// This requires both the id of the direct parent layer, as well as the full list
+// of paths to all parent layers up to the base (and including the direct parent
+// whose id was provided).
+func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
+    title := "hcsshim::CreateSandboxLayer "
+    logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)
+
+    // Generate layer descriptors
+    layers, err := layerPathsToDescriptors(parentLayerPaths)
+    if err != nil {
+        return err
+    }
+
+    // Convert info to API calling convention
+    infop, err := convertDriverInfo(info)
+    if err != nil {
+        logrus.Error(err)
+        return err
+    }
+
+    err = createSandboxLayer(&infop, layerId, parentId, layers)
+    if err != nil {
+        err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
+        logrus.Error(err)
+        return err
+    }
+
+    logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
+    return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/deactivatelayer.go
new file mode 100644
index 0000000..c02bcb3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/deactivatelayer.go
@@ -0,0 +1,26 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
+func DeactivateLayer(info DriverInfo, id string) error {
+    title := "hcsshim::DeactivateLayer "
+    logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+    // Convert info to API calling convention
+    infop, err := convertDriverInfo(info)
+    if err != nil {
+        logrus.Error(err)
+        return err
+    }
+
+    err = deactivateLayer(&infop, id)
+    if err != nil {
+        err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
+        logrus.Error(err)
+        return err
+    }
+
+    logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
+    return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/destroylayer.go
new file mode 100644
index 0000000..91ed269
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/destroylayer.go
@@ -0,0 +1,27 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// DestroyLayer will remove the on-disk files representing the layer with the given
+// id, including that layer's containing folder, if any.
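+// DestroyLayer is the destructive counterpart to CreateLayer/CreateSandboxLayer. A
+// mounted layer should normally be deactivated (see DeactivateLayer) before it is
+// destroyed; this note is an inference from the Activate/Deactivate pairing in this
+// package rather than a documented HCS requirement.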
+func DestroyLayer(info DriverInfo, id string) error {
+    title := "hcsshim::DestroyLayer "
+    logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+    // Convert info to API calling convention
+    infop, err := convertDriverInfo(info)
+    if err != nil {
+        logrus.Error(err)
+        return err
+    }
+
+    err = destroyLayer(&infop, id)
+    if err != nil {
+        err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
+        logrus.Error(err)
+        return err
+    }
+
+    logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
+    return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go
new file mode 100644
index 0000000..d2f9cc8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/errors.go
@@ -0,0 +1,239 @@
+package hcsshim
+
+import (
+    "errors"
+    "fmt"
+    "syscall"
+)
+
+var (
+    // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
+    ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
+
+    // ErrElementNotFound is an error encountered when the object being referenced does not exist
+    ErrElementNotFound = syscall.Errno(0x490)
+
+    // ErrNotSupported is an error encountered when hcs doesn't support the request
+    ErrNotSupported = syscall.Errno(0x32)
+
+    // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
+    // decimal -2147024883 / hex 0x8007000d
+    ErrInvalidData = syscall.Errno(0xd)
+
+    // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
+    ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
+
+    // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
+    ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
+
+    // ErrInvalidNotificationType is an error encountered when an invalid notification type is used
+    ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
+
+    // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
+    ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
+
+    // ErrTimeout is an error encountered when waiting on a notification times out
+    ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
+
+    // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
+    // a different expected notification
+    ErrUnexpectedContainerExit = errors.New("unexpected container exit")
+
+    // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
+    // is lost while waiting for a notification
+    ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
+
+    // ErrUnexpectedValue is an error encountered when hcs returns an invalid value
+    ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
+
+    // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
+    ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
+
+    // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
+    ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
+
+    // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
+    ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
+
+    // ErrProcNotFound is an error encountered when the process cannot be found
+    ErrProcNotFound = syscall.Errno(0x7f)
+
+    // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
+    // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
+    ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
+
+    // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+    ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
+
+    // ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+    ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
+
+    // ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+    ErrPlatformNotSupported = errors.New("unsupported platform request")
+)
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+    Process   *process
+    Operation string
+    ExtraInfo string
+    Err       error
+}
+
+// ContainerError is an error encountered in HCS during an operation on a Container object
+type ContainerError struct {
+    Container *container
+    Operation string
+    ExtraInfo string
+    Err       error
+}
+
+func (e *ContainerError) Error() string {
+    if e == nil {
+        return ""
+    }
+
+    if e.Container == nil {
+        return "unexpected nil container for error: " + e.Err.Error()
+    }
+
+    s := "container " + e.Container.id
+
+    if e.Operation != "" {
+        s += " encountered an error during " + e.Operation
+    }
+
+    switch e.Err.(type) {
+    case nil:
+        break
+    case syscall.Errno:
+        s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
+    default:
+        s += fmt.Sprintf(": %s", e.Err.Error())
+    }
+
+    if e.ExtraInfo != "" {
+        s += " extra info: " + e.ExtraInfo
+    }
+
+    return s
+}
+
+func makeContainerError(container *container, operation string, extraInfo string, err error) error {
+    // Don't double wrap errors
+    if _, ok := err.(*ContainerError); ok {
+        return err
+    }
+    containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
+    return containerError
+}
+
+func (e *ProcessError) Error() string {
+    if e == nil {
+        return ""
+    }
+
+    if e.Process == nil {
+        return "unexpected nil process for error: " + e.Err.Error()
+    }
+
+    s := fmt.Sprintf("process %d", e.Process.processID)
+
+    if e.Process.container != nil {
+        s += " in container " + e.Process.container.id
+    }
+
+    if e.Operation != "" {
+        s += " encountered an error during " + e.Operation
+    }
+
+    switch e.Err.(type) {
+    case nil:
+        break
+    case syscall.Errno:
+        s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
+    default:
+        s += fmt.Sprintf(": %s", e.Err.Error())
+    }
+
+    return s
+}
+
+func makeProcessError(process *process, operation string, extraInfo string, err error) error {
+    // Don't double wrap errors
+    if _, ok := err.(*ProcessError); ok {
+        return err
+    }
+    processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
+    return processError
+}
+
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+func IsNotExist(err error) bool {
+    err = getInnerError(err)
+    return err == ErrComputeSystemDoesNotExist ||
+        err == ErrElementNotFound ||
+        err == ErrProcNotFound
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+    err = getInnerError(err)
+    return err == ErrAlreadyClosed
+}
+
+// IsPending returns a boolean indicating whether the error is that
+// the requested operation is being completed in the background.
+func IsPending(err error) bool {
+    err = getInnerError(err)
+    return err == ErrVmcomputeOperationPending
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+    err = getInnerError(err)
+    return err == ErrTimeout
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
+func IsAlreadyStopped(err error) bool {
+    err = getInnerError(err)
+    return err == ErrVmcomputeAlreadyStopped ||
+        err == ErrElementNotFound ||
+        err == ErrProcNotFound
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// an unsupported platform request.
+// Note: Currently an unsupported platform request can mean that any of
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or
+// ErrVmcomputeUnknownMessage is returned by the platform.
+func IsNotSupported(err error) bool {
+    err = getInnerError(err)
+    // If the platform doesn't recognize or support the request sent, the errors below are seen
+    return err == ErrVmcomputeInvalidJSON ||
+        err == ErrInvalidData ||
+        err == ErrNotSupported ||
+        err == ErrVmcomputeUnknownMessage
+}
+
+func getInnerError(err error) error {
+    switch pe := err.(type) {
+    case nil:
+        return nil
+    case *ContainerError:
+        err = pe.Err
+    case *ProcessError:
+        err = pe.Err
+    }
+    return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go b/vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go
new file mode 100644
index 0000000..e168921
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go
@@ -0,0 +1,26 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// ExpandSandboxSize expands the size of a layer to at least size bytes.
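+// A minimal usage sketch (hypothetical values; di is a DriverInfo populated by the
+// caller) that grows a sandbox to at least 20 GiB:
+//
+//    if err := hcsshim.ExpandSandboxSize(di, "sandbox-layer-id", 20*1024*1024*1024); err != nil {
+//        // handle error
+//    }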
+func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { + title := "hcsshim::ExpandSandboxSize " + logrus.Debugf(title+"layerId=%s size=%d", layerId, size) + + // Convert info to API calling convention + infop, err := convertDriverInfo(info) + if err != nil { + logrus.Error(err) + return err + } + + err = expandSandboxSize(&infop, layerId, size) + if err != nil { + err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size) + logrus.Error(err) + return err + } + + logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size) + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/exportlayer.go new file mode 100644 index 0000000..3c9b24e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/exportlayer.go @@ -0,0 +1,156 @@ +package hcsshim + +import ( + "io" + "io/ioutil" + "os" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" +) + +// ExportLayer will create a folder at exportFolderPath and fill that folder with +// the transport format version of the layer identified by layerId. This transport +// format includes any metadata required for later importing the layer (using +// ImportLayer), and requires the full list of parent layer paths in order to +// perform the export. +func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { + title := "hcsshim::ExportLayer " + logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(parentLayerPaths) + if err != nil { + return err + } + + // Convert info to API calling convention + infop, err := convertDriverInfo(info) + if err != nil { + logrus.Error(err) + return err + } + + err = exportLayer(&infop, layerId, exportFolderPath, layers) + if err != nil { + err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath) + logrus.Error(err) + return err + } + + logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath) + return nil +} + +type LayerReader interface { + Next() (string, int64, *winio.FileBasicInfo, error) + Read(b []byte) (int, error) + Close() error +} + +// FilterLayerReader provides an interface for extracting the contents of an on-disk layer. +type FilterLayerReader struct { + context uintptr +} + +// Next reads the next available file from a layer, ensuring that parent directories are always read +// before child files and directories. +// +// Next returns the file's relative path, size, and basic file metadata. Read() should be used to +// extract a Win32 backup stream with the remainder of the metadata and the data. +func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) { + var fileNamep *uint16 + fileInfo := &winio.FileBasicInfo{} + var deleted uint32 + var fileSize int64 + err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted) + if err != nil { + if err == syscall.ERROR_NO_MORE_FILES { + err = io.EOF + } else { + err = makeError(err, "ExportLayerNext", "") + } + return "", 0, nil, err + } + fileName := convertAndFreeCoTaskMemString(fileNamep) + if deleted != 0 { + fileInfo = nil + } + if fileName[0] == '\\' { + fileName = fileName[1:] + } + return fileName, fileSize, fileInfo, nil +} + +// Read reads from the current file's Win32 backup stream. 
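+// The bytes returned are in Win32 backup-stream format; a sketch of consuming them
+// with the go-winio package imported by this file (assuming winio.NewBackupStreamReader):
+//
+//    br := winio.NewBackupStreamReader(r)
+//    hdr, err := br.Next() // walk the sub-streams (data, alternate streams, ...)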
+func (r *FilterLayerReader) Read(b []byte) (int, error) {
+    var bytesRead uint32
+    err := exportLayerRead(r.context, b, &bytesRead)
+    if err != nil {
+        return 0, makeError(err, "ExportLayerRead", "")
+    }
+    if bytesRead == 0 {
+        return 0, io.EOF
+    }
+    return int(bytesRead), nil
+}
+
+// Close frees resources associated with the layer reader. It will return an
+// error if there was an error while reading the layer or if the layer was not
+// completely read.
+func (r *FilterLayerReader) Close() (err error) {
+    if r.context != 0 {
+        err = exportLayerEnd(r.context)
+        if err != nil {
+            err = makeError(err, "ExportLayerEnd", "")
+        }
+        r.context = 0
+    }
+    return
+}
+
+// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
+// The caller must have taken the SeBackupPrivilege privilege
+// to call this and any methods on the resulting LayerReader.
+func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
+    if procExportLayerBegin.Find() != nil {
+        // The new layer reader is not available on this Windows build. Fall back to the
+        // legacy export code path.
+        path, err := ioutil.TempDir("", "hcs")
+        if err != nil {
+            return nil, err
+        }
+        err = ExportLayer(info, layerID, path, parentLayerPaths)
+        if err != nil {
+            os.RemoveAll(path)
+            return nil, err
+        }
+        return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
+    }
+
+    layers, err := layerPathsToDescriptors(parentLayerPaths)
+    if err != nil {
+        return nil, err
+    }
+    infop, err := convertDriverInfo(info)
+    if err != nil {
+        return nil, err
+    }
+    r := &FilterLayerReader{}
+    err = exportLayerBegin(&infop, layerID, layers, &r.context)
+    if err != nil {
+        return nil, makeError(err, "ExportLayerBegin", "")
+    }
+    return r, err
+}
+
+type legacyLayerReaderWrapper struct {
+    *legacyLayerReader
+}
+
+func (r *legacyLayerReaderWrapper) Close() error {
+    err := r.legacyLayerReader.Close()
+    os.RemoveAll(r.root)
+    return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/getlayermountpath.go
new file mode 100644
index 0000000..41b5758
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/getlayermountpath.go
@@ -0,0 +1,55 @@
+package hcsshim
+
+import (
+    "syscall"
+
+    "github.com/Sirupsen/logrus"
+)
+
+// GetLayerMountPath will look for a mounted layer with the given id and return
+// the path at which that layer can be accessed. This path may be a volume path
+// if the layer is a mounted read-write layer, otherwise it is expected to be the
+// folder path at which the layer is stored.
+func GetLayerMountPath(info DriverInfo, id string) (string, error) {
+    title := "hcsshim::GetLayerMountPath "
+    logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+    // Convert info to API calling convention
+    infop, err := convertDriverInfo(info)
+    if err != nil {
+        logrus.Error(err)
+        return "", err
+    }
+
+    // A zero length (and nil buffer) on the first call asks the API for the
+    // required buffer size.
+    var mountPathLength uintptr
+
+    // Call the procedure itself.
+    logrus.Debugf("Calling proc (1)")
+    err = getLayerMountPath(&infop, id, &mountPathLength, nil)
+    if err != nil {
+        err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
+        logrus.Error(err)
+        return "", err
+    }
+
+    // Allocate a mount path of the returned length.
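+    // A zero length from the first call means there is no mount path to report;
+    // per the check below, the function then returns an empty string rather than
+    // an error.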
+ if mountPathLength == 0 { + return "", nil + } + mountPathp := make([]uint16, mountPathLength) + mountPathp[0] = 0 + + // Call the procedure again + logrus.Debugf("Calling proc (2)") + err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0]) + if err != nil { + err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour) + logrus.Error(err) + return "", err + } + + path := syscall.UTF16ToString(mountPathp[0:]) + logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path) + return path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go new file mode 100644 index 0000000..01ab4da --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go @@ -0,0 +1,22 @@ +package hcsshim + +import "github.com/Sirupsen/logrus" + +// GetSharedBaseImages will enumerate the images stored in the common central +// image store and return descriptive info about those images for the purpose +// of registering them with the graphdriver, graph, and tagstore. +func GetSharedBaseImages() (imageData string, err error) { + title := "hcsshim::GetSharedBaseImages " + + logrus.Debugf("Calling proc") + var buffer *uint16 + err = getBaseImages(&buffer) + if err != nil { + err = makeError(err, title, "") + logrus.Error(err) + return + } + imageData = convertAndFreeCoTaskMemString(buffer) + logrus.Debugf(title+" - succeeded output=%s", imageData) + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/guid.go b/vendor/github.com/Microsoft/hcsshim/guid.go new file mode 100644 index 0000000..620aba1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/guid.go @@ -0,0 +1,19 @@ +package hcsshim + +import ( + "crypto/sha1" + "fmt" +) + +type GUID [16]byte + +func NewGUID(source string) *GUID { + h := sha1.Sum([]byte(source)) + var g GUID + copy(g[0:], h[0:16]) + return &g +} + +func (g *GUID) ToString() string { + return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:]) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go new file mode 100644 index 0000000..3cb3a29 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go @@ -0,0 +1,166 @@ +// Shim for the Host Compute Service (HCS) to manage Windows Server +// containers and Hyper-V containers. + +package hcsshim + +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go + +//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree +//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId + +//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? +//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? +//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? +//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? +//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? +//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? 
+//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? +//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? +//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? +//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? +//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? +//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? +//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid? +//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? +//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? +//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? +//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? + +//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin? +//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext? +//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite? +//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd? + +//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin? +//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext? +//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead? +//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd? + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? +//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? +//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? +//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? 
+//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? + +//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? +//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? + +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? + +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? + +const ( + // Specific user-visible exit codes + WaitErrExecFailed = 32767 + + ERROR_GEN_FAILURE = syscall.Errno(31) + ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) + WSAEINVAL = syscall.Errno(10022) + + // Timeout on wait calls + TimeoutInfinite = 0xFFFFFFFF +) + +type HcsError struct { + title string + rest string + Err error +} + +type hcsSystem syscall.Handle +type hcsProcess syscall.Handle +type hcsCallback syscall.Handle + +type hcsProcessInformation struct { + ProcessId uint32 + Reserved uint32 + StdInput syscall.Handle + StdOutput syscall.Handle + StdError syscall.Handle +} + +func makeError(err error, title, rest string) error { + // Pass through DLL errors directly since they do not originate from HCS. 
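+    // A *syscall.DLLError typically means vmcompute.dll (or one of its entry
+    // points) failed to load, so wrapping it as an HcsError would be misleading.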
+    if _, ok := err.(*syscall.DLLError); ok {
+        return err
+    }
+    return &HcsError{title, rest, err}
+}
+
+func makeErrorf(err error, title, format string, a ...interface{}) error {
+    return makeError(err, title, fmt.Sprintf(format, a...))
+}
+
+func win32FromError(err error) uint32 {
+    if herr, ok := err.(*HcsError); ok {
+        return win32FromError(herr.Err)
+    }
+    if code, ok := err.(syscall.Errno); ok {
+        return uint32(code)
+    }
+    return uint32(ERROR_GEN_FAILURE)
+}
+
+func win32FromHresult(hr uintptr) uintptr {
+    // An HRESULT of the form 0x8007xxxx wraps a Win32 error code in its low 16 bits.
+    if hr&0x1fff0000 == 0x00070000 {
+        return hr & 0xffff
+    }
+    return hr
+}
+
+func (e *HcsError) Error() string {
+    s := e.title
+    if len(s) > 0 && s[len(s)-1] != ' ' {
+        s += " "
+    }
+    s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
+    if e.rest != "" {
+        if e.rest[0] != ' ' {
+            s += " "
+        }
+        s += e.rest
+    }
+    return s
+}
+
+func convertAndFreeCoTaskMemString(buffer *uint16) string {
+    str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
+    coTaskMemFree(unsafe.Pointer(buffer))
+    return str
+}
+
+func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
+    return []byte(convertAndFreeCoTaskMemString(buffer))
+}
+
+func processHcsResult(err error, resultp *uint16) error {
+    if resultp != nil {
+        result := convertAndFreeCoTaskMemString(resultp)
+        logrus.Debugf("Result: %s", result)
+    }
+    return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/hnsfuncs.go
new file mode 100644
index 0000000..97ec2e8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsfuncs.go
@@ -0,0 +1,271 @@
+package hcsshim
+
+import (
+    "encoding/json"
+    "fmt"
+    "net"
+
+    "github.com/Sirupsen/logrus"
+)
+
+type NatPolicy struct {
+    Type         string
+    Protocol     string
+    InternalPort uint16
+    ExternalPort uint16
+}
+
+type QosPolicy struct {
+    Type                            string
+    MaximumOutgoingBandwidthInBytes uint64
+}
+
+type VlanPolicy struct {
+    Type string
+    VLAN uint
+}
+
+type VsidPolicy struct {
+    Type string
+    VSID uint
+}
+
+type PaPolicy struct {
+    Type string
+    PA   string
+}
+
+// Subnet is associated with a network and represents one of the subnets
+// available to the network
+type Subnet struct {
+    AddressPrefix  string            `json:",omitempty"`
+    GatewayAddress string            `json:",omitempty"`
+    Policies       []json.RawMessage `json:",omitempty"`
+}
+
+// MacPool is associated with a network and represents a range of
+// MAC addresses available to the network
+type MacPool struct {
+    StartMacAddress string `json:",omitempty"`
+    EndMacAddress   string `json:",omitempty"`
+}
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork struct {
+    Id                   string            `json:"ID,omitempty"`
+    Name                 string            `json:",omitempty"`
+    Type                 string            `json:",omitempty"`
+    NetworkAdapterName   string            `json:",omitempty"`
+    SourceMac            string            `json:",omitempty"`
+    Policies             []json.RawMessage `json:",omitempty"`
+    MacPools             []MacPool         `json:",omitempty"`
+    Subnets              []Subnet          `json:",omitempty"`
+    DNSSuffix            string            `json:",omitempty"`
+    DNSServerList        string            `json:",omitempty"`
+    DNSServerCompartment uint32            `json:",omitempty"`
+    ManagementIP         string            `json:",omitempty"`
+}
+
+// HNSEndpoint represents a network endpoint in HNS
+type HNSEndpoint struct {
+    Id                 string            `json:"ID,omitempty"`
+    Name               string            `json:",omitempty"`
+    VirtualNetwork     string            `json:",omitempty"`
+    VirtualNetworkName string            `json:",omitempty"`
+    Policies           []json.RawMessage `json:",omitempty"`
+    MacAddress         string            `json:",omitempty"`
+    IPAddress          net.IP            `json:",omitempty"`
+    DNSSuffix          string            `json:",omitempty"`
+    DNSServerList      string            `json:",omitempty"`
+    GatewayAddress     string            `json:",omitempty"`
+    EnableInternalDNS  bool              `json:",omitempty"`
+    DisableICC         bool              `json:",omitempty"`
+    PrefixLength       uint8             `json:",omitempty"`
+    IsRemoteEndpoint   bool              `json:",omitempty"`
+}
+
+type hnsNetworkResponse struct {
+    Success bool
+    Error   string
+    Output  HNSNetwork
+}
+
+type hnsResponse struct {
+    Success bool
+    Error   string
+    Output  json.RawMessage
+}
+
+func hnsCall(method, path, request string, returnResponse interface{}) error {
+    var responseBuffer *uint16
+    logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
+
+    err := _hnsCall(method, path, request, &responseBuffer)
+    if err != nil {
+        return makeError(err, "hnsCall ", "")
+    }
+    response := convertAndFreeCoTaskMemString(responseBuffer)
+
+    hnsresponse := &hnsResponse{}
+    if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
+        return err
+    }
+
+    if !hnsresponse.Success {
+        return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
+    }
+
+    if len(hnsresponse.Output) == 0 {
+        return nil
+    }
+
+    logrus.Debugf("Network Response : %s", hnsresponse.Output)
+    err = json.Unmarshal(hnsresponse.Output, returnResponse)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+    var network HNSNetwork
+    err := hnsCall(method, "/networks/"+path, request, &network)
+    if err != nil {
+        return nil, err
+    }
+
+    return &network, nil
+}
+
+// HNSListNetworkRequest makes an HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+    var network []HNSNetwork
+    err := hnsCall(method, "/networks/"+path, request, &network)
+    if err != nil {
+        return nil, err
+    }
+
+    return network, nil
+}
+
+// HNSEndpointRequest makes an HNS call to modify/query a network endpoint
+func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
+    endpoint := &HNSEndpoint{}
+    err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
+    if err != nil {
+        return nil, err
+    }
+
+    return endpoint, nil
+}
+
+// HNSListEndpointRequest makes an HNS call to query the list of available endpoints
+func HNSListEndpointRequest() ([]HNSEndpoint, error) {
+    var endpoint []HNSEndpoint
+    err := hnsCall("GET", "/endpoints/", "", &endpoint)
+    if err != nil {
+        return nil, err
+    }
+
+    return endpoint, nil
+}
+
+// HotAttachEndpoint makes an HCS call to attach the endpoint to the container
+func HotAttachEndpoint(containerID string, endpointID string) error {
+    return modifyNetworkEndpoint(containerID, endpointID, Add)
+}
+
+// HotDetachEndpoint makes an HCS call to detach the endpoint from the container
+func HotDetachEndpoint(containerID string, endpointID string) error {
+    return modifyNetworkEndpoint(containerID, endpointID, Remove)
+}
+
+// modifyContainer modifies the container matching the given id by sending the request
+func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
+    container, err := OpenContainer(id)
+    if err != nil {
+        if IsNotExist(err) {
+            return ErrComputeSystemDoesNotExist
+        }
+        return getInnerError(err)
+    }
+    defer container.Close()
+    err = container.Modify(request)
+    if err != nil {
+        if IsNotSupported(err) {
+            return ErrPlatformNotSupported
+        }
+        return getInnerError(err)
+    }
+
+    return nil
+}
+
+func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
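+    // The hot attach/detach is expressed as a Network resource modification whose
+    // Settings payload is the endpoint ID (see ResourceModificationRequestResponse
+    // in container.go); HCS resolves the endpoint from that ID.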
+	requestMessage := &ResourceModificationRequestResponse{
+		Resource: Network,
+		Request:  request,
+		Data:     endpointID,
+	}
+	err := modifyContainer(containerID, requestMessage)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetHNSNetworkByID returns the network matching the given ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+	return HNSNetworkRequest("GET", networkID, "")
+}
+
+// GetHNSNetworkByName returns the network matching the given name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+	hsnnetworks, err := HNSListNetworkRequest("GET", "", "")
+	if err != nil {
+		return nil, err
+	}
+	for _, hnsnetwork := range hsnnetworks {
+		if hnsnetwork.Name == networkName {
+			return &hnsnetwork, nil
+		}
+	}
+	return nil, fmt.Errorf("Network %v not found", networkName)
+}
+
+// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
+func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
+	jsonString, err := json.Marshal(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	return HNSEndpointRequest("POST", "", string(jsonString))
+}
+
+// Delete Endpoint by sending EndpointRequest to HNS
+func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
+	return HNSEndpointRequest("DELETE", endpoint.Id, "")
+}
+
+// GetHNSEndpointByID returns the endpoint matching the given ID
+func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
+	return HNSEndpointRequest("GET", endpointID, "")
+}
+
+// GetHNSEndpointByName returns the endpoint matching the given name
+func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
+	hnsResponse, err := HNSListEndpointRequest()
+	if err != nil {
+		return nil, err
+	}
+	for _, hnsEndpoint := range hnsResponse {
+		if hnsEndpoint.Name == endpointName {
+			return &hnsEndpoint, nil
+		}
+	}
+	return nil, fmt.Errorf("Endpoint %v not found", endpointName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/importlayer.go b/vendor/github.com/Microsoft/hcsshim/importlayer.go
new file mode 100644
index 0000000..75dcd94
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/importlayer.go
@@ -0,0 +1,212 @@
+package hcsshim
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/Microsoft/go-winio"
+	"github.com/Sirupsen/logrus"
+)
+
+// ImportLayer will take the contents of the folder at importFolderPath and import
+// that into a layer with the id layerId. Note that in order to correctly populate
+// the layer and interpret the transport format, all parent layers must already
+// be present on the system at the paths provided in parentLayerPaths.
+func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
+	title := "hcsshim::ImportLayer "
+	logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
+
+	// Generate layer descriptors
+	layers, err := layerPathsToDescriptors(parentLayerPaths)
+	if err != nil {
+		return err
+	}
+
+	// Convert info to API calling convention
+	infop, err := convertDriverInfo(info)
+	if err != nil {
+		logrus.Error(err)
+		return err
+	}
+
+	err = importLayer(&infop, layerID, importFolderPath, layers)
+	if err != nil {
+		err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
+		logrus.Error(err)
+		return err
+	}
+
+	logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
+	return nil
+}
+
+// LayerWriter is an interface that supports writing a new container image layer.
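+//
+// A typical import, sketched from the caller's side (error handling elided;
+// the names fed to each call are placeholders, not defined in this file):
+//
+//	w, _ := hcsshim.NewLayerWriter(info, layerID, parentLayerPaths)
+//	_ = w.Add(name, fileInfo)       // once per new file or directory
+//	_, _ = w.Write(backupStream)    // Win32 backup stream data for that file
+//	_ = w.AddLink(linkName, target) // once per hard link
+//	_ = w.Remove(deletedName)       // once per file removed vs. the parent
+//	_ = w.Close()                   // must be checked in real code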
+type LayerWriter interface { + // Add adds a file to the layer with given metadata. + Add(name string, fileInfo *winio.FileBasicInfo) error + // AddLink adds a hard link to the layer. The target must already have been added. + AddLink(name string, target string) error + // Remove removes a file that was present in a parent layer from the layer. + Remove(name string) error + // Write writes data to the current file. The data must be in the format of a Win32 + // backup stream. + Write(b []byte) (int, error) + // Close finishes the layer writing process and releases any resources. + Close() error +} + +// FilterLayerWriter provides an interface to write the contents of a layer to the file system. +type FilterLayerWriter struct { + context uintptr +} + +// Add adds a file or directory to the layer. The file's parent directory must have already been added. +// +// name contains the file's relative path. fileInfo contains file times and file attributes; the rest +// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method. +// winio.BackupStreamWriter can be used to facilitate this. +func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { + if name[0] != '\\' { + name = `\` + name + } + err := importLayerNext(w.context, name, fileInfo) + if err != nil { + return makeError(err, "ImportLayerNext", "") + } + return nil +} + +// AddLink adds a hard link to the layer. The target of the link must have already been added. +func (w *FilterLayerWriter) AddLink(name string, target string) error { + return errors.New("hard links not yet supported") +} + +// Remove removes a file from the layer. The file must have been present in the parent layer. +// +// name contains the file's relative path. +func (w *FilterLayerWriter) Remove(name string) error { + if name[0] != '\\' { + name = `\` + name + } + err := importLayerNext(w.context, name, nil) + if err != nil { + return makeError(err, "ImportLayerNext", "") + } + return nil +} + +// Write writes more backup stream data to the current file. +func (w *FilterLayerWriter) Write(b []byte) (int, error) { + err := importLayerWrite(w.context, b) + if err != nil { + err = makeError(err, "ImportLayerWrite", "") + return 0, err + } + return len(b), err +} + +// Close completes the layer write operation. The error must be checked to ensure that the +// operation was successful. +func (w *FilterLayerWriter) Close() (err error) { + if w.context != 0 { + err = importLayerEnd(w.context) + if err != nil { + err = makeError(err, "ImportLayerEnd", "") + } + w.context = 0 + } + return +} + +type legacyLayerWriterWrapper struct { + *legacyLayerWriter + info DriverInfo + layerID string + path string + parentLayerPaths []string +} + +func (r *legacyLayerWriterWrapper) Close() error { + defer os.RemoveAll(r.root) + err := r.legacyLayerWriter.Close() + if err != nil { + return err + } + + // Use the original path here because ImportLayer does not support long paths for the source in TP5. + // But do use a long path for the destination to work around another bug with directories + // with MAX_PATH - 12 < length < MAX_PATH. + info := r.info + fullPath, err := makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID)) + if err != nil { + return err + } + + info.HomeDir = "" + if err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths); err != nil { + return err + } + // Add any hard links that were collected. 
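+	// They could not be created while the layer was being written (see
+	// AddLink): the link targets only exist on disk once the ImportLayer
+	// call above has completed.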
+	for _, lnk := range r.PendingLinks {
+		if err = os.Remove(lnk.Path); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+		if err = os.Link(lnk.Target, lnk.Path); err != nil {
+			return err
+		}
+	}
+	// Prepare the utility VM for use if one is present in the layer.
+	if r.HasUtilityVM {
+		err = ProcessUtilityVMImage(filepath.Join(fullPath, "UtilityVM"))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NewLayerWriter returns a new layer writer for creating a layer on disk.
+// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
+// to call this and any methods on the resulting LayerWriter.
+func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
+	if len(parentLayerPaths) == 0 {
+		// This is a base layer. It gets imported differently.
+		return &baseLayerWriter{
+			root: filepath.Join(info.HomeDir, layerID),
+		}, nil
+	}
+
+	if procImportLayerBegin.Find() != nil {
+		// The new layer writer is not available on this Windows build. Fall back to the
+		// legacy import code path.
+		path, err := ioutil.TempDir("", "hcs")
+		if err != nil {
+			return nil, err
+		}
+		return &legacyLayerWriterWrapper{
+			legacyLayerWriter: newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)),
+			info:              info,
+			layerID:           layerID,
+			path:              path,
+			parentLayerPaths:  parentLayerPaths,
+		}, nil
+	}
+	layers, err := layerPathsToDescriptors(parentLayerPaths)
+	if err != nil {
+		return nil, err
+	}
+
+	infop, err := convertDriverInfo(info)
+	if err != nil {
+		return nil, err
+	}
+
+	w := &FilterLayerWriter{}
+	err = importLayerBegin(&infop, layerID, layers, &w.context)
+	if err != nil {
+		return nil, makeError(err, "ImportLayerStart", "")
+	}
+	return w, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go
new file mode 100644
index 0000000..5238a64
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/interface.go
@@ -0,0 +1,174 @@
+package hcsshim
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// ProcessConfig is used as both the input of Container.CreateProcess
+// and to convert the parameters to JSON for passing onto the HCS
+type ProcessConfig struct {
+	ApplicationName   string            `json:",omitempty"`
+	CommandLine       string            `json:",omitempty"`
+	CommandArgs       []string          `json:",omitempty"` // Used by Linux Containers on Windows
+	User              string            `json:",omitempty"`
+	WorkingDirectory  string            `json:",omitempty"`
+	Environment       map[string]string `json:",omitempty"`
+	EmulateConsole    bool              `json:",omitempty"`
+	CreateStdInPipe   bool              `json:",omitempty"`
+	CreateStdOutPipe  bool              `json:",omitempty"`
+	CreateStdErrPipe  bool              `json:",omitempty"`
+	ConsoleSize       [2]uint           `json:",omitempty"`
+	CreateInUtilityVm bool              `json:",omitempty"` // Used by Linux Containers on Windows
+	OCISpecification  *json.RawMessage  `json:",omitempty"` // Used by Linux Containers on Windows
+}
+
+type Layer struct {
+	ID   string
+	Path string
+}
+
+type MappedDir struct {
+	HostPath         string
+	ContainerPath    string
+	ReadOnly         bool
+	BandwidthMaximum uint64
+	IOPSMaximum      uint64
+}
+
+type HvRuntime struct {
+	ImagePath       string `json:",omitempty"`
+	SkipTemplate    bool   `json:",omitempty"`
+	LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
+	LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
+}
+
+type MappedVirtualDisk struct {
+	HostPath          string `json:",omitempty"` // Path to VHD on the host
+	ContainerPath     string // Platform-specific mount point path in the container
+	CreateInUtilityVM bool   `json:",omitempty"`
+	ReadOnly          bool   `json:",omitempty"`
+	Cache             string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
+}
+
+// ContainerConfig is used as both the input of CreateContainer
+// and to convert the parameters to JSON for passing onto the HCS
+type ContainerConfig struct {
+	SystemType                  string              // HCS requires this to be hard-coded to "Container"
+	Name                        string              // Name of the container. We use the docker ID.
+	Owner                       string              `json:",omitempty"` // The management platform that created this container
+	VolumePath                  string              `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
+	IgnoreFlushesDuringBoot     bool                `json:",omitempty"` // Optimization hint for container startup in Windows
+	LayerFolderPath             string              `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
+	Layers                      []Layer             // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
+	Credentials                 string              `json:",omitempty"` // Credentials information
+	ProcessorCount              uint32              `json:",omitempty"` // Number of processors to assign to the container.
+	ProcessorWeight             uint64              `json:",omitempty"` // CPU Shares 0..10000 on Windows; where 0 will be omitted and HCS will default.
+	ProcessorMaximum            int64               `json:",omitempty"` // CPU maximum usage percent 1..100
+	StorageIOPSMaximum          uint64              `json:",omitempty"` // Maximum Storage IOPS
+	StorageBandwidthMaximum     uint64              `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
+	StorageSandboxSize          uint64              `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
+	MemoryMaximumInMB           int64               `json:",omitempty"` // Maximum memory available to the container in Megabytes
+	HostName                    string              `json:",omitempty"` // Hostname
+	MappedDirectories           []MappedDir         `json:",omitempty"` // List of mapped directories (volumes/mounts)
+	HvPartition                 bool                // True if it is a Hyper-V Container
+	NetworkSharedContainerName  string              `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
+	EndpointList                []string            `json:",omitempty"` // List of networking endpoints to be attached to container
+	HvRuntime                   *HvRuntime          `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
+	Servicing                   bool                `json:",omitempty"` // True if this container is for servicing
+	AllowUnqualifiedDNSQuery    bool                `json:",omitempty"` // True to allow unqualified DNS name resolution
+	DNSSearchList               string              `json:",omitempty"` // Comma-separated list of DNS suffixes to use for name resolution
+	ContainerType               string              `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
+	TerminateOnLastHandleClosed bool                `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
+	MappedVirtualDisks          []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
+}
+
+type ComputeSystemQuery struct {
+	IDs    []string `json:"Ids,omitempty"`
+	Types  []string `json:",omitempty"`
+	Names  []string `json:",omitempty"`
+	Owners []string `json:",omitempty"`
+}
+
+// Container represents a created (but not necessarily running) container.
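+//
+// A minimal lifecycle sketch from the caller's side (error handling elided;
+// CreateContainer is defined elsewhere in this package, and id/config are
+// placeholders):
+//
+//	c, _ := hcsshim.CreateContainer(id, config)
+//	_ = c.Start()
+//	p, _ := c.CreateProcess(&hcsshim.ProcessConfig{CommandLine: "cmd /c echo hi"})
+//	_ = p.Wait()
+//	_ = c.Shutdown() // then Wait() for the shutdown to take effect
+//	_ = c.Close()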
+type Container interface {
+	// Start synchronously starts the container.
+	Start() error
+
+	// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
+	Shutdown() error
+
+	// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
+	Terminate() error
+
+	// Wait synchronously waits for the container to shutdown or terminate.
+	Wait() error
+
+	// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
+	// returns ErrTimeout if the timeout elapses first.
+	WaitTimeout(time.Duration) error
+
+	// Pause pauses the execution of a container.
+	Pause() error
+
+	// Resume resumes the execution of a container.
+	Resume() error
+
+	// HasPendingUpdates returns true if the container has updates pending to install.
+	HasPendingUpdates() (bool, error)
+
+	// Statistics returns statistics for a container.
+	Statistics() (Statistics, error)
+
+	// ProcessList returns details for the processes in a container.
+	ProcessList() ([]ProcessListItem, error)
+
+	// CreateProcess launches a new process within the container.
+	CreateProcess(c *ProcessConfig) (Process, error)
+
+	// OpenProcess gets an interface to an existing process within the container.
+	OpenProcess(pid int) (Process, error)
+
+	// Close cleans up any state associated with the container but does not terminate or wait for it.
+	Close() error
+
+	// Modify the System
+	Modify(config *ResourceModificationRequestResponse) error
+}
+
+// Process represents a running or exited process.
+type Process interface {
+	// Pid returns the process ID of the process within the container.
+	Pid() int
+
+	// Kill signals the process to terminate but does not wait for it to finish terminating.
+	Kill() error
+
+	// Wait waits for the process to exit.
+	Wait() error
+
+	// WaitTimeout waits for the process to exit or the duration to elapse. It returns
+	// ErrTimeout if the timeout elapses first.
+	WaitTimeout(time.Duration) error
+
+	// ExitCode returns the exit code of the process. The process must have
+	// already terminated.
+	ExitCode() (int, error)
+
+	// ResizeConsole resizes the console of the process.
+	ResizeConsole(width, height uint16) error
+
+	// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
+	// these pipes does not close the underlying pipes; it should be possible to
+	// call this multiple times to get multiple interfaces.
+	Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
+
+	// CloseStdin closes the write side of the stdin pipe so that the process is
+	// notified on the read side that there is no more data in stdin.
+	CloseStdin() error
+
+	// Close cleans up any state associated with the process but does not kill
+	// or wait on it.
+	Close() error
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/layerexists.go b/vendor/github.com/Microsoft/hcsshim/layerexists.go
new file mode 100644
index 0000000..522d95c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/layerexists.go
@@ -0,0 +1,30 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// LayerExists will return true if a layer with the given id exists and is known
+// to the system.
+func LayerExists(info DriverInfo, id string) (bool, error) {
+	title := "hcsshim::LayerExists "
+	logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
+
+	// Convert info to API calling convention
+	infop, err := convertDriverInfo(info)
+	if err != nil {
+		logrus.Error(err)
+		return false, err
+	}
+
+	// Call the procedure itself.
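+	// The syscall reports the result through a Win32 BOOL, so receive it as a
+	// uint32 and treat any non-zero value as true.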
+ var exists uint32 + + err = layerExists(&infop, id, &exists) + if err != nil { + err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour) + logrus.Error(err) + return false, err + } + + logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists) + return exists != 0, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/layerutils.go b/vendor/github.com/Microsoft/hcsshim/layerutils.go new file mode 100644 index 0000000..47229d2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/layerutils.go @@ -0,0 +1,111 @@ +package hcsshim + +// This file contains utility functions to support storage (graph) related +// functionality. + +import ( + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" +) + +/* To pass into syscall, we need a struct matching the following: +enum GraphDriverType +{ + DiffDriver, + FilterDriver +}; + +struct DriverInfo { + GraphDriverType Flavour; + LPCWSTR HomeDir; +}; +*/ +type DriverInfo struct { + Flavour int + HomeDir string +} + +type driverInfo struct { + Flavour int + HomeDirp *uint16 +} + +func convertDriverInfo(info DriverInfo) (driverInfo, error) { + homedirp, err := syscall.UTF16PtrFromString(info.HomeDir) + if err != nil { + logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error()) + return driverInfo{}, err + } + + return driverInfo{ + Flavour: info.Flavour, + HomeDirp: homedirp, + }, nil +} + +/* To pass into syscall, we need a struct matching the following: +typedef struct _WC_LAYER_DESCRIPTOR { + + // + // The ID of the layer + // + + GUID LayerId; + + // + // Additional flags + // + + union { + struct { + ULONG Reserved : 31; + ULONG Dirty : 1; // Created from sandbox as a result of snapshot + }; + ULONG Value; + } Flags; + + // + // Path to the layer root directory, null-terminated + // + + PCWSTR Path; + +} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; +*/ +type WC_LAYER_DESCRIPTOR struct { + LayerId GUID + Flags uint32 + Pathp *uint16 +} + +func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { + // Array of descriptors that gets constructed. 
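+	// Each parent path contributes one descriptor: the final path element (the
+	// layer ID) is hashed into a GUID via NameToGuid, and the full path is
+	// passed through as a UTF-16 pointer for the syscall.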
+ var layers []WC_LAYER_DESCRIPTOR + + for i := 0; i < len(parentLayerPaths); i++ { + // Create a layer descriptor, using the folder name + // as the source for a GUID LayerId + _, folderName := filepath.Split(parentLayerPaths[i]) + g, err := NameToGuid(folderName) + if err != nil { + logrus.Debugf("Failed to convert name to guid %s", err) + return nil, err + } + + p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) + if err != nil { + logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err) + return nil, err + } + + layers = append(layers, WC_LAYER_DESCRIPTOR{ + LayerId: g, + Flags: 0, + Pathp: p, + }) + } + + return layers, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/legacy.go b/vendor/github.com/Microsoft/hcsshim/legacy.go new file mode 100644 index 0000000..8576157 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/legacy.go @@ -0,0 +1,731 @@ +package hcsshim + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" +) + +var errorIterationCanceled = errors.New("") + +var mutatedUtilityVMFiles = map[string]bool{ + `EFI\Microsoft\Boot\BCD`: true, + `EFI\Microsoft\Boot\BCD.LOG`: true, + `EFI\Microsoft\Boot\BCD.LOG1`: true, + `EFI\Microsoft\Boot\BCD.LOG2`: true, +} + +const ( + filesPath = `Files` + hivesPath = `Hives` + utilityVMPath = `UtilityVM` + utilityVMFilesPath = `UtilityVM\Files` +) + +func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { + return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) +} + +func makeLongAbsPath(path string) (string, error) { + if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { + return path, nil + } + if !filepath.IsAbs(path) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + path = absPath + } + if strings.HasPrefix(path, `\\`) { + return `\\?\UNC\` + path[2:], nil + } + return `\\?\` + path, nil +} + +func hasPathPrefix(p, prefix string) bool { + return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' +} + +type fileEntry struct { + path string + fi os.FileInfo + err error +} + +type legacyLayerReader struct { + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +// newLegacyLayerReader returns a new LayerReader that can read the Windows +// container layer transport format from disk. 
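+//
+// The reader walks the layer on a separate goroutine and hands entries over
+// the result/proceed channel pair; callers drive it through Next/Read until
+// io.EOF and must call Close to stop the walk. A caller-side sketch (error
+// handling elided):
+//
+//	r := newLegacyLayerReader(root)
+//	for {
+//		name, size, fileInfo, err := r.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		// fileInfo == nil marks a tombstone (a file deleted in this layer).
+//		_, _ = name, size
+//		io.Copy(ioutil.Discard, r) // the file's backup stream data
+//	}
+//	_ = r.Close()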
+func newLegacyLayerReader(root string) *legacyLayerReader { + r := &legacyLayerReader{ + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func readTombstones(path string) (map[string]([]string), error) { + tf, err := os.Open(filepath.Join(path, "tombstones.txt")) + if err != nil { + return nil, err + } + defer tf.Close() + s := bufio.NewScanner(tf) + if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { + return nil, errors.New("Invalid tombstones file") + } + + ts := make(map[string]([]string)) + for s.Scan() { + t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` + dir := filepath.Dir(t) + ts[dir] = append(ts[dir], t) + } + if err = s.Err(); err != nil { + return nil, err + } + + return ts, nil +} + +func (r *legacyLayerReader) walkUntilCancelled() error { + root, err := makeLongAbsPath(r.root) + if err != nil { + return err + } + + r.root = root + ts, err := readTombstones(r.root) + if err != nil { + return err + } + + err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + // List all the tombstones. + if info.IsDir() { + relPath, err := filepath.Rel(r.root, path) + if err != nil { + return err + } + if dts, ok := ts[relPath]; ok { + for _, t := range dts { + r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} + if !<-r.proceed { + return errorIterationCanceled + } + } + } + } + return nil + }) + if err == errorIterationCanceled { + return nil + } + if err == nil { + return io.EOF + } + return err +} + +func (r *legacyLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *legacyLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func findBackupStreamSize(r io.Reader) (int64, error) { + br := winio.NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return 0, err + } + if hdr.Id == winio.BackupData { + return hdr.Size, nil + } + } +} + +func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("LegacyLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + if fe.fi == nil { + // This is a tombstone. Return a nil fileInfo. 
+		return
+	}
+
+	if fe.fi.IsDir() && hasPathPrefix(path, filesPath) {
+		fe.path += ".$wcidirs$"
+	}
+
+	f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if f != nil {
+			f.Close()
+		}
+	}()
+
+	fileInfo, err = winio.GetFileBasicInfo(f)
+	if err != nil {
+		return
+	}
+
+	if !hasPathPrefix(path, filesPath) {
+		size = fe.fi.Size()
+		r.backupReader = winio.NewBackupFileReader(f, false)
+		if path == hivesPath || path == filesPath {
+			// The Hives directory has a non-deterministic file time because of the
+			// nature of the import process. Use the times from System_Delta.
+			var g *os.File
+			g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
+			if err != nil {
+				return
+			}
+			attr := fileInfo.FileAttributes
+			fileInfo, err = winio.GetFileBasicInfo(g)
+			g.Close()
+			if err != nil {
+				return
+			}
+			fileInfo.FileAttributes = attr
+		}
+
+		// The creation time and access time get reset for files outside of the Files path.
+		fileInfo.CreationTime = fileInfo.LastWriteTime
+		fileInfo.LastAccessTime = fileInfo.LastWriteTime
+
+	} else {
+		// The file attributes are written before the backup stream.
+		var attr uint32
+		err = binary.Read(f, binary.LittleEndian, &attr)
+		if err != nil {
+			return
+		}
+		fileInfo.FileAttributes = uintptr(attr)
+		beginning := int64(4)
+
+		// Find the accurate file size.
+		if !fe.fi.IsDir() {
+			size, err = findBackupStreamSize(f)
+			if err != nil {
+				err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
+				return
+			}
+		}
+
+		// Return back to the beginning of the backup stream.
+		_, err = f.Seek(beginning, 0)
+		if err != nil {
+			return
+		}
+	}
+
+	r.currentFile = f
+	f = nil
+	return
+}
+
+func (r *legacyLayerReader) Read(b []byte) (int, error) {
+	if r.backupReader == nil {
+		if r.currentFile == nil {
+			return 0, io.EOF
+		}
+		return r.currentFile.Read(b)
+	}
+	return r.backupReader.Read(b)
+}
+
+func (r *legacyLayerReader) Close() error {
+	r.proceed <- false
+	<-r.result
+	r.reset()
+	return nil
+}
+
+type pendingLink struct {
+	Path, Target string
+}
+
+type legacyLayerWriter struct {
+	root         string
+	parentRoots  []string
+	destRoot     string
+	currentFile  *os.File
+	backupWriter *winio.BackupFileWriter
+	tombstones   []string
+	pathFixed    bool
+	HasUtilityVM bool
+	uvmDi        []dirInfo
+	addedFiles   map[string]bool
+	PendingLinks []pendingLink
+}
+
+// newLegacyLayerWriter returns a LayerWriter that can write the container layer
+// transport format to disk.
+func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) *legacyLayerWriter {
+	return &legacyLayerWriter{
+		root:        root,
+		parentRoots: parentRoots,
+		destRoot:    destRoot,
+		addedFiles:  make(map[string]bool),
+	}
+}
+
+func (w *legacyLayerWriter) init() error {
+	if !w.pathFixed {
+		path, err := makeLongAbsPath(w.root)
+		if err != nil {
+			return err
+		}
+		for i, p := range w.parentRoots {
+			w.parentRoots[i], err = makeLongAbsPath(p)
+			if err != nil {
+				return err
+			}
+		}
+		destPath, err := makeLongAbsPath(w.destRoot)
+		if err != nil {
+			return err
+		}
+		w.root = path
+		w.destRoot = destPath
+		w.pathFixed = true
+	}
+	return nil
+}
+
+func (w *legacyLayerWriter) initUtilityVM() error {
+	if !w.HasUtilityVM {
+		err := os.Mkdir(filepath.Join(w.destRoot, utilityVMPath), 0)
+		if err != nil {
+			return err
+		}
+		// Server 2016 does not support multiple layers for the utility VM, so
+		// clone the utility VM from the parent layer into this layer. Use hard
+		// links to avoid unnecessary copying, since most of the files are
+		// immutable.
+		err = cloneTree(filepath.Join(w.parentRoots[0], utilityVMFilesPath), filepath.Join(w.destRoot, utilityVMFilesPath), mutatedUtilityVMFiles)
+		if err != nil {
+			return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
+		}
+		w.HasUtilityVM = true
+	}
+	return nil
+}
+
+func (w *legacyLayerWriter) reset() {
+	if w.backupWriter != nil {
+		w.backupWriter.Close()
+		w.backupWriter = nil
+	}
+	if w.currentFile != nil {
+		w.currentFile.Close()
+		w.currentFile = nil
+	}
+}
+
+// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
+func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
+	createDisposition := uint32(syscall.CREATE_NEW)
+	if isDir {
+		err = os.Mkdir(destPath, 0)
+		if err != nil {
+			return nil, err
+		}
+		createDisposition = syscall.OPEN_EXISTING
+	}
+
+	src, err := openFileOrDir(srcPath, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.OPEN_EXISTING)
+	if err != nil {
+		return nil, err
+	}
+	defer src.Close()
+	srcr := winio.NewBackupFileReader(src, true)
+	defer srcr.Close()
+
+	fileInfo, err = winio.GetFileBasicInfo(src)
+	if err != nil {
+		return nil, err
+	}
+
+	dest, err := openFileOrDir(destPath, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
+	if err != nil {
+		return nil, err
+	}
+	defer dest.Close()
+
+	err = winio.SetFileBasicInfo(dest, fileInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	destw := winio.NewBackupFileWriter(dest, true)
+	defer func() {
+		cerr := destw.Close()
+		if err == nil {
+			err = cerr
+		}
+	}()
+
+	_, err = io.Copy(destw, srcr)
+	if err != nil {
+		return nil, err
+	}
+
+	return fileInfo, nil
+}
+
+// cloneTree clones a directory tree using hard links. It skips hard links for
+// the file names in the provided map and just copies those files.
+func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error {
+	var di []dirInfo
+	err := filepath.Walk(srcPath, func(srcFilePath string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		relPath, err := filepath.Rel(srcPath, srcFilePath)
+		if err != nil {
+			return err
+		}
+		destFilePath := filepath.Join(destPath, relPath)
+
+		// Directories, reparse points, and files that will be mutated during
+		// utility VM import must be copied. All other files can be hard linked.
+		isReparsePoint := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0
+		if info.IsDir() || isReparsePoint || mutatedFiles[relPath] {
+			fi, err := copyFileWithMetadata(srcFilePath, destFilePath, info.IsDir())
+			if err != nil {
+				return err
+			}
+			if info.IsDir() && !isReparsePoint {
+				di = append(di, dirInfo{path: destFilePath, fileInfo: *fi})
+			}
+		} else {
+			err = os.Link(srcFilePath, destFilePath)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Don't recurse on reparse points.
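+		// Descending into one would walk the files of its target rather than
+		// the layer's own contents.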
+ if info.IsDir() && isReparsePoint { + return filepath.SkipDir + } + + return nil + }) + if err != nil { + return err + } + + return reapplyDirectoryTimes(di) +} + +func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { + w.reset() + err := w.init() + if err != nil { + return err + } + + if name == utilityVMPath { + return w.initUtilityVM() + } + + if hasPathPrefix(name, utilityVMPath) { + if !w.HasUtilityVM { + return errors.New("missing UtilityVM directory") + } + if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { + return errors.New("invalid UtilityVM layer") + } + path := filepath.Join(w.destRoot, name) + createDisposition := uint32(syscall.OPEN_EXISTING) + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + st, err := os.Lstat(path) + if err != nil && !os.IsNotExist(err) { + return err + } + if st != nil { + // Delete the existing file/directory if it is not the same type as this directory. + existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + if err = os.RemoveAll(path); err != nil { + return err + } + st = nil + } + } + if st == nil { + if err = os.Mkdir(path, 0); err != nil { + return err + } + } + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + w.uvmDi = append(w.uvmDi, dirInfo{path: path, fileInfo: *fileInfo}) + } + } else { + // Overwrite any existing hard link. + err = os.Remove(path) + if err != nil && !os.IsNotExist(err) { + return err + } + createDisposition = syscall.CREATE_NEW + } + + f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + os.Remove(path) + } + }() + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return err + } + + w.backupWriter = winio.NewBackupFileWriter(f, true) + w.currentFile = f + w.addedFiles[name] = true + f = nil + return nil + } + + path := filepath.Join(w.root, name) + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + err := os.Mkdir(path, 0) + if err != nil { + return err + } + path += ".$wcidirs$" + } + + f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.CREATE_NEW) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + os.Remove(path) + } + }() + + strippedFi := *fileInfo + strippedFi.FileAttributes = 0 + err = winio.SetFileBasicInfo(f, &strippedFi) + if err != nil { + return err + } + + if hasPathPrefix(name, hivesPath) { + w.backupWriter = winio.NewBackupFileWriter(f, false) + } else { + // The file attributes are written before the stream. + err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes)) + if err != nil { + return err + } + } + + w.currentFile = f + w.addedFiles[name] = true + f = nil + return nil +} + +func (w *legacyLayerWriter) AddLink(name string, target string) error { + w.reset() + err := w.init() + if err != nil { + return err + } + + var roots []string + if hasPathPrefix(target, filesPath) { + // Look for cross-layer hard link targets in the parent layers, since + // nothing is in the destination path yet. 
+		roots = w.parentRoots
+	} else if hasPathPrefix(target, utilityVMFilesPath) {
+		// Since the utility VM is fully cloned into the destination path
+		// already, look for cross-layer hard link targets directly in the
+		// destination path.
+		roots = []string{w.destRoot}
+	}
+
+	if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
+		return errors.New("invalid hard link in layer")
+	}
+
+	// First, try to resolve the link target against the files already added to
+	// this layer. If that fails, search in parent layers.
+	var selectedRoot string
+	if _, ok := w.addedFiles[target]; ok {
+		selectedRoot = w.destRoot
+	} else {
+		for _, r := range roots {
+			if _, err = os.Lstat(filepath.Join(r, target)); err != nil {
+				if !os.IsNotExist(err) {
+					return err
+				}
+			} else {
+				selectedRoot = r
+				break
+			}
+		}
+		if selectedRoot == "" {
+			return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
+		}
+	}
+	// The link can't be written until after the ImportLayer call.
+	w.PendingLinks = append(w.PendingLinks, pendingLink{
+		Path:   filepath.Join(w.destRoot, name),
+		Target: filepath.Join(selectedRoot, target),
+	})
+	w.addedFiles[name] = true
+	return nil
+}
+
+func (w *legacyLayerWriter) Remove(name string) error {
+	if hasPathPrefix(name, filesPath) {
+		w.tombstones = append(w.tombstones, name[len(filesPath)+1:])
+	} else if hasPathPrefix(name, utilityVMFilesPath) {
+		err := w.initUtilityVM()
+		if err != nil {
+			return err
+		}
+		// Make sure the path exists; os.RemoveAll will not fail if the file is
+		// already gone, and this needs to be a fatal error for diagnostics
+		// purposes.
+		path := filepath.Join(w.destRoot, name)
+		if _, err := os.Lstat(path); err != nil {
+			return err
+		}
+		err = os.RemoveAll(path)
+		if err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("invalid tombstone %s", name)
+	}
+
+	return nil
+}
+
+func (w *legacyLayerWriter) Write(b []byte) (int, error) {
+	if w.backupWriter == nil {
+		if w.currentFile == nil {
+			return 0, errors.New("closed")
+		}
+		return w.currentFile.Write(b)
+	}
+	return w.backupWriter.Write(b)
+}
+
+func (w *legacyLayerWriter) Close() error {
+	w.reset()
+	err := w.init()
+	if err != nil {
+		return err
+	}
+	tf, err := os.Create(filepath.Join(w.root, "tombstones.txt"))
+	if err != nil {
+		return err
+	}
+	defer tf.Close()
+	_, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n"))
+	if err != nil {
+		return err
+	}
+	for _, t := range w.tombstones {
+		_, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n"))
+		if err != nil {
+			return err
+		}
+	}
+	if w.HasUtilityVM {
+		err = reapplyDirectoryTimes(w.uvmDi)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/nametoguid.go
new file mode 100644
index 0000000..1a522f9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/nametoguid.go
@@ -0,0 +1,20 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// NameToGuid converts the given string into a GUID using the algorithm in the
+// Host Compute Service, ensuring GUIDs generated with the same string are common
+// across all clients.
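+//
+// layerPathsToDescriptors (layerutils.go) uses this to derive each layer's
+// GUID from its folder name, e.g. (the folder name is a placeholder):
+//
+//	g, err := hcsshim.NameToGuid("f00db81b1f2a")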
+func NameToGuid(name string) (id GUID, err error) {
+	title := "hcsshim::NameToGuid "
+	logrus.Debugf(title+"Name %s", name)
+
+	err = nameToGuid(name, &id)
+	if err != nil {
+		err = makeErrorf(err, title, "name=%s", name)
+		logrus.Error(err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/preparelayer.go
new file mode 100644
index 0000000..2791683
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/preparelayer.go
@@ -0,0 +1,46 @@
+package hcsshim
+
+import (
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+)
+
+var prepareLayerLock sync.Mutex
+
+// PrepareLayer finds a mounted read-write layer matching layerId and enables the
+// filesystem filter for use on that layer. This requires the paths to all
+// parent layers, and is necessary in order to view or interact with the layer
+// as an actual filesystem (reading and writing files, creating directories, etc).
+// Disabling the filter must be done via UnprepareLayer.
+func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
+	title := "hcsshim::PrepareLayer "
+	logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
+
+	// Generate layer descriptors
+	layers, err := layerPathsToDescriptors(parentLayerPaths)
+	if err != nil {
+		return err
+	}
+
+	// Convert info to API calling convention
+	infop, err := convertDriverInfo(info)
+	if err != nil {
+		logrus.Error(err)
+		return err
+	}
+
+	// This lock is a temporary workaround for a Windows bug. Only allowing one
+	// call to prepareLayer at a time vastly reduces the chance of a timeout.
+	prepareLayerLock.Lock()
+	defer prepareLayerLock.Unlock()
+	err = prepareLayer(&infop, layerId, layers)
+	if err != nil {
+		err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
+		logrus.Error(err)
+		return err
+	}
+
+	logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go
new file mode 100644
index 0000000..4ef0ed3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/process.go
@@ -0,0 +1,384 @@
+package hcsshim
+
+import (
+	"encoding/json"
+	"io"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// process represents a single process running within a container.
+type process struct {
+	handleLock     sync.RWMutex
+	handle         hcsProcess
+	processID      int
+	container      *container
+	cachedPipes    *cachedPipes
+	callbackNumber uintptr
+}
+
+type cachedPipes struct {
+	stdIn  syscall.Handle
+	stdOut syscall.Handle
+	stdErr syscall.Handle
+}
+
+type processModifyRequest struct {
+	Operation   string
+	ConsoleSize *consoleSize `json:",omitempty"`
+	CloseHandle *closeHandle `json:",omitempty"`
+}
+
+type consoleSize struct {
+	Height uint16
+	Width  uint16
+}
+
+type closeHandle struct {
+	Handle string
+}
+
+type processStatus struct {
+	ProcessID      uint32
+	Exited         bool
+	ExitCode       uint32
+	LastWaitResult int32
+}
+
+const (
+	stdIn  string = "StdIn"
+	stdOut string = "StdOut"
+	stdErr string = "StdErr"
+)
+
+const (
+	modifyConsoleSize string = "ConsoleSize"
+	modifyCloseHandle string = "CloseHandle"
+)
+
+// Pid returns the process ID of the process within the container.
+func (process *process) Pid() int {
+	return process.processID
+}
+
+// Kill signals the process to terminate but does not wait for it to finish terminating.
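+//
+// A common caller-side pattern pairs Kill with Wait (a sketch; error
+// handling elided):
+//
+//	_ = p.Kill()
+//	_ = p.Wait() // blocks until the process has actually exited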
+func (process *process) Kill() error {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+	operation := "Kill"
+	title := "HCSShim::Process::" + operation
+	logrus.Debugf(title+" processid=%d", process.processID)
+
+	if process.handle == 0 {
+		return makeProcessError(process, operation, "", ErrAlreadyClosed)
+	}
+
+	var resultp *uint16
+	err := hcsTerminateProcess(process.handle, &resultp)
+	err = processHcsResult(err, resultp)
+	if err != nil {
+		return makeProcessError(process, operation, "", err)
+	}
+
+	logrus.Debugf(title+" succeeded processid=%d", process.processID)
+	return nil
+}
+
+// Wait waits for the process to exit.
+func (process *process) Wait() error {
+	operation := "Wait"
+	title := "HCSShim::Process::" + operation
+	logrus.Debugf(title+" processid=%d", process.processID)
+
+	err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
+	if err != nil {
+		return makeProcessError(process, operation, "", err)
+	}
+
+	logrus.Debugf(title+" succeeded processid=%d", process.processID)
+	return nil
+}
+
+// WaitTimeout waits for the process to exit or the duration to elapse. It returns
+// ErrTimeout if the timeout elapses first.
+func (process *process) WaitTimeout(timeout time.Duration) error {
+	operation := "WaitTimeout"
+	title := "HCSShim::Process::" + operation
+	logrus.Debugf(title+" processid=%d", process.processID)
+
+	err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
+	if err != nil {
+		return makeProcessError(process, operation, "", err)
+	}
+
+	logrus.Debugf(title+" succeeded processid=%d", process.processID)
+	return nil
+}
+
+// ExitCode returns the exit code of the process. The process must have
+// already terminated.
+func (process *process) ExitCode() (int, error) {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+	operation := "ExitCode"
+	title := "HCSShim::Process::" + operation
+	logrus.Debugf(title+" processid=%d", process.processID)
+
+	if process.handle == 0 {
+		return 0, makeProcessError(process, operation, "", ErrAlreadyClosed)
+	}
+
+	properties, err := process.properties()
+	if err != nil {
+		return 0, makeProcessError(process, operation, "", err)
+	}
+
+	if properties.Exited == false {
+		return 0, makeProcessError(process, operation, "", ErrInvalidProcessState)
+	}
+
+	if properties.LastWaitResult != 0 {
+		return 0, makeProcessError(process, operation, "", syscall.Errno(properties.LastWaitResult))
+	}
+
+	logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode)
+	return int(properties.ExitCode), nil
+}
+
+// ResizeConsole resizes the console of the process.
+func (process *process) ResizeConsole(width, height uint16) error { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + operation := "ResizeConsole" + title := "HCSShim::Process::" + operation + logrus.Debugf(title+" processid=%d", process.processID) + + if process.handle == 0 { + return makeProcessError(process, operation, "", ErrAlreadyClosed) + } + + modifyRequest := processModifyRequest{ + Operation: modifyConsoleSize, + ConsoleSize: &consoleSize{ + Height: height, + Width: width, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + modifyRequestStr := string(modifyRequestb) + + var resultp *uint16 + err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) + err = processHcsResult(err, resultp) + if err != nil { + return makeProcessError(process, operation, "", err) + } + + logrus.Debugf(title+" succeeded processid=%d", process.processID) + return nil +} + +func (process *process) properties() (*processStatus, error) { + operation := "properties" + title := "HCSShim::Process::" + operation + logrus.Debugf(title+" processid=%d", process.processID) + + var ( + resultp *uint16 + propertiesp *uint16 + ) + err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp) + err = processHcsResult(err, resultp) + if err != nil { + return nil, err + } + + if propertiesp == nil { + return nil, ErrUnexpectedValue + } + propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp) + + properties := &processStatus{} + if err := json.Unmarshal(propertiesRaw, properties); err != nil { + return nil, err + } + + logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw) + return properties, nil +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes; it should be possible to +// call this multiple times to get multiple interfaces. +func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + operation := "Stdio" + title := "HCSShim::Process::" + operation + logrus.Debugf(title+" processid=%d", process.processID) + + if process.handle == 0 { + return nil, nil, nil, makeProcessError(process, operation, "", ErrAlreadyClosed) + } + + var stdIn, stdOut, stdErr syscall.Handle + + if process.cachedPipes == nil { + var ( + processInfo hcsProcessInformation + resultp *uint16 + ) + err := hcsGetProcessInfo(process.handle, &processInfo, &resultp) + err = processHcsResult(err, resultp) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, "", err) + } + + stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError + } else { + // Use cached pipes + stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr + + // Invalidate the cache + process.cachedPipes = nil + } + + pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr}) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, "", err) + } + + logrus.Debugf(title+" succeeded processid=%d", process.processID) + return pipes[0], pipes[1], pipes[2], nil +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. 
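+//
+// Caller-side sketch (error handling elided): write the input, then signal
+// end-of-input so the process stops reading:
+//
+//	stdin, _, _, _ := p.Stdio()
+//	_, _ = stdin.Write(input)
+//	_ = p.CloseStdin()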
+func (process *process) CloseStdin() error {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+	operation := "CloseStdin"
+	title := "HCSShim::Process::" + operation
+	logrus.Debugf(title+" processid=%d", process.processID)
+
+	if process.handle == 0 {
+		return makeProcessError(process, operation, "", ErrAlreadyClosed)
+	}
+
+	modifyRequest := processModifyRequest{
+		Operation: modifyCloseHandle,
+		CloseHandle: &closeHandle{
+			Handle: stdIn,
+		},
+	}
+
+	modifyRequestb, err := json.Marshal(modifyRequest)
+	if err != nil {
+		return err
+	}
+
+	modifyRequestStr := string(modifyRequestb)
+
+	var resultp *uint16
+	err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
+	err = processHcsResult(err, resultp)
+	if err != nil {
+		return makeProcessError(process, operation, "", err)
+	}
+
+	logrus.Debugf(title+" succeeded processid=%d", process.processID)
+	return nil
+}
+
+// Close cleans up any state associated with the process but does not kill
+// or wait on it.
+func (process *process) Close() error {
+	process.handleLock.Lock()
+	defer process.handleLock.Unlock()
+	operation := "Close"
+	title := "HCSShim::Process::" + operation
+	logrus.Debugf(title+" processid=%d", process.processID)
+
+	// Don't double free this
+	if process.handle == 0 {
+		return nil
+	}
+
+	if err := process.unregisterCallback(); err != nil {
+		return makeProcessError(process, operation, "", err)
+	}
+
+	if err := hcsCloseProcess(process.handle); err != nil {
+		return makeProcessError(process, operation, "", err)
+	}
+
+	process.handle = 0
+
+	logrus.Debugf(title+" succeeded processid=%d", process.processID)
+	return nil
+}
+
+func (process *process) registerCallback() error {
+	context := &notifcationWatcherContext{
+		channels: newChannels(),
+	}
+
+	callbackMapLock.Lock()
+	callbackNumber := nextCallback
+	nextCallback++
+	callbackMap[callbackNumber] = context
+	callbackMapLock.Unlock()
+
+	var callbackHandle hcsCallback
+	err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
+	if err != nil {
+		return err
+	}
+	context.handle = callbackHandle
+	process.callbackNumber = callbackNumber
+
+	return nil
+}
+
+func (process *process) unregisterCallback() error {
+	callbackNumber := process.callbackNumber
+
+	callbackMapLock.RLock()
+	context := callbackMap[callbackNumber]
+	callbackMapLock.RUnlock()
+
+	if context == nil {
+		return nil
+	}
+
+	handle := context.handle
+
+	if handle == 0 {
+		return nil
+	}
+
+	// hcsUnregisterProcessCallback has its own synchronization
+	// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
+	err := hcsUnregisterProcessCallback(handle)
+	if err != nil {
+		return err
+	}
+
+	closeChannels(context.channels)
+
+	callbackMapLock.Lock()
+	callbackMap[callbackNumber] = nil
+	callbackMapLock.Unlock()
+
+	handle = 0
+
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/processimage.go b/vendor/github.com/Microsoft/hcsshim/processimage.go
new file mode 100644
index 0000000..fadb1b9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/processimage.go
@@ -0,0 +1,23 @@
+package hcsshim
+
+import "os"
+
+// ProcessBaseLayer post-processes a base layer that has had its files extracted.
+// The files should have been extracted to \Files.
+func ProcessBaseLayer(path string) error {
+	err := processBaseImage(path)
+	if err != nil {
+		return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err}
+	}
+	return nil
+}
+
+// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
+// The files should have been extracted to \Files.
+func ProcessUtilityVMImage(path string) error {
+	err := processUtilityImage(path)
+	if err != nil {
+		return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/unpreparelayer.go
new file mode 100644
index 0000000..d0ead0b
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/unpreparelayer.go
@@ -0,0 +1,27 @@
+package hcsshim
+
+import "github.com/Sirupsen/logrus"
+
+// UnprepareLayer disables the filesystem filter for the read-write layer with
+// the given id.
+func UnprepareLayer(info DriverInfo, layerId string) error {
+	title := "hcsshim::UnprepareLayer "
+	logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
+
+	// Convert info to API calling convention
+	infop, err := convertDriverInfo(info)
+	if err != nil {
+		logrus.Error(err)
+		return err
+	}
+
+	err = unprepareLayer(&infop, layerId)
+	if err != nil {
+		err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
+		logrus.Error(err)
+		return err
+	}
+
+	logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/utils.go b/vendor/github.com/Microsoft/hcsshim/utils.go
new file mode 100644
index 0000000..bd6e2d9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/utils.go
@@ -0,0 +1,33 @@
+package hcsshim
+
+import (
+	"io"
+	"syscall"
+
+	"github.com/Microsoft/go-winio"
+)
+
+// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
+// if there is an error.
+func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
+	fs := make([]io.ReadWriteCloser, len(hs))
+	for i, h := range hs {
+		if h != syscall.Handle(0) {
+			if err == nil {
+				fs[i], err = winio.MakeOpenFile(h)
+			}
+			if err != nil {
+				syscall.Close(h)
+			}
+		}
+	}
+	if err != nil {
+		for _, f := range fs {
+			if f != nil {
+				f.Close()
+			}
+		}
+		return nil, err
+	}
+	return fs, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/version.go b/vendor/github.com/Microsoft/hcsshim/version.go
new file mode 100644
index 0000000..ae10c23
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/version.go
@@ -0,0 +1,7 @@
+package hcsshim
+
+// IsTP4 returns whether the currently running Windows build is TP4, which
+// predates the HNS API exposed by later builds.
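+//
+// Callers typically use it to gate HNS-dependent code paths, e.g.:
+//
+//	if hcsshim.IsTP4() {
+//		// fall back to behavior that does not require HNS
+//	}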
+func IsTP4() bool {
+	// HNSCall was not present in TP4
+	return procHNSCall.Find() != nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/waithelper.go b/vendor/github.com/Microsoft/hcsshim/waithelper.go
new file mode 100644
index 0000000..828d148
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/waithelper.go
@@ -0,0 +1,63 @@
+package hcsshim
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
+	err = processHcsResult(err, resultp)
+	if IsPending(err) {
+		return waitForNotification(callbackNumber, expectedNotification, timeout)
+	}
+
+	return err
+}
+
+func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
+	callbackMapLock.RLock()
+	channels := callbackMap[callbackNumber].channels
+	callbackMapLock.RUnlock()
+
+	expectedChannel := channels[expectedNotification]
+	if expectedChannel == nil {
+		logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
+		return ErrInvalidNotificationType
+	}
+
+	var c <-chan time.Time
+	if timeout != nil {
+		timer := time.NewTimer(*timeout)
+		c = timer.C
+		defer timer.Stop()
+	}
+
+	select {
+	case err, ok := <-expectedChannel:
+		if !ok {
+			return ErrHandleClose
+		}
+		return err
+	case err, ok := <-channels[hcsNotificationSystemExited]:
+		if !ok {
+			return ErrHandleClose
+		}
+		// If the expected notification is hcsNotificationSystemExited, which of
+		// these two cases fires is random. Return the raw error if
+		// hcsNotificationSystemExited was the expected notification.
+		if channels[hcsNotificationSystemExited] == expectedChannel {
+			return err
+		}
+		return ErrUnexpectedContainerExit
+	case _, ok := <-channels[hcsNotificationServiceDisconnect]:
+		if !ok {
+			return ErrHandleClose
+		}
+		// hcsNotificationServiceDisconnect should never be an expected notification;
+		// it does not need the same handling as hcsNotificationSystemExited.
+		return ErrUnexpectedProcessAbort
+	case <-c:
+		return ErrTimeout
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/zhcsshim.go b/vendor/github.com/Microsoft/hcsshim/zhcsshim.go
new file mode 100644
index 0000000..5d1a851
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/zhcsshim.go
@@ -0,0 +1,1042 @@
+// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+
+package hcsshim
+
+import (
+	"syscall"
+	"unsafe"
+
+	"github.com/Microsoft/go-winio"
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return nil
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+ return e +} + +var ( + modole32 = windows.NewLazySystemDLL("ole32.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") + procActivateLayer = modvmcompute.NewProc("ActivateLayer") + procCopyLayer = modvmcompute.NewProc("CopyLayer") + procCreateLayer = modvmcompute.NewProc("CreateLayer") + procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") + procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") + procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") + procDestroyLayer = modvmcompute.NewProc("DestroyLayer") + procExportLayer = modvmcompute.NewProc("ExportLayer") + procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") + procGetBaseImages = modvmcompute.NewProc("GetBaseImages") + procImportLayer = modvmcompute.NewProc("ImportLayer") + procLayerExists = modvmcompute.NewProc("LayerExists") + procNameToGuid = modvmcompute.NewProc("NameToGuid") + procPrepareLayer = modvmcompute.NewProc("PrepareLayer") + procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") + procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") + procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") + procImportLayerBegin = modvmcompute.NewProc("ImportLayerBegin") + procImportLayerNext = modvmcompute.NewProc("ImportLayerNext") + procImportLayerWrite = modvmcompute.NewProc("ImportLayerWrite") + procImportLayerEnd = modvmcompute.NewProc("ImportLayerEnd") + procExportLayerBegin = modvmcompute.NewProc("ExportLayerBegin") + procExportLayerNext = modvmcompute.NewProc("ExportLayerNext") + procExportLayerRead = modvmcompute.NewProc("ExportLayerRead") + procExportLayerEnd = modvmcompute.NewProc("ExportLayerEnd") + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + 
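+	// (These procedures are resolved lazily: nothing is looked up in the
+	// DLLs until the first call or an explicit Find().)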
procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") + procHNSCall = modvmcompute.NewProc("HNSCall") +) + +func coTaskMemFree(buffer unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + return +} + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func activateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _activateLayer(info, _p0) +} + +func _activateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procActivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(srcId) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(dstId) + if hr != nil { + return + } + return _copyLayer(info, _p0, _p1, descriptors) +} + +func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procCopyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func createLayer(info *driverInfo, id string, parent string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(parent) + if hr != nil { + return + } + return _createLayer(info, _p0, _p1) +} + +func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { + if hr = procCreateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(parent) + if hr != nil { + return + } + return _createSandboxLayer(info, _p0, _p1, descriptors) +} + +func _createSandboxLayer(info *driverInfo, id *uint16, parent *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procCreateSandboxLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), 
uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _expandSandboxSize(info, _p0, size) +} + +func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { + if hr = procExpandSandboxSize.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func deactivateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _deactivateLayer(info, _p0) +} + +func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDeactivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func destroyLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _destroyLayer(info, _p0) +} + +func _destroyLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDestroyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _exportLayer(info, _p0, _p1, descriptors) +} + +func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _getLayerMountPath(info, _p0, length, buffer) +} + +func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { + if hr = procGetLayerMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func getBaseImages(buffer **uint16) (hr error) { + if hr = procGetBaseImages.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _importLayer(info, _p0, _p1, descriptors) +} + +func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _layerExists(info, _p0, exists) +} + +func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { + if hr = procLayerExists.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func nameToGuid(name string, guid *GUID) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(name) + if hr != nil { + return + } + return _nameToGuid(_p0, guid) +} + +func _nameToGuid(name *uint16, guid *GUID) (hr error) { + if hr = procNameToGuid.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _prepareLayer(info, _p0, descriptors) +} + +func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procPrepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func unprepareLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _unprepareLayer(info, _p0) +} + +func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procUnprepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func processBaseImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + 
return + } + return _processBaseImage(_p0) +} + +func _processBaseImage(path *uint16) (hr error) { + if hr = procProcessBaseImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func processUtilityImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processUtilityImage(_p0) +} + +func _processUtilityImage(path *uint16) (hr error) { + if hr = procProcessUtilityImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _importLayerBegin(info, _p0, descriptors, context) +} + +func _importLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procImportLayerBegin.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procImportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(fileName) + if hr != nil { + return + } + return _importLayerNext(context, _p0, fileInfo) +} + +func _importLayerNext(context uintptr, fileName *uint16, fileInfo *winio.FileBasicInfo) (hr error) { + if hr = procImportLayerNext.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procImportLayerNext.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func importLayerWrite(context uintptr, buffer []byte) (hr error) { + var _p0 *byte + if len(buffer) > 0 { + _p0 = &buffer[0] + } + if hr = procImportLayerWrite.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procImportLayerWrite.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func importLayerEnd(context uintptr) (hr error) { + if hr = procImportLayerEnd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procImportLayerEnd.Addr(), 1, uintptr(context), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _exportLayerBegin(info, _p0, descriptors, context) +} + +func _exportLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procExportLayerBegin.Find(); hr != nil { + return 
+ } + r0, _, _ := syscall.Syscall6(procExportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) { + if hr = procExportLayerNext.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procExportLayerNext.Addr(), 5, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)), uintptr(unsafe.Pointer(fileSize)), uintptr(unsafe.Pointer(deleted)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) { + var _p0 *byte + if len(buffer) > 0 { + _p0 = &buffer[0] + } + if hr = procExportLayerRead.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procExportLayerRead.Addr(), 4, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)), uintptr(unsafe.Pointer(bytesRead)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func exportLayerEnd(context uintptr) (hr error) { + if hr = procExportLayerEnd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procExportLayerEnd.Addr(), 1, uintptr(context), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) +} + +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) +} + +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { + if hr = procHcsCreateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16) (hr error) { + if 
hr = procHcsOpenComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) { + if hr = procHcsCloseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsStartComputeSystem(computeSystem, _p0, result) +} + +func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsStartComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsShutdownComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsTerminateComputeSystem(computeSystem, _p0, result) +} + +func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsPauseComputeSystem(computeSystem, _p0, result) +} + +func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsPauseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return 
_hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsResumeComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) +} + +func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsModifyComputeSystem(computeSystem, _p0, result) +} + +func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, result **uint16) (hr error) { + if hr = procHcsModifyComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { + if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) { + if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(processParameters) + if hr != nil { + return + } + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) +} + +func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { + if hr = procHcsCreateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), 
uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) { + if hr = procHcsOpenProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsCloseProcess(process hcsProcess) (hr error) { + if hr = procHcsCloseProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) { + if hr = procHcsGetProcessInfo.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) { + if hr = procHcsGetProcessProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyProcess(process, _p0, result) +} + +func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetServiceProperties(_p0, properties, result) +} + +func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetServiceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context 
uintptr, callbackHandle *hcsCallback) (hr error) { + if hr = procHcsRegisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) { + if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyServiceSettings.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + return +} diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/Nvveen/Gotty/LICENSE new file mode 100644 index 0000000..0b71c97 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/Nvveen/Gotty/README new file mode 100644 index 0000000..a6b0d9a --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/README @@ -0,0 +1,5 @@ +Gotty is a library written in Go that determines and reads termcap database +files to produce an interface for interacting with the capabilities of a +terminal. +See the godoc documentation or the source code for more information about +function usage. diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/Nvveen/Gotty/attributes.go new file mode 100644 index 0000000..a4c005f --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/attributes.go @@ -0,0 +1,514 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package gotty + +// Boolean capabilities +var BoolAttr = [...]string{ + "auto_left_margin", "bw", + "auto_right_margin", "am", + "no_esc_ctlc", "xsb", + "ceol_standout_glitch", "xhp", + "eat_newline_glitch", "xenl", + "erase_overstrike", "eo", + "generic_type", "gn", + "hard_copy", "hc", + "has_meta_key", "km", + "has_status_line", "hs", + "insert_null_glitch", "in", + "memory_above", "da", + "memory_below", "db", + "move_insert_mode", "mir", + "move_standout_mode", "msgr", + "over_strike", "os", + "status_line_esc_ok", "eslok", + "dest_tabs_magic_smso", "xt", + "tilde_glitch", "hz", + "transparent_underline", "ul", + "xon_xoff", "nxon", + "needs_xon_xoff", "nxon", + "prtr_silent", "mc5i", + "hard_cursor", "chts", + "non_rev_rmcup", "nrrmc", + "no_pad_char", "npc", + "non_dest_scroll_region", "ndscr", + "can_change", "ccc", + "back_color_erase", "bce", + "hue_lightness_saturation", "hls", + "col_addr_glitch", "xhpa", + "cr_cancels_micro_mode", "crxm", + "has_print_wheel", "daisy", + "row_addr_glitch", "xvpa", + "semi_auto_right_margin", "sam", + "cpi_changes_res", "cpix", + "lpi_changes_res", "lpix", + "backspaces_with_bs", "", + "crt_no_scrolling", "", + "no_correctly_working_cr", "", + "gnu_has_meta_key", "", + "linefeed_is_newline", "", + "has_hardware_tabs", "", + "return_does_clr_eol", "", +} + +// Numerical capabilities +var NumAttr = [...]string{ + "columns", "cols", + "init_tabs", "it", + "lines", "lines", + "lines_of_memory", "lm", + "magic_cookie_glitch", "xmc", + "padding_baud_rate", "pb", + "virtual_terminal", "vt", + "width_status_line", "wsl", + "num_labels", "nlab", + "label_height", "lh", + "label_width", "lw", + "max_attributes", "ma", + "maximum_windows", "wnum", + "max_colors", "colors", + "max_pairs", "pairs", + "no_color_video", "ncv", + "buffer_capacity", "bufsz", + "dot_vert_spacing", "spinv", + "dot_horz_spacing", "spinh", + "max_micro_address", "maddr", + "max_micro_jump", "mjump", + 
"micro_col_size", "mcs", + "micro_line_size", "mls", + "number_of_pins", "npins", + "output_res_char", "orc", + "output_res_line", "orl", + "output_res_horz_inch", "orhi", + "output_res_vert_inch", "orvi", + "print_rate", "cps", + "wide_char_size", "widcs", + "buttons", "btns", + "bit_image_entwining", "bitwin", + "bit_image_type", "bitype", + "magic_cookie_glitch_ul", "", + "carriage_return_delay", "", + "new_line_delay", "", + "backspace_delay", "", + "horizontal_tab_delay", "", + "number_of_function_keys", "", +} + +// String capabilities +var StrAttr = [...]string{ + "back_tab", "cbt", + "bell", "bel", + "carriage_return", "cr", + "change_scroll_region", "csr", + "clear_all_tabs", "tbc", + "clear_screen", "clear", + "clr_eol", "el", + "clr_eos", "ed", + "column_address", "hpa", + "command_character", "cmdch", + "cursor_address", "cup", + "cursor_down", "cud1", + "cursor_home", "home", + "cursor_invisible", "civis", + "cursor_left", "cub1", + "cursor_mem_address", "mrcup", + "cursor_normal", "cnorm", + "cursor_right", "cuf1", + "cursor_to_ll", "ll", + "cursor_up", "cuu1", + "cursor_visible", "cvvis", + "delete_character", "dch1", + "delete_line", "dl1", + "dis_status_line", "dsl", + "down_half_line", "hd", + "enter_alt_charset_mode", "smacs", + "enter_blink_mode", "blink", + "enter_bold_mode", "bold", + "enter_ca_mode", "smcup", + "enter_delete_mode", "smdc", + "enter_dim_mode", "dim", + "enter_insert_mode", "smir", + "enter_secure_mode", "invis", + "enter_protected_mode", "prot", + "enter_reverse_mode", "rev", + "enter_standout_mode", "smso", + "enter_underline_mode", "smul", + "erase_chars", "ech", + "exit_alt_charset_mode", "rmacs", + "exit_attribute_mode", "sgr0", + "exit_ca_mode", "rmcup", + "exit_delete_mode", "rmdc", + "exit_insert_mode", "rmir", + "exit_standout_mode", "rmso", + "exit_underline_mode", "rmul", + "flash_screen", "flash", + "form_feed", "ff", + "from_status_line", "fsl", + "init_1string", "is1", + "init_2string", "is2", + "init_3string", "is3", + "init_file", "if", + "insert_character", "ich1", + "insert_line", "il1", + "insert_padding", "ip", + "key_backspace", "kbs", + "key_catab", "ktbc", + "key_clear", "kclr", + "key_ctab", "kctab", + "key_dc", "kdch1", + "key_dl", "kdl1", + "key_down", "kcud1", + "key_eic", "krmir", + "key_eol", "kel", + "key_eos", "ked", + "key_f0", "kf0", + "key_f1", "kf1", + "key_f10", "kf10", + "key_f2", "kf2", + "key_f3", "kf3", + "key_f4", "kf4", + "key_f5", "kf5", + "key_f6", "kf6", + "key_f7", "kf7", + "key_f8", "kf8", + "key_f9", "kf9", + "key_home", "khome", + "key_ic", "kich1", + "key_il", "kil1", + "key_left", "kcub1", + "key_ll", "kll", + "key_npage", "knp", + "key_ppage", "kpp", + "key_right", "kcuf1", + "key_sf", "kind", + "key_sr", "kri", + "key_stab", "khts", + "key_up", "kcuu1", + "keypad_local", "rmkx", + "keypad_xmit", "smkx", + "lab_f0", "lf0", + "lab_f1", "lf1", + "lab_f10", "lf10", + "lab_f2", "lf2", + "lab_f3", "lf3", + "lab_f4", "lf4", + "lab_f5", "lf5", + "lab_f6", "lf6", + "lab_f7", "lf7", + "lab_f8", "lf8", + "lab_f9", "lf9", + "meta_off", "rmm", + "meta_on", "smm", + "newline", "_glitch", + "pad_char", "npc", + "parm_dch", "dch", + "parm_delete_line", "dl", + "parm_down_cursor", "cud", + "parm_ich", "ich", + "parm_index", "indn", + "parm_insert_line", "il", + "parm_left_cursor", "cub", + "parm_right_cursor", "cuf", + "parm_rindex", "rin", + "parm_up_cursor", "cuu", + "pkey_key", "pfkey", + "pkey_local", "pfloc", + "pkey_xmit", "pfx", + "print_screen", "mc0", + "prtr_off", "mc4", + "prtr_on", "mc5", + "repeat_char", 
"rep", + "reset_1string", "rs1", + "reset_2string", "rs2", + "reset_3string", "rs3", + "reset_file", "rf", + "restore_cursor", "rc", + "row_address", "mvpa", + "save_cursor", "row_address", + "scroll_forward", "ind", + "scroll_reverse", "ri", + "set_attributes", "sgr", + "set_tab", "hts", + "set_window", "wind", + "tab", "s_magic_smso", + "to_status_line", "tsl", + "underline_char", "uc", + "up_half_line", "hu", + "init_prog", "iprog", + "key_a1", "ka1", + "key_a3", "ka3", + "key_b2", "kb2", + "key_c1", "kc1", + "key_c3", "kc3", + "prtr_non", "mc5p", + "char_padding", "rmp", + "acs_chars", "acsc", + "plab_norm", "pln", + "key_btab", "kcbt", + "enter_xon_mode", "smxon", + "exit_xon_mode", "rmxon", + "enter_am_mode", "smam", + "exit_am_mode", "rmam", + "xon_character", "xonc", + "xoff_character", "xoffc", + "ena_acs", "enacs", + "label_on", "smln", + "label_off", "rmln", + "key_beg", "kbeg", + "key_cancel", "kcan", + "key_close", "kclo", + "key_command", "kcmd", + "key_copy", "kcpy", + "key_create", "kcrt", + "key_end", "kend", + "key_enter", "kent", + "key_exit", "kext", + "key_find", "kfnd", + "key_help", "khlp", + "key_mark", "kmrk", + "key_message", "kmsg", + "key_move", "kmov", + "key_next", "knxt", + "key_open", "kopn", + "key_options", "kopt", + "key_previous", "kprv", + "key_print", "kprt", + "key_redo", "krdo", + "key_reference", "kref", + "key_refresh", "krfr", + "key_replace", "krpl", + "key_restart", "krst", + "key_resume", "kres", + "key_save", "ksav", + "key_suspend", "kspd", + "key_undo", "kund", + "key_sbeg", "kBEG", + "key_scancel", "kCAN", + "key_scommand", "kCMD", + "key_scopy", "kCPY", + "key_screate", "kCRT", + "key_sdc", "kDC", + "key_sdl", "kDL", + "key_select", "kslt", + "key_send", "kEND", + "key_seol", "kEOL", + "key_sexit", "kEXT", + "key_sfind", "kFND", + "key_shelp", "kHLP", + "key_shome", "kHOM", + "key_sic", "kIC", + "key_sleft", "kLFT", + "key_smessage", "kMSG", + "key_smove", "kMOV", + "key_snext", "kNXT", + "key_soptions", "kOPT", + "key_sprevious", "kPRV", + "key_sprint", "kPRT", + "key_sredo", "kRDO", + "key_sreplace", "kRPL", + "key_sright", "kRIT", + "key_srsume", "kRES", + "key_ssave", "kSAV", + "key_ssuspend", "kSPD", + "key_sundo", "kUND", + "req_for_input", "rfi", + "key_f11", "kf11", + "key_f12", "kf12", + "key_f13", "kf13", + "key_f14", "kf14", + "key_f15", "kf15", + "key_f16", "kf16", + "key_f17", "kf17", + "key_f18", "kf18", + "key_f19", "kf19", + "key_f20", "kf20", + "key_f21", "kf21", + "key_f22", "kf22", + "key_f23", "kf23", + "key_f24", "kf24", + "key_f25", "kf25", + "key_f26", "kf26", + "key_f27", "kf27", + "key_f28", "kf28", + "key_f29", "kf29", + "key_f30", "kf30", + "key_f31", "kf31", + "key_f32", "kf32", + "key_f33", "kf33", + "key_f34", "kf34", + "key_f35", "kf35", + "key_f36", "kf36", + "key_f37", "kf37", + "key_f38", "kf38", + "key_f39", "kf39", + "key_f40", "kf40", + "key_f41", "kf41", + "key_f42", "kf42", + "key_f43", "kf43", + "key_f44", "kf44", + "key_f45", "kf45", + "key_f46", "kf46", + "key_f47", "kf47", + "key_f48", "kf48", + "key_f49", "kf49", + "key_f50", "kf50", + "key_f51", "kf51", + "key_f52", "kf52", + "key_f53", "kf53", + "key_f54", "kf54", + "key_f55", "kf55", + "key_f56", "kf56", + "key_f57", "kf57", + "key_f58", "kf58", + "key_f59", "kf59", + "key_f60", "kf60", + "key_f61", "kf61", + "key_f62", "kf62", + "key_f63", "kf63", + "clr_bol", "el1", + "clear_margins", "mgc", + "set_left_margin", "smgl", + "set_right_margin", "smgr", + "label_format", "fln", + "set_clock", "sclk", + "display_clock", "dclk", + "remove_clock", 
"rmclk", + "create_window", "cwin", + "goto_window", "wingo", + "hangup", "hup", + "dial_phone", "dial", + "quick_dial", "qdial", + "tone", "tone", + "pulse", "pulse", + "flash_hook", "hook", + "fixed_pause", "pause", + "wait_tone", "wait", + "user0", "u0", + "user1", "u1", + "user2", "u2", + "user3", "u3", + "user4", "u4", + "user5", "u5", + "user6", "u6", + "user7", "u7", + "user8", "u8", + "user9", "u9", + "orig_pair", "op", + "orig_colors", "oc", + "initialize_color", "initc", + "initialize_pair", "initp", + "set_color_pair", "scp", + "set_foreground", "setf", + "set_background", "setb", + "change_char_pitch", "cpi", + "change_line_pitch", "lpi", + "change_res_horz", "chr", + "change_res_vert", "cvr", + "define_char", "defc", + "enter_doublewide_mode", "swidm", + "enter_draft_quality", "sdrfq", + "enter_italics_mode", "sitm", + "enter_leftward_mode", "slm", + "enter_micro_mode", "smicm", + "enter_near_letter_quality", "snlq", + "enter_normal_quality", "snrmq", + "enter_shadow_mode", "sshm", + "enter_subscript_mode", "ssubm", + "enter_superscript_mode", "ssupm", + "enter_upward_mode", "sum", + "exit_doublewide_mode", "rwidm", + "exit_italics_mode", "ritm", + "exit_leftward_mode", "rlm", + "exit_micro_mode", "rmicm", + "exit_shadow_mode", "rshm", + "exit_subscript_mode", "rsubm", + "exit_superscript_mode", "rsupm", + "exit_upward_mode", "rum", + "micro_column_address", "mhpa", + "micro_down", "mcud1", + "micro_left", "mcub1", + "micro_right", "mcuf1", + "micro_row_address", "mvpa", + "micro_up", "mcuu1", + "order_of_pins", "porder", + "parm_down_micro", "mcud", + "parm_left_micro", "mcub", + "parm_right_micro", "mcuf", + "parm_up_micro", "mcuu", + "select_char_set", "scs", + "set_bottom_margin", "smgb", + "set_bottom_margin_parm", "smgbp", + "set_left_margin_parm", "smglp", + "set_right_margin_parm", "smgrp", + "set_top_margin", "smgt", + "set_top_margin_parm", "smgtp", + "start_bit_image", "sbim", + "start_char_set_def", "scsd", + "stop_bit_image", "rbim", + "stop_char_set_def", "rcsd", + "subscript_characters", "subcs", + "superscript_characters", "supcs", + "these_cause_cr", "docr", + "zero_motion", "zerom", + "char_set_names", "csnm", + "key_mouse", "kmous", + "mouse_info", "minfo", + "req_mouse_pos", "reqmp", + "get_mouse", "getm", + "set_a_foreground", "setaf", + "set_a_background", "setab", + "pkey_plab", "pfxl", + "device_type", "devt", + "code_set_init", "csin", + "set0_des_seq", "s0ds", + "set1_des_seq", "s1ds", + "set2_des_seq", "s2ds", + "set3_des_seq", "s3ds", + "set_lr_margin", "smglr", + "set_tb_margin", "smgtb", + "bit_image_repeat", "birep", + "bit_image_newline", "binel", + "bit_image_carriage_return", "bicr", + "color_names", "colornm", + "define_bit_image_region", "defbi", + "end_bit_image_region", "endbi", + "set_color_band", "setcolor", + "set_page_length", "slines", + "display_pc_char", "dispc", + "enter_pc_charset_mode", "smpch", + "exit_pc_charset_mode", "rmpch", + "enter_scancode_mode", "smsc", + "exit_scancode_mode", "rmsc", + "pc_term_options", "pctrm", + "scancode_escape", "scesc", + "alt_scancode_esc", "scesa", + "enter_horizontal_hl_mode", "ehhlm", + "enter_left_hl_mode", "elhlm", + "enter_low_hl_mode", "elohlm", + "enter_right_hl_mode", "erhlm", + "enter_top_hl_mode", "ethlm", + "enter_vertical_hl_mode", "evhlm", + "set_a_attributes", "sgr1", + "set_pglen_inch", "slength", + "termcap_init2", "", + "termcap_reset", "", + "linefeed_if_not_lf", "", + "backspace_if_not_bs", "", + "other_non_function_keys", "", + "arrow_key_map", "", + "acs_ulcorner", "", + 
"acs_llcorner", "", + "acs_urcorner", "", + "acs_lrcorner", "", + "acs_ltee", "", + "acs_rtee", "", + "acs_btee", "", + "acs_ttee", "", + "acs_hline", "", + "acs_vline", "", + "acs_plus", "", + "memory_lock", "", + "memory_unlock", "", + "box_chars_1", "", +} diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go new file mode 100644 index 0000000..c329778 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/gotty.go @@ -0,0 +1,244 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Gotty is a Go-package for reading and parsing the terminfo database +package gotty + +// TODO add more concurrency to name lookup, look for more opportunities. + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "path" + "reflect" + "strings" + "sync" +) + +// Open a terminfo file by the name given and construct a TermInfo object. +// If something went wrong reading the terminfo database file, an error is +// returned. +func OpenTermInfo(termName string) (*TermInfo, error) { + if len(termName) == 0 { + return nil, errors.New("No termname given") + } + // Find the environment variables + if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 { + return readTermInfo(path.Join(termloc, string(termName[0]), termName)) + } else { + // Search like ncurses + locations := []string{} + if h := os.Getenv("HOME"); len(h) > 0 { + locations = append(locations, path.Join(h, ".terminfo")) + } + locations = append(locations, + "/etc/terminfo/", + "/lib/terminfo/", + "/usr/share/terminfo/") + for _, str := range locations { + term, err := readTermInfo(path.Join(str, string(termName[0]), termName)) + if err == nil { + return term, nil + } + } + return nil, errors.New("No terminfo file(-location) found") + } +} + +// Open a terminfo file from the environment variable containing the current +// terminal name and construct a TermInfo object. If something went wrong +// reading the terminfo database file, an error is returned. +func OpenTermInfoEnv() (*TermInfo, error) { + termenv := os.Getenv("TERM") + return OpenTermInfo(termenv) +} + +// Return an attribute by the name attr provided. If none can be found, +// an error is returned. +func (term *TermInfo) GetAttribute(attr string) (stacker, error) { + // Channel to store the main value in. + var value stacker + // Add a blocking WaitGroup + var block sync.WaitGroup + // Keep track of variable being written. + written := false + // Function to put into goroutine. + f := func(ats interface{}) { + var ok bool + var v stacker + // Switch on type of map to use and assign value to it. + switch reflect.TypeOf(ats).Elem().Kind() { + case reflect.Bool: + v, ok = ats.(map[string]bool)[attr] + case reflect.Int16: + v, ok = ats.(map[string]int16)[attr] + case reflect.String: + v, ok = ats.(map[string]string)[attr] + } + // If ok, a value is found, so we can write. + if ok { + value = v + written = true + } + // Goroutine is done + block.Done() + } + block.Add(3) + // Go for all 3 attribute lists. + go f(term.boolAttributes) + go f(term.numAttributes) + go f(term.strAttributes) + // Wait until every goroutine is done. + block.Wait() + // If a value has been written, return it. + if written { + return value, nil + } + // Otherwise, error. + return nil, fmt.Errorf("Erorr finding attribute") +} + +// Return an attribute by the name attr provided. If none can be found, +// an error is returned. 
+
+// Return an attribute by the name attr provided. If none can be found,
+// an error is returned. A name is first converted to its termcap value.
+func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
+	tc := GetTermcapName(name)
+	return term.GetAttribute(tc)
+}
+
+// A utility function that finds and returns the termcap equivalent of a
+// variable name.
+func GetTermcapName(name string) string {
+	// Termcap name
+	var tc string
+	// Blocking group
+	var wait sync.WaitGroup
+	// Function to put into a goroutine
+	f := func(attrs []string) {
+		// Find the string corresponding to the name
+		for i, s := range attrs {
+			if s == name {
+				tc = attrs[i+1]
+			}
+		}
+		// Goroutine is finished
+		wait.Done()
+	}
+	wait.Add(3)
+	// Go for all 3 attribute lists
+	go f(BoolAttr[:])
+	go f(NumAttr[:])
+	go f(StrAttr[:])
+	// Wait until every goroutine is done
+	wait.Wait()
+	// Return the termcap name
+	return tc
+}
+
+// This function takes a path to a terminfo file and reads it in binary
+// form to construct the actual TermInfo file.
+func readTermInfo(path string) (*TermInfo, error) {
+	// Open the terminfo file
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	// magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
+	// Header is composed of the magic 0432 octal number, size of the name
+	// section, size of the boolean section, the amount of number values,
+	// the number of offsets of strings, and the size of the string section.
+	var header [6]int16
+	// Byte array is used to read in byte values
+	var byteArray []byte
+	// Short array is used to read in short values
+	var shArray []int16
+	// TermInfo object to store values
+	var term TermInfo
+
+	// Read in the header
+	err = binary.Read(file, binary.LittleEndian, &header)
+	if err != nil {
+		return nil, err
+	}
+	// If magic number isn't there or isn't correct, we have the wrong filetype
+	if header[0] != 0432 {
+		return nil, errors.New("Wrong filetype")
+	}
+
+	// Read in the names
+	byteArray = make([]byte, header[1])
+	err = binary.Read(file, binary.LittleEndian, &byteArray)
+	if err != nil {
+		return nil, err
+	}
+	term.Names = strings.Split(string(byteArray), "|")
+
+	// Read in the booleans
+	byteArray = make([]byte, header[2])
+	err = binary.Read(file, binary.LittleEndian, &byteArray)
+	if err != nil {
+		return nil, err
+	}
+	term.boolAttributes = make(map[string]bool)
+	for i, b := range byteArray {
+		if b == 1 {
+			term.boolAttributes[BoolAttr[i*2+1]] = true
+		}
+	}
+	// If the number of bytes read is not even, a byte for alignment is added.
+	// We know the header is an even number of bytes so only need to check the
+	// total of the names and booleans.
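+	// (For reference, the legacy compiled-terminfo layout assumed here is:
+	// int16 magic 0432; five int16 counts (names, booleans, numbers,
+	// string offsets, string table size); the names string; one byte per
+	// boolean; an optional alignment byte; the numbers as int16; the
+	// string offsets as int16; and finally the string table itself.)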
+	if (header[1]+header[2])%2 != 0 {
+		err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Read in shorts
+	shArray = make([]int16, header[3])
+	err = binary.Read(file, binary.LittleEndian, &shArray)
+	if err != nil {
+		return nil, err
+	}
+	term.numAttributes = make(map[string]int16)
+	for i, n := range shArray {
+		if n != 0377 && n > -1 {
+			term.numAttributes[NumAttr[i*2+1]] = n
+		}
+	}
+
+	// Read the offsets into the short array
+	shArray = make([]int16, header[4])
+	err = binary.Read(file, binary.LittleEndian, &shArray)
+	if err != nil {
+		return nil, err
+	}
+	// Read the actual strings in the byte array
+	byteArray = make([]byte, header[5])
+	err = binary.Read(file, binary.LittleEndian, &byteArray)
+	if err != nil {
+		return nil, err
+	}
+	term.strAttributes = make(map[string]string)
+	// We get an offset, and then iterate until the string is null-terminated
+	for i, offset := range shArray {
+		if offset > -1 {
+			if int(offset) >= len(byteArray) {
+				return nil, errors.New("array out of bounds reading string section")
+			}
+			r := bytes.IndexByte(byteArray[offset:], 0)
+			if r == -1 {
+				return nil, errors.New("missing nul byte reading string section")
+			}
+			r += int(offset)
+			term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
+		}
+	}
+	return &term, nil
+}
diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/Nvveen/Gotty/parser.go
new file mode 100644
index 0000000..a9d5d23
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/parser.go
@@ -0,0 +1,362 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package gotty
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var exp = [...]string{
+	"%%",
+	"%c",
+	"%s",
+	"%p(\\d)",
+	"%P([A-z])",
+	"%g([A-z])",
+	"%'(.)'",
+	"%{([0-9]+)}",
+	"%l",
+	"%\\+|%-|%\\*|%/|%m",
+	"%&|%\\||%\\^",
+	"%=|%>|%<",
+	"%A|%O",
+	"%!|%~",
+	"%i",
+	"%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
+	"%\\?(.*?);",
+}
+
+var regex *regexp.Regexp
+var staticVar map[byte]stacker
+
+// Parses the attribute that is received with name attr and parameters params.
+func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
+	// Get the attribute name first.
+	iface, err := term.GetAttribute(attr)
+	str, ok := iface.(string)
+	if err != nil {
+		return "", err
+	}
+	if !ok {
+		return str, errors.New("Only string capabilities can be parsed.")
+	}
+	// Construct the hidden parser struct so we can use a recursive stack based
+	// parser.
+	ps := &parser{}
+	// Dynamic variables only exist in this context.
+	ps.dynamicVar = make(map[byte]stacker, 26)
+	ps.parameters = make([]stacker, len(params))
+	// Convert the parameters to insert them into the parser struct.
+	for i, x := range params {
+		ps.parameters[i] = x
+	}
+	// Recursively walk and return.
+	result, err := ps.walk(str)
+	return result, err
+}
+
+// Parses the attribute that is received with name attr and parameters params.
+// Only works on full name of a capability that is given, which it uses to
+// search for the termcap name.
+func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
+	tc := GetTermcapName(attr)
+	return term.Parse(tc, params...)
+}
+
+// Identify each token in a stack based manner and do the actual parsing.
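+// For example, xterm's cursor_address capability is typically the string
+// "\x1b[%i%p1%d;%p2%dH"; walking it with parameters 3 and 5 increments both
+// parameters (%i), then pushes and prints each one (%p1%d, %p2%d), yielding
+// "\x1b[4;6H".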
+func (ps *parser) walk(attr string) (string, error) {
+	// We use a buffer to get the modified string.
+	var buf bytes.Buffer
+	// Next, find and identify all tokens by their indices and strings.
+	tokens := regex.FindAllStringSubmatch(attr, -1)
+	if len(tokens) == 0 {
+		return attr, nil
+	}
+	indices := regex.FindAllStringIndex(attr, -1)
+	q := 0 // q counts the matches of one token
+	// Iterate through the string per character.
+	for i := 0; i < len(attr); i++ {
+		// If the current position is an identified token, execute the following
+		// steps.
+		if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
+			// Switch on token.
+			switch {
+			case tokens[q][0][:2] == "%%":
+				// Literal percentage character.
+				buf.WriteByte('%')
+			case tokens[q][0][:2] == "%c":
+				// Pop a character.
+				c, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				buf.WriteByte(c.(byte))
+			case tokens[q][0][:2] == "%s":
+				// Pop a string.
+				str, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				if _, ok := str.(string); !ok {
+					return buf.String(), errors.New("Stack head is not a string")
+				}
+				buf.WriteString(str.(string))
+			case tokens[q][0][:2] == "%p":
+				// Push a parameter on the stack.
+				index, err := strconv.ParseInt(tokens[q][1], 10, 8)
+				if err != nil {
+					return buf.String(), err
+				}
+				index--
+				if int(index) >= len(ps.parameters) {
+					return buf.String(), errors.New("Parameters index out of bounds")
+				}
+				ps.st.push(ps.parameters[index])
+			case tokens[q][0][:2] == "%P":
+				// Pop a value from the stack and store it in a dynamic or
+				// static variable.
+				val, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				index := tokens[q][2]
+				if len(index) > 1 {
+					errorStr := fmt.Sprintf("%s is not a valid dynamic variable index",
+						index)
+					return buf.String(), errors.New(errorStr)
+				}
+				// Specify either dynamic or static.
+				if index[0] >= 'a' && index[0] <= 'z' {
+					ps.dynamicVar[index[0]] = val
+				} else if index[0] >= 'A' && index[0] <= 'Z' {
+					staticVar[index[0]] = val
+				}
+			case tokens[q][0][:2] == "%g":
+				// Push a dynamic or static variable onto the stack.
+				index := tokens[q][3]
+				if len(index) > 1 {
+					errorStr := fmt.Sprintf("%s is not a valid static variable index",
+						index)
+					return buf.String(), errors.New(errorStr)
+				}
+				var val stacker
+				if index[0] >= 'a' && index[0] <= 'z' {
+					val = ps.dynamicVar[index[0]]
+				} else if index[0] >= 'A' && index[0] <= 'Z' {
+					val = staticVar[index[0]]
+				}
+				ps.st.push(val)
+			case tokens[q][0][:2] == "%'":
+				// Push a character constant.
+				con := tokens[q][4]
+				if len(con) > 1 {
+					errorStr := fmt.Sprintf("%s is not a valid character constant", con)
+					return buf.String(), errors.New(errorStr)
+				}
+				ps.st.push(con[0])
+			case tokens[q][0][:2] == "%{":
+				// Push an integer constant.
+				con, err := strconv.ParseInt(tokens[q][5], 10, 32)
+				if err != nil {
+					return buf.String(), err
+				}
+				ps.st.push(con)
+			case tokens[q][0][:2] == "%l":
+				// Push the length of the string that is popped from the stack.
+				popStr, err := ps.st.pop()
+				if err != nil {
+					return buf.String(), err
+				}
+				if _, ok := popStr.(string); !ok {
+					return buf.String(), errors.New("Stack head is not a string")
+				}
+				ps.st.push(len(popStr.(string)))
+			case tokens[q][0][:2] == "%?":
+				// If-then-else construct. First, the whole string is identified and
+				// then inside this substring, we can specify which parts to switch on.
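+				// Note: the %? token is captured with a non-greedy regex
+				// (see exp above), so nested if-constructs are not handled here.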
+ ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") + ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) + var ( + ifStr string + err error + ) + // Parse the if-part to determine if-else. + if len(ifTokens[1]) > 0 { + ifStr, err = ps.walk(ifTokens[1]) + } else { // else + ifStr, err = ps.walk(ifTokens[4]) + } + // Return any errors + if err != nil { + return buf.String(), err + } else if len(ifStr) > 0 { + // Self-defined limitation, not sure if this is correct, but didn't + // seem like it. + return buf.String(), errors.New("If-clause cannot print statements") + } + var thenStr string + // Pop the first value that is set by parsing the if-clause. + choose, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + // Switch to if or else. + if choose.(int) == 0 && len(ifTokens[1]) > 0 { + thenStr, err = ps.walk(ifTokens[3]) + } else if choose.(int) != 0 { + if len(ifTokens[1]) > 0 { + thenStr, err = ps.walk(ifTokens[2]) + } else { + thenStr, err = ps.walk(ifTokens[5]) + } + } + if err != nil { + return buf.String(), err + } + buf.WriteString(thenStr) + case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'x': + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'X': + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 's': + token := tokens[q][0] + // Remove the : that comes before a flag. + if token[1] == ':' { + token = token[:1] + token[2:] + } + digit, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + // The rest is determined like the normal formatted prints. + digitStr := fmt.Sprintf(token, digit.(int)) + buf.WriteString(digitStr) + case tokens[q][0][:2] == "%i": + // Increment the parameters by one. + if len(ps.parameters) < 2 { + return buf.String(), errors.New("Not enough parameters to increment.") + } + val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) + val1++ + val2++ + ps.parameters[0], ps.parameters[1] = val1, val2 + default: + // The rest of the tokens is a special case, where two values are + // popped and then operated on by the token that comes after them. + op1, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + op2, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + var result stacker + switch tokens[q][0][:2] { + case "%+": + // Addition + result = op2.(int) + op1.(int) + case "%-": + // Subtraction + result = op2.(int) - op1.(int) + case "%*": + // Multiplication + result = op2.(int) * op1.(int) + case "%/": + // Division + result = op2.(int) / op1.(int) + case "%m": + // Modulo + result = op2.(int) % op1.(int) + case "%&": + // Bitwise AND + result = op2.(int) & op1.(int) + case "%|": + // Bitwise OR + result = op2.(int) | op1.(int) + case "%^": + // Bitwise XOR + result = op2.(int) ^ op1.(int) + case "%=": + // Equals + result = op2 == op1 + case "%>": + // Greater-than + result = op2.(int) > op1.(int) + case "%<": + // Lesser-than + result = op2.(int) < op1.(int) + case "%A": + // Logical AND + result = op2.(bool) && op1.(bool) + case "%O": + // Logical OR + result = op2.(bool) || op1.(bool) + case "%!": + // Logical complement + result = !op1.(bool) + case "%~": + // Bitwise complement + result = ^(op1.(int)) + } + ps.st.push(result) + } + + i = indices[q][1] - 1 + q++ + } else { + // We are not "inside" a token, so just skip until the end or the next + // token, and add all characters to the buffer. 
+			j := i
+			if q != len(indices) {
+				for !(j >= indices[q][0] && j < indices[q][1]) {
+					j++
+				}
+			} else {
+				j = len(attr)
+			}
+			buf.WriteString(string(attr[i:j]))
+			i = j
+		}
+	}
+	// Return the buffer as a string.
+	return buf.String(), nil
+}
+
+// Push a stacker-value onto the stack.
+func (st *stack) push(s stacker) {
+	*st = append(*st, s)
+}
+
+// Pop a stacker-value from the stack.
+func (st *stack) pop() (stacker, error) {
+	if len(*st) == 0 {
+		return nil, errors.New("Stack is empty.")
+	}
+	newStack := make(stack, len(*st)-1)
+	val := (*st)[len(*st)-1]
+	copy(newStack, (*st)[:len(*st)-1])
+	*st = newStack
+	return val, nil
+}
+
+// Initialize regexes and the static vars (that don't get changed between
+// calls).
+func init() {
+	// Initialize the main regex.
+	expStr := strings.Join(exp[:], "|")
+	regex, _ = regexp.Compile(expStr)
+	// Initialize the static variables.
+	staticVar = make(map[byte]stacker, 26)
+}
diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/Nvveen/Gotty/types.go
new file mode 100644
index 0000000..9bcc65e
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/types.go
@@ -0,0 +1,23 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package gotty
+
+type TermInfo struct {
+	boolAttributes map[string]bool
+	numAttributes  map[string]int16
+	strAttributes  map[string]string
+	// The various names of the TermInfo file.
+	Names []string
+}
+
+type stacker interface {
+}
+type stack []stacker
+
+type parser struct {
+	st         stack
+	parameters []stacker
+	dynamicVar map[byte]stacker
+}
diff --git a/vendor/github.com/RackSec/srslog/LICENSE b/vendor/github.com/RackSec/srslog/LICENSE
new file mode 100644
index 0000000..9269338
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015 Rackspace. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
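For orientation, a minimal sketch of how the Gotty pieces vendored above fit together. It assumes the package's `OpenTermInfoEnv` entry point (defined in the package's gotty.go, not shown here) and uses `"cm"`, the termcap code for `cursor_address`; the exact escape sequence returned depends on the local terminfo database:

```go
package main

import (
	"fmt"

	gotty "github.com/Nvveen/Gotty"
)

func main() {
	// Look up the terminfo entry named by the TERM environment variable.
	term, err := gotty.OpenTermInfoEnv("TERM")
	if err != nil {
		fmt.Println("no terminfo entry:", err)
		return
	}
	// Parse runs the stack-based parameter parser from parser.go:
	// "cm" (cursor_address) with parameters row 5, column 10.
	seq, err := term.Parse("cm", 5, 10)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Printf("escape sequence: %q\n", seq)
}
```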
diff --git a/vendor/github.com/RackSec/srslog/README.md b/vendor/github.com/RackSec/srslog/README.md
new file mode 100644
index 0000000..1ae1fd4
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/README.md
@@ -0,0 +1,131 @@
+[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog)
+
+# srslog
+
+Go has a `syslog` package in the standard library, but it has the following
+shortcomings:
+
+1. It doesn't have TLS support
+2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716)
+
+I agree that it doesn't need to be in the standard library. So, I've
+followed Brad's suggestion and have made a separate project to handle syslog.
+
+This code was taken directly from the Go project as a base to start from.
+
+However, this _does_ have TLS support.
+
+# Usage
+
+Basic usage retains the same interface as the original `syslog` package. We
+only added to the interface where required to support new functionality.
+
+Switch from the standard library:
+
+```
+import (
+    //"log/syslog"
+    syslog "github.com/RackSec/srslog"
+)
+```
+
+You can still use it for local syslog:
+
+```
+w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag")
+```
+
+Or to unencrypted UDP:
+
+```
+w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag")
+```
+
+Or to unencrypted TCP:
+
+```
+w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag")
+```
+
+But now you can also send messages via TLS-encrypted TCP:
+
+```
+w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem")
+```
+
+And if you need more control over your TLS configuration:
+
+```
+pool := x509.NewCertPool()
+serverCert, err := ioutil.ReadFile("/path/to/servercert.pem")
+if err != nil {
+    return nil, err
+}
+pool.AppendCertsFromPEM(serverCert)
+config := tls.Config{
+    RootCAs: pool,
+}
+
+w, err := DialWithTLSConfig(network, raddr, priority, tag, &config)
+```
+
+(Note that in both TLS cases, this uses a self-signed certificate, where the
+remote syslog server has the keypair and the client has only the public key.)
+
+And then to write log messages, continue like so:
+
+```
+if err != nil {
+    log.Fatal("failed to connect to syslog:", err)
+}
+defer w.Close()
+
+w.Alert("this is an alert")
+w.Crit("this is critical")
+w.Err("this is an error")
+w.Warning("this is a warning")
+w.Notice("this is a notice")
+w.Info("this is info")
+w.Debug("this is debug")
+w.Write([]byte("these are some bytes"))
+```
+
+# Generating TLS Certificates
+
+We've provided a script that you can use to generate a self-signed keypair:
+
+```
+pip install cryptography
+python script/gen-certs.py
+```
+
+That outputs the public key and private key to standard out. Put those into
+`.pem` files. (And don't put them into any source control. The certificate in
+the `test` directory is used by the unit tests, and please do not actually use
+it anywhere else.)
+
+# Running Tests
+
+Run the tests as usual:
+
+```
+go test
+```
+
+But we've also provided a test coverage script that will show you which
+lines of code are not covered:
+
+```
+script/coverage --html
+```
+
+That will open a new browser tab showing coverage information.
+
+# License
+
+This project uses the New BSD License, the same as the Go project itself.
+
+# Code of Conduct
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms. diff --git a/vendor/github.com/RackSec/srslog/constants.go b/vendor/github.com/RackSec/srslog/constants.go new file mode 100644 index 0000000..600801e --- /dev/null +++ b/vendor/github.com/RackSec/srslog/constants.go @@ -0,0 +1,68 @@ +package srslog + +import ( + "errors" +) + +// Priority is a combination of the syslog facility and +// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity +// message from the FTP facility. The default severity is LOG_EMERG; +// the default facility is LOG_KERN. +type Priority int + +const severityMask = 0x07 +const facilityMask = 0xf8 + +const ( + // Severity. + + // From /usr/include/sys/syslog.h. + // These are the same on Linux, BSD, and OS X. + LOG_EMERG Priority = iota + LOG_ALERT + LOG_CRIT + LOG_ERR + LOG_WARNING + LOG_NOTICE + LOG_INFO + LOG_DEBUG +) + +const ( + // Facility. + + // From /usr/include/sys/syslog.h. + // These are the same up to LOG_FTP on Linux, BSD, and OS X. + LOG_KERN Priority = iota << 3 + LOG_USER + LOG_MAIL + LOG_DAEMON + LOG_AUTH + LOG_SYSLOG + LOG_LPR + LOG_NEWS + LOG_UUCP + LOG_CRON + LOG_AUTHPRIV + LOG_FTP + _ // unused + _ // unused + _ // unused + _ // unused + LOG_LOCAL0 + LOG_LOCAL1 + LOG_LOCAL2 + LOG_LOCAL3 + LOG_LOCAL4 + LOG_LOCAL5 + LOG_LOCAL6 + LOG_LOCAL7 +) + +func validatePriority(p Priority) error { + if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { + return errors.New("log/syslog: invalid priority") + } else { + return nil + } +} diff --git a/vendor/github.com/RackSec/srslog/dialer.go b/vendor/github.com/RackSec/srslog/dialer.go new file mode 100644 index 0000000..47a7b2b --- /dev/null +++ b/vendor/github.com/RackSec/srslog/dialer.go @@ -0,0 +1,87 @@ +package srslog + +import ( + "crypto/tls" + "net" +) + +// dialerFunctionWrapper is a simple object that consists of a dialer function +// and its name. This is primarily for testing, so we can make sure that the +// getDialer method returns the correct dialer function. However, if you ever +// find that you need to check which dialer function you have, this would also +// be useful for you without having to use reflection. +type dialerFunctionWrapper struct { + Name string + Dialer func() (serverConn, string, error) +} + +// Call the wrapped dialer function and return its return values. +func (df dialerFunctionWrapper) Call() (serverConn, string, error) { + return df.Dialer() +} + +// getDialer returns a "dialer" function that can be called to connect to a +// syslog server. +// +// Each dialer function is responsible for dialing the remote host and returns +// a serverConn, the hostname (or a default if the Writer has not specified a +// hostname), and an error in case dialing fails. +// +// The reason for separate dialers is that different network types may need +// to dial their connection differently, yet still provide a net.Conn interface +// that you can use once they have dialed. Rather than an increasingly long +// conditional, we have a map of network -> dialer function (with a sane default +// value), and adding a new network type is as easy as writing the dialer +// function and adding it to the map. 
+func (w *Writer) getDialer() dialerFunctionWrapper {
+	dialers := map[string]dialerFunctionWrapper{
+		"":        dialerFunctionWrapper{"unixDialer", w.unixDialer},
+		"tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer},
+	}
+	dialer, ok := dialers[w.network]
+	if !ok {
+		dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer}
+	}
+	return dialer
+}
+
+// unixDialer uses the unixSyslog method to open a connection to the syslog
+// daemon running on the local machine.
+func (w *Writer) unixDialer() (serverConn, string, error) {
+	sc, err := unixSyslog()
+	hostname := w.hostname
+	if hostname == "" {
+		hostname = "localhost"
+	}
+	return sc, hostname, err
+}
+
+// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network
+// type.
+func (w *Writer) tlsDialer() (serverConn, string, error) {
+	c, err := tls.Dial("tcp", w.raddr, w.tlsConfig)
+	var sc serverConn
+	hostname := w.hostname
+	if err == nil {
+		sc = &netConn{conn: c}
+		if hostname == "" {
+			hostname = c.LocalAddr().String()
+		}
+	}
+	return sc, hostname, err
+}
+
+// basicDialer is the most common dialer for syslog, and supports both TCP and
+// UDP connections.
+func (w *Writer) basicDialer() (serverConn, string, error) {
+	c, err := net.Dial(w.network, w.raddr)
+	var sc serverConn
+	hostname := w.hostname
+	if err == nil {
+		sc = &netConn{conn: c}
+		if hostname == "" {
+			hostname = c.LocalAddr().String()
+		}
+	}
+	return sc, hostname, err
+}
diff --git a/vendor/github.com/RackSec/srslog/formatter.go b/vendor/github.com/RackSec/srslog/formatter.go
new file mode 100644
index 0000000..7852ad3
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/formatter.go
@@ -0,0 +1,48 @@
+package srslog
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+// Formatter is a type of function that takes the constituent parts of a
+// syslog message and returns a formatted string. A different Formatter is
+// defined for each different syslog protocol we support.
+type Formatter func(p Priority, hostname, tag, content string) string
+
+// DefaultFormatter is the original format supported by the Go syslog package,
+// and is a non-compliant amalgamation of 3164 and 5424 that is intended to
+// maximize compatibility.
+func DefaultFormatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
+		p, timestamp, hostname, tag, os.Getpid(), content)
+	return msg
+}
+
+// UnixFormatter omits the hostname, because it is only used locally.
+func UnixFormatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.Stamp)
+	msg := fmt.Sprintf("<%d>%s %s[%d]: %s",
+		p, timestamp, tag, os.Getpid(), content)
+	return msg
+}
+
+// RFC3164Formatter provides an RFC 3164 compliant message.
+func RFC3164Formatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.Stamp)
+	msg := fmt.Sprintf("<%d>%s %s %s[%d]: %s",
+		p, timestamp, hostname, tag, os.Getpid(), content)
+	return msg
+}
+
+// RFC5424Formatter provides an RFC 5424 compliant message.
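+// It fills the APP-NAME field with os.Args[0] and hard-codes the syslog
+// VERSION field to 1.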
+func RFC5424Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + appName := os.Args[0] + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, appName, pid, tag, content) + return msg +} diff --git a/vendor/github.com/RackSec/srslog/framer.go b/vendor/github.com/RackSec/srslog/framer.go new file mode 100644 index 0000000..ab46f0d --- /dev/null +++ b/vendor/github.com/RackSec/srslog/framer.go @@ -0,0 +1,24 @@ +package srslog + +import ( + "fmt" +) + +// Framer is a type of function that takes an input string (typically an +// already-formatted syslog message) and applies "message framing" to it. We +// have different framers because different versions of the syslog protocol +// and its transport requirements define different framing behavior. +type Framer func(in string) string + +// DefaultFramer does nothing, since there is no framing to apply. This is +// the original behavior of the Go syslog package, and is also typically used +// for UDP syslog. +func DefaultFramer(in string) string { + return in +} + +// RFC5425MessageLengthFramer prepends the message length to the front of the +// provided message, as defined in RFC 5425. +func RFC5425MessageLengthFramer(in string) string { + return fmt.Sprintf("%d %s", len(in), in) +} diff --git a/vendor/github.com/RackSec/srslog/net_conn.go b/vendor/github.com/RackSec/srslog/net_conn.go new file mode 100644 index 0000000..75e4c3c --- /dev/null +++ b/vendor/github.com/RackSec/srslog/net_conn.go @@ -0,0 +1,30 @@ +package srslog + +import ( + "net" +) + +// netConn has an internal net.Conn and adheres to the serverConn interface, +// allowing us to send syslog messages over the network. +type netConn struct { + conn net.Conn +} + +// writeString formats syslog messages using time.RFC3339 and includes the +// hostname, and sends the message to the connection. +func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = DefaultFormatter + } + formattedMessage := framer(formatter(p, hostname, tag, msg)) + _, err := n.conn.Write([]byte(formattedMessage)) + return err +} + +// close the network connection +func (n *netConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/RackSec/srslog/srslog.go b/vendor/github.com/RackSec/srslog/srslog.go new file mode 100644 index 0000000..b404dff --- /dev/null +++ b/vendor/github.com/RackSec/srslog/srslog.go @@ -0,0 +1,97 @@ +package srslog + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "log" + "os" +) + +// This interface allows us to work with both local and network connections, +// and enables Solaris support (see syslog_unix.go). +type serverConn interface { + writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error + close() error +} + +// New establishes a new connection to the system log daemon. Each +// write to the returned Writer sends a log message with the given +// priority and prefix. +func New(priority Priority, tag string) (w *Writer, err error) { + return Dial("", "", priority, tag) +} + +// Dial establishes a connection to a log daemon by connecting to +// address raddr on the specified network. Each write to the returned +// Writer sends a log message with the given facility, severity and +// tag. +// If network is empty, Dial will connect to the local syslog server. 
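+// The special network "tcp+tls" dials an encrypted TCP connection; any other
+// non-empty network value (e.g. "tcp" or "udp") is passed through to net.Dial.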
+func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) {
+	return DialWithTLSConfig(network, raddr, priority, tag, nil)
+}
+
+// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses certPath to load TLS certificates and configure
+// the secure connection.
+func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) {
+	serverCert, err := ioutil.ReadFile(certPath)
+	if err != nil {
+		return nil, err
+	}
+
+	return DialWithTLSCert(network, raddr, priority, tag, serverCert)
+}
+
+// DialWithTLSCert establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses serverCert to load a TLS certificate
+// and configure the secure connection.
+func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) {
+	pool := x509.NewCertPool()
+	pool.AppendCertsFromPEM(serverCert)
+	config := tls.Config{
+		RootCAs: pool,
+	}
+
+	return DialWithTLSConfig(network, raddr, priority, tag, &config)
+}
+
+// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses tlsConfig to configure the secure connection.
+func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) {
+	if err := validatePriority(priority); err != nil {
+		return nil, err
+	}
+
+	if tag == "" {
+		tag = os.Args[0]
+	}
+	hostname, _ := os.Hostname()
+
+	w := &Writer{
+		priority:  priority,
+		tag:       tag,
+		hostname:  hostname,
+		network:   network,
+		raddr:     raddr,
+		tlsConfig: tlsConfig,
+	}
+
+	_, err := w.connect()
+	if err != nil {
+		return nil, err
+	}
+	return w, err
+}
+
+// NewLogger creates a log.Logger whose output is written to
+// the system log service with the specified priority. The logFlag
+// argument is the flag set passed through to log.New to create
+// the Logger.
+func NewLogger(p Priority, logFlag int) (*log.Logger, error) {
+	s, err := New(p, "")
+	if err != nil {
+		return nil, err
+	}
+	return log.New(s, "", logFlag), nil
+}
diff --git a/vendor/github.com/RackSec/srslog/srslog_unix.go b/vendor/github.com/RackSec/srslog/srslog_unix.go
new file mode 100644
index 0000000..a04d939
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/srslog_unix.go
@@ -0,0 +1,54 @@
+package srslog
+
+import (
+	"errors"
+	"io"
+	"net"
+)
+
+// unixSyslog opens a connection to the syslog daemon running on the
+// local machine using a Unix domain socket. This function exists because of
+// Solaris support as implemented by gccgo. On Solaris you cannot
+// simply open a TCP connection to the syslog daemon. The gccgo
+// sources have a syslog_solaris.go file that implements unixSyslog to
+// return a type that satisfies the serverConn interface and simply calls the C
+// library syslog function.
+func unixSyslog() (conn serverConn, err error) {
+	logTypes := []string{"unixgram", "unix"}
+	logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"}
+	for _, network := range logTypes {
+		for _, path := range logPaths {
+			conn, err := net.Dial(network, path)
+			if err != nil {
+				continue
+			} else {
+				return &localConn{conn: conn}, nil
+			}
+		}
+	}
+	return nil, errors.New("Unix syslog delivery error")
+}
+
+// localConn adheres to the serverConn interface, allowing us to send syslog
+// messages to the local syslog daemon over a Unix domain socket.
+type localConn struct { + conn io.WriteCloser +} + +// writeString formats syslog messages using time.Stamp instead of time.RFC3339, +// and omits the hostname (because it is expected to be used locally). +func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = UnixFormatter + } + _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) + return err +} + +// close the (local) network connection +func (n *localConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/RackSec/srslog/writer.go b/vendor/github.com/RackSec/srslog/writer.go new file mode 100644 index 0000000..ce3d867 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/writer.go @@ -0,0 +1,185 @@ +package srslog + +import ( + "crypto/tls" + "strings" + "sync" +) + +// A Writer is a connection to a syslog server. +type Writer struct { + priority Priority + tag string + hostname string + network string + raddr string + tlsConfig *tls.Config + framer Framer + formatter Formatter + + mu sync.RWMutex // guards conn + conn serverConn +} + +// getConn provides access to the internal conn, protected by a mutex. The +// conn is threadsafe, so it can be used while unlocked, but we want to avoid +// race conditions on grabbing a reference to it. +func (w *Writer) getConn() serverConn { + w.mu.RLock() + conn := w.conn + w.mu.RUnlock() + return conn +} + +// setConn updates the internal conn, protected by a mutex. +func (w *Writer) setConn(c serverConn) { + w.mu.Lock() + w.conn = c + w.mu.Unlock() +} + +// connect makes a connection to the syslog server. +func (w *Writer) connect() (serverConn, error) { + conn := w.getConn() + if conn != nil { + // ignore err from close, it makes sense to continue anyway + conn.close() + w.setConn(nil) + } + + var hostname string + var err error + dialer := w.getDialer() + conn, hostname, err = dialer.Call() + if err == nil { + w.setConn(conn) + w.hostname = hostname + + return conn, nil + } else { + return nil, err + } +} + +// SetFormatter changes the formatter function for subsequent messages. +func (w *Writer) SetFormatter(f Formatter) { + w.formatter = f +} + +// SetFramer changes the framer function for subsequent messages. +func (w *Writer) SetFramer(f Framer) { + w.framer = f +} + +// Write sends a log message to the syslog daemon using the default priority +// passed into `srslog.New` or the `srslog.Dial*` functions. +func (w *Writer) Write(b []byte) (int, error) { + return w.writeAndRetry(w.priority, string(b)) +} + +// WriteWithPriority sends a log message with a custom priority +func (w *Writer) WriteWithPriority(p Priority, b []byte) (int, error) { + return w.writeAndRetry(p, string(b)) +} + +// Close closes a connection to the syslog daemon. +func (w *Writer) Close() error { + conn := w.getConn() + if conn != nil { + err := conn.close() + w.setConn(nil) + return err + } + return nil +} + +// Emerg logs a message with severity LOG_EMERG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Emerg(m string) (err error) { + _, err = w.writeAndRetry(LOG_EMERG, m) + return err +} + +// Alert logs a message with severity LOG_ALERT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. 
+func (w *Writer) Alert(m string) (err error) { + _, err = w.writeAndRetry(LOG_ALERT, m) + return err +} + +// Crit logs a message with severity LOG_CRIT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Crit(m string) (err error) { + _, err = w.writeAndRetry(LOG_CRIT, m) + return err +} + +// Err logs a message with severity LOG_ERR; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Err(m string) (err error) { + _, err = w.writeAndRetry(LOG_ERR, m) + return err +} + +// Warning logs a message with severity LOG_WARNING; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Warning(m string) (err error) { + _, err = w.writeAndRetry(LOG_WARNING, m) + return err +} + +// Notice logs a message with severity LOG_NOTICE; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Notice(m string) (err error) { + _, err = w.writeAndRetry(LOG_NOTICE, m) + return err +} + +// Info logs a message with severity LOG_INFO; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Info(m string) (err error) { + _, err = w.writeAndRetry(LOG_INFO, m) + return err +} + +// Debug logs a message with severity LOG_DEBUG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Debug(m string) (err error) { + _, err = w.writeAndRetry(LOG_DEBUG, m) + return err +} + +func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { + pr := (w.priority & facilityMask) | (p & severityMask) + + conn := w.getConn() + if conn != nil { + if n, err := w.write(conn, pr, s); err == nil { + return n, err + } + } + + var err error + if conn, err = w.connect(); err != nil { + return 0, err + } + return w.write(conn, pr, s) +} + +// write generates and writes a syslog formatted string. It formats the +// message based on the current Formatter and Framer. +func (w *Writer) write(conn serverConn, p Priority, msg string) (int, error) { + // ensure it ends in a \n + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + err := conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) + if err != nil { + return 0, err + } + // Note: return the length of the input, not the number of + // bytes printed by Fprintf, because this must behave like + // an io.Writer. + return len(msg), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000..f090cb4 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000..126cd1f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,425 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stderr instead of stdout, could also be a file.
+  log.SetOutput(os.Stderr)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other":  "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key":   key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the Syslog hook also supports connecting to a local syslog daemon (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to an AMQP broker (like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/) |
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe) |
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
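+
+For example, a minimal sketch that gates verbosity on an environment variable
+(the `DEBUG` variable name here is only an illustrative assumption; it needs
+the `os` package imported):
+
+```go
+if os.Getenv("DEBUG") != "" {
+  log.SetLevel(log.DebugLevel)
+}
+```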
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment,
+you could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third-party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+  // create a stdlib log.Logger that writes to
+  // logrus.Logger.
+  ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook, and formatter from a config file, so loggers are generated with different configs in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handler opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set on the entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation produced by the formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
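+	// fmt.Sprintln always appends a trailing newline; slice it off below so
+	// the formatter stays in charge of line termination.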
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000..9a0120a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) 
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code weren't here, then doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//   {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
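+//
+// For contrast, a minimal custom Formatter needs only one method (the type
+// name in this sketch is illustrative, not part of this package):
+//
+//	type PlainFormatter struct{}
+//
+//	func (f *PlainFormatter) Format(e *Entry) ([]byte, error) {
+//		return append([]byte(e.Message), '\n'), nil
+//	}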
+func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000..3f151cd --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000..2ad6dc5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000..b769f3d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,308 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. 
For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but not when writing to a file. You can easily implement
+	// your own that implements the `Formatter` interface; see the `README` or
+	// included formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful when debugging.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry; note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
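+	// Unlike Debugf and Infof above, Printf applies no level gate of its own;
+	// entry.Printf routes through Infof, which checks the logger's Level
+	// before formatting.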
+ logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000..e596691 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
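+//
+// A library entry point can then accept either implementation (the function
+// and type names in this sketch are hypothetical):
+//
+//	func NewWorker(logger StdLogger) *Worker {
+//		logger.Println("worker starting")
+//		return &Worker{log: logger}
+//	}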
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go new file mode 100644 index 0000000..1960169 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go @@ -0,0 +1,8 @@ +// +build appengine + +package logrus + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + return true +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000..5f6be4d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000..308160c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000..329038f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,22 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
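+// It does so by issuing the terminal-attributes ioctl (ioctlReadTermios,
+// defined per platform as TCGETS or TIOCGETA) against fd 2; the call only
+// succeeds when the descriptor is a TTY.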
+func IsTerminal() bool { + fd := syscall.Stderr + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 0000000..a3c6f6e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris,!appengine + +package logrus + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000..3727e8a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stderr + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000..9114b3c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,168 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
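+	// With sorting disabled, fields are written in Go's map iteration order,
+	// which can vary from one entry to the next.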
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + case string: + if !needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if !needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", errmsg) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 0000000..f74d2aa --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,53 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + switch level { + case DebugLevel: + printFunc = logger.Debug + case InfoLevel: + printFunc = logger.Info + case WarnLevel: + printFunc = logger.Warn + case ErrorLevel: + printFunc = logger.Error + case FatalLevel: + printFunc = logger.Fatal + case PanicLevel: + printFunc = logger.Panic + default: + printFunc = logger.Print + } + + go 
logger.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/agl/ed25519/LICENSE b/vendor/github.com/agl/ed25519/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/agl/ed25519/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/agl/ed25519/ed25519.go b/vendor/github.com/agl/ed25519/ed25519.go new file mode 100644 index 0000000..700938d --- /dev/null +++ b/vendor/github.com/agl/ed25519/ed25519.go @@ -0,0 +1,125 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// http://ed25519.cr.yp.to/. +package ed25519 + +// This code is a port of the public domain, "ref10" implementation of ed25519 +// from SUPERCOP. + +import ( + "crypto/sha512" + "crypto/subtle" + "io" + + "github.com/agl/ed25519/edwards25519" +) + +const ( + PublicKeySize = 32 + PrivateKeySize = 64 + SignatureSize = 64 +) + +// GenerateKey generates a public/private key pair using randomness from rand. 
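+// A typical caller passes crypto/rand.Reader; the message variable in this
+// sketch is illustrative:
+//
+//	pub, priv, err := ed25519.GenerateKey(rand.Reader)
+//	if err != nil {
+//		panic(err)
+//	}
+//	sig := ed25519.Sign(priv, message)
+//	ok := ed25519.Verify(pub, message, sig)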
+func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) { + privateKey = new([64]byte) + publicKey = new([32]byte) + _, err = io.ReadFull(rand, privateKey[:32]) + if err != nil { + return nil, nil, err + } + + h := sha512.New() + h.Write(privateKey[:32]) + digest := h.Sum(nil) + + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest) + edwards25519.GeScalarMultBase(&A, &hBytes) + A.ToBytes(publicKey) + + copy(privateKey[32:], publicKey[:]) + return +} + +// Sign signs the message with privateKey and returns a signature. +func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte { + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := new([64]byte) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + return signature +} + +// Verify returns true iff sig is a valid signature of message by publicKey. +func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool { + if sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + if !A.FromBytes(publicKey) { + return false + } + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var b [32]byte + copy(b[:], sig[32:]) + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) + + var checkR [32]byte + R.ToBytes(&checkR) + return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 +} diff --git a/vendor/github.com/agl/ed25519/edwards25519/const.go b/vendor/github.com/agl/ed25519/edwards25519/const.go new file mode 100644 index 0000000..ea5b77a --- /dev/null +++ b/vendor/github.com/agl/ed25519/edwards25519/const.go @@ -0,0 +1,1411 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 
2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, 
-15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, 
-7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 
319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + 
{ + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, 
-13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 
14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 
12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, 
+ FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 
6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, 
-16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 
4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + 
FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 
6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, 
-3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, 
-8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, 
-16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + 
FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, 
-2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, 
-20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + 
FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 
254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, + { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, 
-4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, 
-3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, 
-19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, 
-1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/github.com/agl/ed25519/edwards25519/edwards25519.go b/vendor/github.com/agl/ed25519/edwards25519/edwards25519.go new file mode 100644 index 0000000..184b4a8 --- /dev/null +++ b/vendor/github.com/agl/ed25519/edwards25519/edwards25519.go @@ -0,0 +1,2127 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edwards25519 implements operations in GF(2**255-19) and on an +// Edwards curve that is isomorphic to curve25519. See +// http://ed25519.cr.yp.to/. +package edwards25519 + +// This code is a port of the public domain, "ref10" implementation of ed25519 +// from SUPERCOP. 
+ +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +func FeZero(fe *FieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func FeSub(dst, a, b *FieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func FeCopy(dst, src *FieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + var x FieldElement + b = -b + for i := range x { + x[i] = b & (f[i] ^ g[i]) + } + + for i := range f { + f[i] ^= x[i] + } +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
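// (Editorial aside, not part of the patch: after the h[0] += 19*q above, the
// carry chain below renormalizes the limbs. The final carry out of h[9] is
// computed but deliberately never added to anything; dropping it subtracts
// 2^255*q, which together with the +19*q completes h - (2^255-19)*q, exactly
// the reduction the two Goal comments describe.)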
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + for i := range h { + h[i] = -f[i] + } +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
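One thing the notes above leave implicit is where the *19 and *2 factors in the precomputations of the function below come from. The following derivation is an editorial gloss, not part of the vendored file:

// Limb i carries weight 2^ceil(25.5*i), so a product term fi*gj carries
// weight 2^(ceil(25.5*i) + ceil(25.5*j)). Whenever i+j >= 10 that weight is
// at least 2^255, and 2^255 = 19 (mod p) folds the term back down:
//
//   f2*g8: 51 + 204 = 255, so it enters h0 scaled by 19        (f2g8_19)
//   f1*g9: 26 + 230 = 256 = 255 + 1, so it enters scaled by 38 (f1g9_38)
//
// The extra factor of 2 shows up exactly when i and j are both odd, since
// each ceiling then rounds 25.5*i and 25.5*j up by half a bit; the same
// rounding explains the plain doublings such as f1g1_2 when i+j < 10.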
+func FeMul(h, f, g *FieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 /* 1.4*2^29 */ + g2_19 := 19 * g2 /* 1.4*2^30; still ok */ + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * 
int64(g4_19) + f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] 
<< 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeSquare(h, f *FieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + 
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.959375*2^30 + f6_19 := 19 * f6 // 1.959375*2^30 + f7_38 := 38 * f7 // 1.959375*2^30 + f8_19 := 19 * f8 // 1.959375*2^30 + f9_38 := 38 * f9 // 1.959375*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) 
* int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + 
FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. +// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, 
v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) == (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31].
+// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise. +func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
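The input/output contract above is easy to cross-check with math/big. The sketch below is an editorial illustration, not part of the vendored file; leToBig and scMulAddBig are invented names.

import "math/big"

// leToBig decodes a 32-byte little-endian scalar into a big.Int.
func leToBig(b *[32]byte) *big.Int {
	var be [32]byte
	for i, v := range b {
		be[31-i] = v
	}
	return new(big.Int).SetBytes(be[:])
}

// scMulAddBig returns (a*b + c) mod l, the same integer that ScMulAdd
// packs back into 32 little-endian bytes.
func scMulAddBig(a, b, c *[32]byte) *big.Int {
	// l = 2^252 + 27742317777372353535851937790883648493, per the comment above.
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	t, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l.Add(l, t)
	r := new(big.Int).Mul(leToBig(a), leToBig(b))
	r.Add(r, leToBig(c))
	return r.Mod(r, l)
}

Feeding random scalars through both ScMulAdd and a reference like scMulAddBig and comparing the serialized results is a quick way to validate a vendored copy of this arithmetic.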
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 0000000..106569e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 0000000..d9f46e8 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,68 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Sinks +===== + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a statsite instance (TCP) +* StatsdSink: Sinks to a statsd / statsite instance (UDP) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. +* BlackholeSink : Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. 
+ +Examples +======== + +Here is an example of using the package: + + func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) + } + + // Configure a statsite sink as the global metrics sink + sink, _ := metrics.NewStatsiteSink("statsite:8125") + metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + + // Emit a Key/Value pair + metrics.EmitKey([]string{"questions", "meaning of life"}, 42) + + +Here is an example of setting up a signal handler: + + // Set up the inmem sink and signal handler + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + sig := metrics.DefaultInmemSignal(inm) + metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + + // Run some code + inm.SetGauge([]string{"foo"}, 42) + inm.EmitKey([]string{"bar"}, 30) + + inm.IncrCounter([]string{"baz"}, 42) + inm.IncrCounter([]string{"baz"}, 1) + inm.IncrCounter([]string{"baz"}, 80) + + inm.AddSample([]string{"method", "wow"}, 42) + inm.AddSample([]string{"method", "wow"}, 100) + inm.AddSample([]string{"method", "wow"}, 22) + + .... + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 + diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 0000000..31098dd --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 0000000..38136af --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 0000000..0749229 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,239 @@ +package metrics + +import ( + "fmt" + "math" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics intervals we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval.
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]float32 + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]*AggregateSample + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]*AggregateSample +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]float32), + Points: make(map[string][]float32), + Counters: make(map[string]*AggregateSample), + Samples: make(map[string]*AggregateSample), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Sum float64 // The sum of values + SumSq float64 // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f", a.Count, a.Sum) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum) + } +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = val +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg := intv.Counters[k] + if agg == nil { + agg = &AggregateSample{} + intv.Counters[k] = agg + } + agg.Ingest(float64(val)) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg := intv.Samples[k] + if agg == nil { + agg = &AggregateSample{} + intv.Samples[k] = agg + } + agg.Ingest(float64(val)) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + intervals := make([]*IntervalMetrics, len(i.intervals)) + copy(intervals, i.intervals) + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Replace(joined, " ", "_", -1) +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 0000000..95d08ee --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + 
stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for i := 0; i < len(data)-1; i++ { + intv := data[i] + intv.RLock() + for name, val := range intv.Gauges { + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for name, agg := range intv.Counters { + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) + } + for name, agg := range intv.Samples { + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100755 index 0000000..b818e41 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "runtime" + "time" +) + +func (m *Metrics) SetGauge(key []string, val float32) { + if m.HostName != "" && m.EnableHostname { + key = insert(0, m.HostName, key) + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.SetGauge(key, val) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.IncrCounter(key, val) +} + +func (m *Metrics) AddSample(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + m.sink.AddSample(key, val) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSample(key, msec) +} + +// Periodically 
collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statistics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100755 index 0000000..0c240c2 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,52 @@ +package metrics + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} + +// FanoutSink is used to fan out values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + for _, s := range fh { + s.SetGauge(key, val) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + for _, s := range fh { + s.IncrCounter(key, val) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + for _, s := range fh { + s.AddSample(key, val) + } +}
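Since delivery is abstracted behind the four-method `MetricSink` interface above, adding a backend only means implementing those methods. As an illustrative sketch (not part of the vendored package; the `LogSink` type and its behavior are hypothetical), a sink that writes every metric to the standard logger:

```go
package main

import (
	"log"
	"strings"

	metrics "github.com/armon/go-metrics"
)

// LogSink is a hypothetical MetricSink that prints every metric
// it receives via the standard library logger.
type LogSink struct{}

func (*LogSink) SetGauge(key []string, val float32)    { log.Printf("gauge %s = %f", strings.Join(key, "."), val) }
func (*LogSink) EmitKey(key []string, val float32)     { log.Printf("kv %s = %f", strings.Join(key, "."), val) }
func (*LogSink) IncrCounter(key []string, val float32) { log.Printf("counter %s += %f", strings.Join(key, "."), val) }
func (*LogSink) AddSample(key []string, val float32)   { log.Printf("sample %s = %f", strings.Join(key, "."), val) }

func main() {
	// The assignment makes the compiler verify LogSink satisfies MetricSink.
	var sink metrics.MetricSink = &LogSink{}
	sink.IncrCounter([]string{"requests"}, 1)
}
```

Such a sink can then be passed to `New` or `NewGlobal` (defined below) like any of the built-in ones.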
diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100755 index 0000000..44113f1 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,95 @@ +package metrics + +import ( + "os" + "time" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit metrics +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink +} + +// Shared global metrics instance +var globalMetrics *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics = &Metrics{sink: &BlackholeSink{}} +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics = metrics + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.SetGauge(key, val) +} + +func EmitKey(key []string, val float32) { + globalMetrics.EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.IncrCounter(key, val) +} + +func AddSample(key []string, val float32) { + globalMetrics.AddSample(key, val) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.MeasureSince(key, start) +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 0000000..65a5021 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,154 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP.
+type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Shutdown is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100755 index 0000000..6873013 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,142 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever.
+ flushInterval = 100 * time.Millisecond +) + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Shutdown is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite!
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 0000000..a5df10e --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 0000000..c054fe8 --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,36 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 0000000..8c963c9 --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,467 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. 
+type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf != nil +} + +func (n *node) addEdge(e edge) { + n.edges = append(n.edges, e) + n.edges.Sort() +} + +func (n *node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration. +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a new entry or update +// an existing entry. Returns if updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaustion + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } else { + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.replaceEdge(edge{ + label: search[0], + node: child, + }) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } + return nil, false +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should merge this node + if len(n.edges) == 1 { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges + } + return leaf.val, true +} + +// Get is used to look up a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} +
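Insert, Delete, and Get give the tree its basic map-like surface. A short, self-contained usage sketch (the `main` wrapper is illustrative; only the API defined in this file is used):

```go
package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	r := radix.New()
	r.Insert("foo", 1)

	// Get reports the stored value and whether the key was found.
	if v, ok := r.Get("foo"); ok {
		fmt.Println(v) // 1
	}

	// Delete returns the previous value and whether it was deleted.
	if old, ok := r.Delete("foo"); ok {
		fmt.Println(old, r.Len()) // 1 0
	}
}
```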
+// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. +func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaustion + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } else { + break + } + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaustion + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 0000000..5f14d11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md
new file mode 100644
index 0000000..947643a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/README.md
@@ -0,0 +1,116 @@
+# AWS SDK for Go
+
+
+[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api)
+[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go)
+[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
+
+
+aws-sdk-go is the official AWS SDK for the Go programming language.
+
+Check out our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK.
+
+## Installing
+
+If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or Go 1.6 and higher, you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included, vendored in the `vendor` folder.
+
+    go get -u github.com/aws/aws-sdk-go
+
+Otherwise, if your Go environment does not have vendoring support enabled, or you do not want to include the vendored SDK dependencies, you can use the following commands to retrieve the SDK and its non-testing dependencies using `go get`.
+
+    go get -u github.com/aws/aws-sdk-go/aws/...
+    go get -u github.com/aws/aws-sdk-go/service/...
+
+If you're looking to retrieve just the SDK without any dependencies, use the following command.
+
+    go get -d github.com/aws/aws-sdk-go/
+
+Both of these retrieval methods will still include the `vendor` folder; it should be deleted if it is not going to be used.
+
+    rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor
+
+## Reference Documentation
+[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction to configuring the SDK and making requests. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services.
+
+[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK itself, along with examples of how to use the SDK, the service client API operations, and the parameters each API operation requires.
+
+[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These guides are useful both when you're getting started with a service and when you're looking for more information about one. You should not need this document for coding, though in some cases services supply helpful samples that you might want to look out for.
+
+[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are several hand-crafted examples using SDK features and AWS services.
+
+## Configuring Credentials
+
+Before using the SDK, ensure that you've configured credentials. The best
+way to configure credentials on a development machine is to use the
+`~/.aws/credentials` file, which might look like:
+
+```
+[default]
+aws_access_key_id = AKID1234567890
+aws_secret_access_key = MY-SECRET-KEY
+```
+
+You can learn more about the credentials file from this
+[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs).
+
+Alternatively, you can set the following environment variables:
+
+```
+AWS_ACCESS_KEY_ID=AKID1234567890
+AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY
+```
+
+### AWS shared config file (`~/.aws/config`)
+The AWS SDK for Go added support for the shared config file in release [v1.3.0](https://github.com/aws/aws-sdk-go/releases/tag/v1.3.0). You can opt into enabling support for the shared config by setting the environment variable `AWS_SDK_LOAD_CONFIG` to a truthy value. See the [Session](https://github.com/aws/aws-sdk-go/wiki/sessions) wiki for more information about this feature.
+
+## Using the Go SDK
+
+To use a service in the SDK, create a service client by calling the `New()`
+function. Once you have a service client, you can call API operations, each
+of which returns response data and a possible error.
+
+To list a set of instance IDs from EC2, you could run:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/ec2"
+)
+
+func main() {
+	// Create a session for an EC2 service client in the "us-west-2" region.
+	// Note that you can also configure your region globally by
+	// exporting the AWS_REGION environment variable.
+	// session.NewSession returns an error which must be checked.
+	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
+	if err != nil {
+		panic(err)
+	}
+
+	// Create the EC2 service client.
+	svc := ec2.New(sess)
+
+	// Call the DescribeInstances operation.
+	resp, err := svc.DescribeInstances(nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// resp has all of the response data; pull out the instance IDs:
+	fmt.Println("> Number of reservation sets: ", len(resp.Reservations))
+	for idx, res := range resp.Reservations {
+		fmt.Println("  > Number of instances: ", len(res.Instances))
+		for _, inst := range resp.Reservations[idx].Instances {
+			fmt.Println("    - Instance ID: ", *inst.InstanceId)
+		}
+	}
+}
+```
+
+You can find more information and operations in our
+[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/).
+
+## License
+
+This SDK is distributed under the
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
+See LICENSE.txt and NOTICE.txt for more information.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..56fdfc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//     output, err := s3manager.Upload(svc, input, opts)
+//     if err != nil {
+//         if awsErr, ok := err.(awserr.Error); ok {
+//             // Get error details
+//             log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//             // Prints out full error message, including original error if there was one.
+//             log.Println("Error:", awsErr.Error())
+//
+//             // Get original error
+//             if origErr := awsErr.OrigErr(); origErr != nil {
+//                 // operate on original error.
+//             }
+//         } else {
+//             fmt.Println(err.Error())
+//         }
+//     }
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original errors if any were set. An empty slice is
+	// returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original errors if any were set. An empty slice is
+	// returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr is not nil it will be wrapped as the original error of the
+// returned Error.
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors wrapping the given collection of
+// errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as when a connection error occurred.
+//
+// Example:
+//
+//     output, err := s3manager.Upload(svc, input, opts)
+//     if err != nil {
+//         if reqerr, ok := err.(RequestFailure); ok {
+//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//         } else {
+//             log.Println("Error:", err.Error())
+//         }
+//     }
+//
+// Combined with awserr.Error:
+//
+//     output, err := s3manager.Upload(svc, input, opts)
+//     if err != nil {
+//         if awsErr, ok := err.(awserr.Error); ok {
+//             // Generic AWS Error with Code, Message, and original error (if any)
+//             fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//             if reqErr, ok := err.(awserr.RequestFailure); ok {
+//                 // A service error occurred
+//                 fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//             }
+//         } else {
+//             fmt.Println(err.Error())
+//         }
+//     }
+//
+type RequestFailure interface {
+	Error
+
+	// The status code of the HTTP response.
+	StatusCode() int
+
+	// The request ID returned by the service for a request failure. This will
+	// be empty if no request ID is available, such as when the request failed
+	// due to a connection error.
+	RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+	return newRequestError(err, statusCode, reqID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 0000000..0202a00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If provided, their lines are appended
+// to the message; otherwise they are omitted.
+func SprintError(code, message, extra string, origErr error) string {
+	msg := fmt.Sprintf("%s: %s", code, message)
+	if extra != "" {
+		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+	}
+	if origErr != nil {
+		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+	}
+	return msg
+}
+
+// A baseError wraps the code and message that define an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original error this error is based off of. Allows building
+	// chained errors.
+	errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free-form string containing detailed information about the
+// error.
+//
+// origErrs are the error objects which will be nested under the new error
+// returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
+		code:    code,
+		message: message,
+		errs:    origErrs,
+	}
+
+	return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	size := len(b.errs)
+	if size > 0 {
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occurred", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// awsError aliases the Error interface type so that it can be embedded as an
+// anonymous field in the requestError struct without conflicting with the
+// error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even if
+// the request failed without a service response, since the HTTP status code
+// may still be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
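+//
+// The rendered message takes the form produced by SprintError above; an
+// illustrative example (the values here are hypothetical):
+//
+//	NoSuchKey: The specified key does not exist.
+//	status code: 404, request id: EXAMPLEID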
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error.
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID.
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+// errorList is a list of errors which satisfies the error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// Build the error string only when the list is non-empty.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// Append a newline between elements, but not after the last one,
+			// so the message does not end with a trailing '\n'.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000..1a3d106
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+	"time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
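+//
+// A minimal sketch of the exported Copy above in use (the struct type here
+// is hypothetical):
+//
+//	type T struct{ Name *string }
+//	src := T{Name: aws.String("value")}
+//	var dst T
+//	awsutil.Copy(&dst, &src) // dst.Name now points at a fresh copy of "value"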
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				if _, ok := src.Interface().(*time.Time); !ok {
+					dst.Set(reflect.New(e))
+				} else {
+					tempValue := reflect.New(e)
+					tempValue.Elem().Set(src.Elem())
+					// Sets time.Time's unexported values
+					dst.Set(tempValue)
+				}
+			}
+			if src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If it is not assignable, the value would
+		// need to be converted and the impact of that may be unexpected, or is
+		// not compatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000..59fa4a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual reports whether the two values are deeply equal, like
+// reflect.DeepEqual. In addition, this function will dereference the input
+// values if possible so the DeepEqual performed will not fail if one
+// parameter is a pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil, and of the same type, they are equal.
+		// If they are of different types they are not equal.
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000..11c52c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
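+//
+// The path grammar implemented below (a sketch, read off the code): path
+// components are separated by ".", a component may carry an index such as
+// "[0]" (negative values index from the end) or "[]" to expand every
+// element, "*" expands every struct field, and "||" separates alternative
+// paths tried in order, e.g. "Reservations[].Instances[].InstanceId".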
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
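+//
+// For example (the types and path here are hypothetical):
+//
+//	type Output struct{ Things []*string }
+//	vals, err := awsutil.ValuesAtPath(output, "Things[0]")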
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000..fc38172 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,107 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000..b6432f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
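+//
+// StringValue is similar to Prettify in prettify.go, but it does not
+// special-case time.Time or io.Reader values. Illustrative use (the struct
+// type here is hypothetical):
+//
+//	s := awsutil.StringValue(&SomeInput{})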
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 0000000..7c0e7d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,137 @@ +package client + +import ( + "fmt" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
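+//
+// Service packages typically call New from their own constructors with the
+// Config returned by a ConfigProvider; a rough sketch (not the exact
+// upstream wiring):
+//
+//	c := p.ClientConfig("s3", cfgs...)
+//	svc := client.New(*c.Config, metadata.ClientInfo{ServiceName: "s3"}, c.Handlers)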
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. 
+		r.ResetBody()
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+	var msg = "no response data"
+	if r.HTTPResponse != nil {
+		logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+		dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
+		if err != nil {
+			r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
+			return
+		}
+
+		msg = string(dumpedBody)
+	} else if r.Error != nil {
+		msg = r.Error.Error()
+	}
+	r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000..43a3676
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,90 @@
+package client
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//	type retryer struct {
+//		client.DefaultRetryer
+//	}
+//
+//	// This implementation always has 100 max retries
+//	func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if retryCount > 13 {
+		retryCount = 13
+	} else if throttle && retryCount > 8 {
+		retryCount = 8
+	}
+
+	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode >= 500 {
+		return true
+	}
+	return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode == 502 ||
+		r.HTTPResponse.StatusCode == 503 ||
+		r.HTTPResponse.StatusCode == 504 {
+		return true
+	}
+	return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000..4778056
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName   string
+	APIVersion    string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000..34c2bab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,419 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+//	// Create Session with MaxRetry configuration to be shared by multiple
+//	// service clients.
+//	sess, err := session.NewSession(&aws.Config{
+//		MaxRetries: aws.Int(3),
+//	})
+//
+//	// Create S3 service client with a specific Region.
+//	svc := s3.New(sess, &aws.Config{
+//		Region: aws.String("us-west-2"),
+//	})
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to a
+	// chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// @note You must still provide a `Region` value when specifying an
+	//   endpoint for a client.
+	Endpoint *string
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+	//   AWS Regions and Endpoints
+	Region *string
+
+	// Set this to `true` to disable SSL when sending requests. Defaults
+	// to `false`.
+	DisableSSL *bool
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+
+	// An integer value representing the logging level. The default log level
+	// is zero (LogOff), which represents no logging. To enable logging set
+	// to a LogLevel Value.
+	LogLevel *LogLevelType
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard out.
+	Logger Logger
+
+	// The maximum number of times that a request will be retried for failures.
+	// Defaults to -1, which defers the max retry setting to the service
+	// specific configuration.
+	MaxRetries *int
+
+	// Retryer guides how HTTP requests should be retried in case of
+	// recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the client.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//	cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
+	// Disables semantic parameter validation, which validates input for
+	// missing required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+	// will use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// @note This configuration option is specific to the Amazon S3 service.
+	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	//   Amazon S3: Virtual Hosting of Buckets
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated, and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate in order to be used with an
+	// S3 client with accelerate enabled. If the bucket is not enabled for
+	// accelerate an error will be returned. The bucket name must also be DNS
+	// compatible to work with accelerate.
+	S3UseAccelerate *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to session.NewSession() in order to disable
+	// the EC2Metadata client overriding the timeout for the default
+	// credentials chain.
+	//
+	// Example:
+	//	sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
+	//
+	//	svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint generated for a service client to be the dual
+	// stack endpoint. The dual stack endpoint will support both IPv4 and
+	// IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// Example:
+	//
+	//	sess, err := session.NewSession()
+	//
+	//	svc := s3.New(sess, &aws.Config{
+	//		UseDualStack: aws.Bool(true),
+	//	})
+	UseDualStack *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	SleepDelay func(time.Duration)
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+//	// Create Session with MaxRetry configuration to be shared by multiple
+//	// service clients.
+//	sess, err := session.NewSession(aws.NewConfig().
+//		WithMaxRetries(3),
+//	)
+//
+//	// Create S3 service client with a specific Region.
+//	svc := s3.New(sess, aws.NewConfig().
+//		WithRegion("us-west-2"),
+//	)
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and
+// returns a Config pointer.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+	c.CredentialsChainVerboseErrors = &verboseErrs
+	return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+	c.Endpoint = &endpoint
+	return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+	return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+	c.DisableSSL = &disable
+	return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+	c.HTTPClient = client
+	return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
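+//
+// For example, the With* setters can be chained from NewConfig (illustrative):
+//
+//	cfg := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)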
+func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
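+//
+// Later configs take precedence for any field they set; a sketch:
+//
+//	base := aws.NewConfig().WithRegion("us-west-2")
+//	base.MergeIn(aws.NewConfig().WithMaxRetries(5))
+//	// base now carries both the Region and MaxRetries settings.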
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 0000000..3b73a7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,369 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
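+//
+// These pointer/value helpers are commonly used with API parameter and
+// result structs, e.g. (the field name here is hypothetical):
+//
+//	name := aws.StringValue(obj.Name) // "" when obj.Name is nil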
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 0000000..3b73a7d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,369 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+	if v != nil {
+		return *v
+	}
+	return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+	dst := make([]*string, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+	dst := make([]string, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+	dst := make(map[string]*string)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+	dst := make(map[string]string)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+	if v != nil {
+		return *v
+	}
+	return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+	dst := make([]*bool, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+	dst := make([]bool, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+	dst := make(map[string]*bool)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+	dst := make(map[string]bool)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+	return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
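+
+// Illustrative usage sketch, not part of the original file: the conversion
+// helpers in this file pair value-to-pointer constructors with nil-safe
+// accessors, which is how optional SDK fields are typically set and read back.
+//
+//	name := aws.String("my-bucket")         // *string from a value
+//	fmt.Println(aws.StringValue(name))      // "my-bucket"
+//	fmt.Println(aws.StringValue(nil))       // "" (nil yields the zero value)
+//
+//	ids := aws.Int64Slice([]int64{1, 2, 3}) // []*int64 for list parameters
+//	_ = aws.Int64ValueSlice(ids)            // back to []int64; nil entries become 0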
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time in milliseconds cannot be
+// represented by an int64, which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service API's such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 0000000..8e12f82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,182 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"runtime"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+	var length int64
+
+	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+		length, _ = strconv.ParseInt(slength, 10, 64)
+	} else {
+		switch body := r.Body.(type) {
+		case nil:
+			length = 0
+		case lener:
+			length = int64(body.Len())
+		case io.Seeker:
+			r.BodyStart, _ = body.Seek(0, 1)
+			end, _ := body.Seek(0, 2)
+			body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+			length = end - r.BodyStart
+		default:
+			panic("Cannot get length of body, must provide `ContentLength`")
+		}
+	}
+
+	if length > 0 {
+		r.HTTPRequest.ContentLength = length
+		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+	} else {
+		r.HTTPRequest.ContentLength = 0
+		r.HTTPRequest.Header.Del("Content-Length")
+	}
+}}
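+
+// Illustrative usage sketch, not part of the original file: any body exposing
+// Len(), such as bytes.Reader or strings.Reader, satisfies the lener interface
+// above, so the handler can size it without seeking.
+//
+//	r.SetBufferBody(payload) // wraps payload in an in-memory reader
+//	// After core.BuildContentLengthHandler runs:
+//	//   r.HTTPRequest.ContentLength == int64(len(payload))
+//	//   r.HTTPRequest.Header.Get("Content-Length") == strconv.Itoa(len(payload))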
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+	Name: "core.SDKVersionUserAgentHandler",
+	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+		runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests that would cause the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+	Name: "core.ValidateReqSigHandler",
+	Fn: func(r *request.Request) {
+		// Requests using anonymous credentials are not signed.
+		if r.Config.Credentials == credentials.AnonymousCredentials {
+			return
+		}
+
+		signedTime := r.Time
+		if !r.LastSignedAt.IsZero() {
+			signedTime = r.LastSignedAt
+		}
+
+		// 10 minutes to allow for some clock skew/delays in transmission.
+		// Would be improved with aws/aws-sdk-go#423
+		if signedTime.Add(10 * time.Minute).After(time.Now()) {
+			return
+		}
+
+		fmt.Println("request expired, resigning")
+		r.Sign()
+	},
+}
+
+// SendHandler is a request handler to send the service request using the HTTP client.
+var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
+	var err error
+	r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
+	if err != nil {
+		// Prevent leaking if an HTTPResponse was returned. Clean up
+		// the body.
+		if r.HTTPResponse != nil {
+			r.HTTPResponse.Body.Close()
+		}
+		// Capture the case where url.Error is returned for error processing
+		// response. e.g. 301 without location header comes back as string
+		// error and r.HTTPResponse is nil. Other url redirect errors will
+		// come back in a similar manner.
+		if e, ok := err.(*url.Error); ok && e.Err != nil {
+			if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+				code, _ := strconv.ParseInt(s[1], 10, 64)
+				r.HTTPResponse = &http.Response{
+					StatusCode: int(code),
+					Status:     http.StatusText(int(code)),
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+				}
+				return
+			}
+		}
+		if r.HTTPResponse == nil {
+			// Add a dummy request response object to ensure the HTTPResponse
+			// value is consistent.
+			r.HTTPResponse = &http.Response{
+				StatusCode: int(0),
+				Status:     http.StatusText(int(0)),
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			}
+		}
+		// Catch all other request errors.
+		r.Error = awserr.New("RequestError", "send request failed", err)
+		r.Retryable = aws.Bool(true) // network errors are retryable
+	}
+}}
+
+// ValidateResponseHandler is a request handler to validate the service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+		// this may be replaced by an UnmarshalError handler
+		r.Error = awserr.New("UnknownError", "unknown error", nil)
+	}
+}}
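+
+// Illustrative usage sketch, not part of the original file: these named
+// handlers live on per-phase handler lists, and callers can splice their own
+// hooks in alongside them.
+//
+//	svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
+//		Name: "example.LogRequestHandler",
+//		Fn: func(r *request.Request) {
+//			log.Printf("sending %s.%s", r.ClientInfo.ServiceName, r.Operation.Name)
+//		},
+//	})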
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+	// If one of the other handlers already set the retry state
+	// we don't want to override it based on the service's state
+	if r.Retryable == nil {
+		r.Retryable = aws.Bool(r.ShouldRetry(r))
+	}
+
+	if r.WillRetry() {
+		r.RetryDelay = r.RetryRules(r)
+		r.Config.SleepDelay(r.RetryDelay)
+
+		// when the expired token exception occurs the credentials
+		// need to be expired locally so that the next request to
+		// get credentials will trigger a credentials refresh.
+		if r.IsErrorExpired() {
+			r.Config.Credentials.Expire()
+		}
+
+		r.RetryCount++
+		r.Error = nil
+	}
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+		r.Error = aws.ErrMissingRegion
+	} else if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 0000000..7d50b15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+	if !r.ParamsFilled() {
+		return
+	}
+
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
+		}
+	}
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000..6efc77b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true
+	//
+	// @readonly
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//	creds := NewChainCredentials(
+//		[]Provider{
+//			&EnvProvider{},
+//			&EC2RoleProvider{
+//				Client: ec2metadata.New(sess),
+//			},
+//		})
+//
+//	// Usage of ChainCredentials with aws.Config
+//	svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	err := error(ErrNoValidProvidersFoundInChain)
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000..7b8ebf5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,223 @@
+// Package credentials provides credential retrieval and management.
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := NewEnvCredentials()
+//
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//		// handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := NewCredentials(&EC2RoleProvider{})
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
+package credentials
+
+import (
+	"sync"
+	"time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//	svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+//	// Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what being
+// expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns the credentials value if it was successfully retrieved.
+	// An error is returned if the value was not obtainable, or is empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//	type EC2RoleProvider struct {
+//		Expiry
+//		...
+//	}
type Expiry struct {
+	// The date/time when to expire on
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	e.expiration = expiration
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
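+
+// Illustrative usage sketch, not part of the original file: a custom Provider
+// typically embeds Expiry so SetExpiration and IsExpired come for free,
+// leaving only Retrieve to implement. tokenFileProvider, its fields, and
+// readKeys are hypothetical.
+//
+//	type tokenFileProvider struct {
+//		credentials.Expiry
+//		Path string
+//	}
+//
+//	func (p *tokenFileProvider) Retrieve() (credentials.Value, error) {
+//		id, secret, err := readKeys(p.Path) // hypothetical helper
+//		if err != nil {
+//			return credentials.Value{}, err
+//		}
+//		p.SetExpiration(time.Now().Add(15*time.Minute), 30*time.Second)
+//		return credentials.Value{AccessKeyID: id, SecretAccessKey: secret}, nil
+//	}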
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+	if e.CurrentTime == nil {
+		e.CurrentTime = time.Now
+	}
+	return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides concurrency-safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronization so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+	creds        Value
+	forceRefresh bool
+	m            sync.Mutex
+
+	provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+	return &Credentials{
+		provider:     provider,
+		forceRefresh: true,
+	}
+}
+
+// Get returns the credentials value, or an error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if c.isExpired() {
+		creds, err := c.provider.Retrieve()
+		if err != nil {
+			return Value{}, err
+		}
+		c.creds = creds
+		c.forceRefresh = false
+	}
+
+	return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+	return c.forceRefresh || c.provider.IsExpired()
+}
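+
+// Illustrative usage sketch, not part of the original file: Credentials caches
+// whatever the wrapped Provider returns, so Get is cheap until the value
+// expires or Expire forces a refresh.
+//
+//	creds := credentials.NewEnvCredentials()
+//	v, err := creds.Get() // calls Retrieve() the first time, cached after
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(v.ProviderName)
+//	creds.Expire() // the next Get() will call Retrieve() again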
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 0000000..aa9d689
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+)
+
+// ProviderName is the name of the EC2Role provider.
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http Client,
+// Endpoint, or ExpiryWindow:
+//
+//	p := &ec2rolecreds.EC2RoleProvider{
+//		// Pass in a custom timeout to be used when requesting
+//		// IAM EC2 Role credentials.
+//		Client: ec2metadata.New(sess, aws.Config{
+//			HTTPClient: &http.Client{Timeout: 10 * time.Second},
+//		}),
+//
+//		// Do not use early expiry of credentials. If a non zero value is
+//		// specified the credentials will be expired early
+//		ExpiryWindow: 0,
+//	}
type EC2RoleProvider struct {
+	credentials.Expiry
+
+	// Required EC2Metadata client to use when connecting to EC2 metadata service.
+	Client *ec2metadata.EC2Metadata
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or the desired credentials
+// cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	credsList, err := requestCredList(m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credential name from the
+// EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("SerializationError",
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 0000000..a4cec5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,191 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om...."
+//	}
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om....",
+//	    "Token" : "AQoDY....=",
+//	    "Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//	{
+//	    "code": "ErrorCode",
+//	    "message": "Helpful error message."
+//	}
+package endpointcreds
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
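+
+// Illustrative usage sketch, not part of the original file: wiring the
+// endpoint provider into a Credentials object. The URL is a placeholder, and
+// cfg/handlers would typically come from the SDK defaults package.
+//
+//	creds := endpointcreds.NewCredentialsClient(cfg, handlers,
+//		"http://127.0.0.1:8080/creds",
+//		func(p *endpointcreds.Provider) { p.ExpiryWindow = time.Minute },
+//	)
+//	v, err := creds.Get()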
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	staticCreds bool
+	credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The endpoint the request will be made to is provided by the aws.Config's
+	// Endpoint value.
+	Client *client.Client
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+	p := &Provider{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: "CredentialsEndpoint",
+				Endpoint:    endpoint,
+			},
+			handlers,
+		),
+	}
+
+	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+	p.Client.Handlers.Validate.Clear()
+	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint whenever the wrapped Credentials needs to be
+// refreshed.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	resp, err := p.getCredentials()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName},
+			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+	}
+
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	} else {
+		p.staticCreds = true
+	}
+
+	return credentials.Value{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+type getCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+type errorOutput struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+	op := &request.Operation{
+		Name:       "GetCredentials",
+		HTTPMethod: "GET",
+	}
+
+	out := &getCredentialsOutput{}
+	req := p.Client.NewRequest(op, nil, out)
+	req.HTTPRequest.Header.Set("Accept", "application/json")
+
+	return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if len(r.ClientInfo.Endpoint) == 0 {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	out := r.Data.(*getCredentialsOutput)
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000..96655bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,77 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName is the name of the Env provider.
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+	//
+	// @readonly
+	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+	// can't be found in the process's environment.
+	//
+	// @readonly
+	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+	retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000..7fb7cbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/go-ini/ini"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// SharedCredsProviderName is the name of the SharedCreds provider.
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+	//
+	// @readonly
+	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
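+
+// Illustrative usage sketch, not part of the original file: given a
+// $HOME/.aws/credentials file containing
+//
+//	[default]
+//	aws_access_key_id = AKID
+//	aws_secret_access_key = SECRET
+//
+// the provider is typically wired up as:
+//
+//	creds := credentials.NewSharedCredentials("", "default")
+//	v, err := creds.Get() // loads and parses the profile on first use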
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads the named profile from the shared credentials file pointed
+// to by filename. The credentials retrieved from the profile will be returned,
+// or an error if it fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.Load(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+	iniProfile, err := config.GetSection(profile)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+	}
+
+	id, err := iniProfile.GetKey("aws_access_key_id")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			err)
+	}
+
+	secret, err := iniProfile.GetKey("aws_secret_access_key")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			err)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.Key("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id.String(),
+		SecretAccessKey: secret.String(),
+		SessionToken:    token.String(),
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if p.Filename == "" {
+		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
+			return p.Filename, nil
+		}
+
+		homeDir := os.Getenv("HOME") // *nix
+		if homeDir == "" {           // Windows
+			homeDir = os.Getenv("USERPROFILE")
+		}
+		if homeDir == "" {
+			return "", ErrSharedCredentialsHomeNotFound
+		}
+
+		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+	}
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000..4f5dab3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName is the name of the Static provider.
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	//
+	// @readonly
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or an error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
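+
+// Illustrative usage sketch, not part of the original file: static credentials
+// are handy in tests or when keys are injected by other means; the key values
+// here are placeholders.
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//	v, err := creds.Get() // never expires; IsExpired() is always false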
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 0000000..30c847a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,161 @@
+// Package stscreds provides credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+package stscreds
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// ProviderName is the name of the AssumeRole provider.
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time (15 minutes) that the
+// credentials will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time. This provider must be used explicitly,
+// as it is not included in the credentials chain.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	TokenCode *string
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   svc,
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+	// Apply defaults where parameters are not set.
+	if p.RoleSessionName == "" {
+		// Try to work out a role name that will hopefully end up unique.
+		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+	}
+	if p.Duration == 0 {
+		// Expire as often as AWS permits.
+		p.Duration = DefaultDuration
+	}
+	input := &sts.AssumeRoleInput{
+		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+		RoleArn:         aws.String(p.RoleARN),
+		RoleSessionName: aws.String(p.RoleSessionName),
+		ExternalId:      p.ExternalID,
+	}
+	if p.Policy != nil {
+		input.Policy = p.Policy
+	}
+	if p.SerialNumber != nil && p.TokenCode != nil {
+		input.SerialNumber = p.SerialNumber
+		input.TokenCode = p.TokenCode
+	}
+	roleOutput, err := p.Client.AssumeRole(input)
+
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	// We will proactively generate new credentials before they expire.
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
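+
+// Illustrative usage sketch, not part of the original file: sess is assumed to
+// be a *session.Session and the role ARN is a placeholder.
+//
+//	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example")
+//	svc := s3.New(sess, &aws.Config{Credentials: creds})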
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+			RemoteCredProvider(*cfg, handlers),
+		},
+	})
+}
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+
+	if len(ecsCredURI) > 0 {
+		return ecsCredProvider(cfg, handlers, ecsCredURI)
+	}
+
+	return ec2RoleProvider(cfg, handlers)
+}
+
+func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
+	const host = `169.254.170.2`
+
+	return endpointcreds.NewProviderClient(cfg, handlers,
+		fmt.Sprintf("http://%s%s", host, uri),
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+		},
+	)
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
+		aws.StringValue(cfg.Region), true, false)
+
+	return &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
+		ExpiryWindow: 5 * time.Minute,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000..e5755d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the instance. If
+// there is no user-data setup for the EC2 instance a "NotFoundError" error
+// code will be returned.
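+//
+// A minimal usage sketch, assuming svc is an *EC2Metadata client created
+// elsewhere with ec2metadata.New:
+//
+//	userData, err := svc.GetUserData()
+//	if err != nil {
+//		// no user data was set, or the request failed
+//	}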
+func (c *EC2Metadata) GetUserData() (string, error) {
+	op := &request.Operation{
+		Name:       "GetUserData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "user-data"),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+	req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+		if r.HTTPResponse.StatusCode == http.StatusNotFound {
+			r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+		}
+	})
+
+	return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetDynamicData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "dynamic", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+	resp, err := c.GetDynamicData("instance-identity/document")
+	if err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 instance identity document", err)
+	}
+
+	doc := EC2InstanceIdentityDocument{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 instance identity document", err)
+	}
+
+	return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API.
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+	resp, err := c.GetMetadata("iam/info")
+	if err != nil {
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 IAM info", err)
+	}
+
+	info := EC2IAMInfo{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+		return EC2IAMInfo{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 IAM info", err)
+	}
+
+	if info.Code != "Success" {
+		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataError", errMsg, nil)
+	}
+
+	return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+	resp, err := c.GetMetadata("placement/availability-zone")
+	if err != nil {
+		return "", err
+	}
+
+	// returns region without the suffix. E.g. us-west-2a becomes us-west-2
+	return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if the application is running within an EC2 Instance
+// and the metadata service is available.
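+//
+// For example (a sketch; sess is an assumed *session.Session):
+//
+//	svc := ec2metadata.New(sess)
+//	if svc.Available() {
+//		region, _ := svc.Region()
+//		// use region, e.g. to default the SDK's Config.Region
+//	}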
+func (c *EC2Metadata) Available() bool {
+	if _, err := c.GetMetadata("instance-id"); err != nil {
+		return false
+	}
+
+	return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshalling
+// IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshalling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 0000000..5b4379d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//	// Create a EC2Metadata client from just a session.
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create a EC2Metadata client with additional configuration
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the EC2Metadata HTTP client's timeout will be shortened. To
+// disable this override set Config.EC2MetadataDisableTimeoutOverride to true.
+// The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+		return
+	}
+
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 0000000..5766361
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+	// ErrMissingRegion is an error that is returned if region configuration is
+	// not found.
+	//
+	// @readonly
+	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+	// resolved for a service.
+	//
+	// @readonly
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 0000000..db87188
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels.
+// It is safe to use on nil value LogLevelTypes. If the LogLevel is nil, it
+// will default to the LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// It is safe to use on nil value LogLevelTypes. If the LogLevel is nil, it
+// will default to the LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// will be retried. This should be used when you want to log service requests
+	// being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//		fmt.Fprintln(os.Stdout, args...)
+//	})})
type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 0000000..5279c19
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,187 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
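+//
+// For illustration, a custom named handler could be pushed onto one of the
+// lists like so (a sketch; svc is an assumed service client):
+//
+//	svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
+//		Name: "example.LogSendHandler",
+//		Fn: func(r *request.Request) {
+//			fmt.Println("sending", r.ClientInfo.ServiceName, r.Operation.Name)
+//		},
+//	})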
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+	return Handlers{
+		Validate:         h.Validate.copy(),
+		Build:            h.Build.copy(),
+		Sign:             h.Sign.copy(),
+		Send:             h.Send.copy(),
+		ValidateResponse: h.ValidateResponse.copy(),
+		Unmarshal:        h.Unmarshal.copy(),
+		UnmarshalError:   h.UnmarshalError.copy(),
+		UnmarshalMeta:    h.UnmarshalMeta.copy(),
+		Retry:            h.Retry.copy(),
+		AfterRetry:       h.AfterRetry.copy(),
+	}
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+	h.Validate.Clear()
+	h.Build.Clear()
+	h.Send.Clear()
+	h.Sign.Clear()
+	h.Unmarshal.Clear()
+	h.UnmarshalMeta.Clear()
+	h.UnmarshalError.Clear()
+	h.ValidateResponse.Clear()
+	h.Retry.Clear()
+	h.AfterRetry.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic is to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition, such as an error, like HandlerListStopOnError,
+	// or for logging, like HandlerListLogItem.
+	AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
+	n.list = append([]NamedHandler{}, l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = []NamedHandler{}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.list = append(l.list, NamedHandler{"__anonymous", f})
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+	l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
+}
+
+// PushBackNamed pushes named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+	l.list = append(l.list, n)
+}
+
+// PushFrontNamed pushes named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+	l.list = append([]NamedHandler{n}, l.list...)
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+	newlist := []NamedHandler{}
+	for _, m := range l.list {
+		if m.Name != n.Name {
+			newlist = append(newlist, m)
+		}
+	}
+	l.list = newlist
+}
+
+// Run executes all handlers in the list with a given request object.
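+//
+// A direct-use sketch (the SDK normally invokes Run internally); req is an
+// assumed *Request:
+//
+//	var l HandlerList
+//	l.AfterEachFn = HandlerListStopOnError
+//	l.PushBack(func(r *Request) { /* inspect or mutate r */ })
+//	l.Run(req)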
+func (l *HandlerList) Run(r *Request) {
+	for i, h := range l.list {
+		h.Fn(r)
+		item := HandlerListRunItem{
+			Index: i, Handler: h, Request: r,
+		}
+		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+			return
+		}
+	}
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+	if item.Request.Config.Logger == nil {
+		return true
+	}
+	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+		item.Index, item.Handler.Name, item.Request.Error)
+
+	return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+	return item.Request.Error == nil
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+	ua := fmt.Sprintf("%s/%s", name, version)
+	if len(extra) > 0 {
+		ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+	}
+	return func(r *Request) {
+		AddToUserAgent(r, ua)
+	}
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+	return func(r *Request) {
+		AddToUserAgent(r, s)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 0000000..79f7960
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+	req := new(http.Request)
+	*req = *r
+	req.URL = &url.URL{}
+	*req.URL = *r.URL
+	req.Body = body
+
+	req.Header = http.Header{}
+	for k, v := range r.Header {
+		for _, vv := range v {
+			req.Header.Add(k, vv)
+		}
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 0000000..02f07f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,58 @@
+package request
+
+import (
+	"io"
+	"sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+	buf    io.ReadSeeker
+	lock   sync.Mutex
+	closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+	reader := &offsetReader{}
+	buf.Seek(offset, 0)
+
+	reader.buf = buf
+	return reader
+}
+
+// Close will close the offset reader's access to the underlying
+// io.ReadSeeker.
+func (o *offsetReader) Close() error {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+	o.closed = true
+	return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+
+	if o.closed {
+		return 0, io.EOF
+	}
+
+	return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+
+	return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+	o.Close()
+	return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 0000000..8ef9715
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,344 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+	Config     aws.Config
+	ClientInfo metadata.ClientInfo
+	Handlers   Handlers
+
+	Retryer
+	Time             time.Time
+	ExpireTime       time.Duration
+	Operation        *Operation
+	HTTPRequest      *http.Request
+	HTTPResponse     *http.Response
+	Body             io.ReadSeeker
+	BodyStart        int64 // offset from beginning of Body that the request body starts
+	Params           interface{}
+	Error            error
+	Data             interface{}
+	RequestID        string
+	RetryCount       int
+	Retryable        *bool
+	RetryDelay       time.Duration
+	NotHoist         bool
+	SignedHeaderVals http.Header
+	LastSignedAt     time.Time
+
+	built bool
+
+	// Need to persist an intermediate body between the input Body and HTTP
+	// request body because the HTTP Client's transport can maintain a reference
+	// to the HTTP request's body after the client has returned. This value is
+	// safe to use concurrently and rewraps the input Body for each HTTP request.
+	safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+	Name       string
+	HTTPMethod string
+	HTTPPath   string
+	*Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+	InputTokens     []string
+	OutputTokens    []string
+	LimitToken      string
+	TruncationToken string
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object which the request's response
+// payload will be deserialized to.
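+//
+// Most callers obtain a Request from a generated service client rather than
+// calling New directly. A direct-use sketch, with all arguments assumed to be
+// prepared elsewhere:
+//
+//	req := New(cfg, info, handlers, retryer, op, input, &output)
+//	err := req.Send()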
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+
+	var err error
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+	if err != nil {
+		httpReq.URL = &url.URL{}
+		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+	}
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       err,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.Body = reader
+	r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = false
+	r.Sign()
+	if r.Error != nil {
+		return "", r.Error
+	}
+	return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed hash back to the user
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = true
+	r.Sign()
+	if r.Error != nil {
+		return "", nil, r.Error
+	}
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+		return
+	}
+
+	retryStr := "not retrying"
+	if retrying {
+		retryStr = "will retry"
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
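+//
+// For example (a sketch; req is an assumed *Request):
+//
+//	if err := req.Build(); err != nil {
+//		// the request cannot be signed or sent as-is
+//	}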
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+	if !r.built {
+		r.Handlers.Validate.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Validate Request", false, r.Error)
+			return r.Error
+		}
+		r.Handlers.Build.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Build Request", false, r.Error)
+			return r.Error
+		}
+		r.built = true
+	}
+
+	return r.Error
+}
+
+// Sign will sign the request returning an error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+	r.Build()
+	if r.Error != nil {
+		debugLogReqError(r, "Build Request", false, r.Error)
+		return r.Error
+	}
+
+	r.Handlers.Sign.Run(r)
+	return r.Error
+}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+func (r *Request) ResetBody() {
+	if r.safeBody != nil {
+		r.safeBody.Close()
+	}
+
+	r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+	r.HTTPRequest.Body = r.safeBody
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+	return r.safeBody
+}
+
+// Send will send the request returning an error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+	for {
+		if aws.BoolValue(r.Retryable) {
+			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+					r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+			}
+
+			// The previous http.Request will have a reference to the r.Body
+			// and the HTTP Client's Transport may still be reading from
+			// the request's body even though the Client's Do returned.
+			r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+			r.ResetBody()
+
+			// Closing response body to ensure that no response body is leaked
+			// between retry attempts.
+			if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+				r.HTTPResponse.Body.Close()
+			}
+		}
+
+		r.Sign()
+		if r.Error != nil {
+			return r.Error
+		}
+
+		r.Retryable = nil
+
+		r.Handlers.Send.Run(r)
+		if r.Error != nil {
+			if strings.Contains(r.Error.Error(), "net/http: request canceled") {
+				return r.Error
+			}
+
+			err := r.Error
+			r.Handlers.Retry.Run(r)
+			r.Handlers.AfterRetry.Run(r)
+			if r.Error != nil {
+				debugLogReqError(r, "Send Request", false, r.Error)
+				return r.Error
+			}
+			debugLogReqError(r, "Send Request", true, err)
+			continue
+		}
+		r.Handlers.UnmarshalMeta.Run(r)
+		r.Handlers.ValidateResponse.Run(r)
+		if r.Error != nil {
+			err := r.Error
+			r.Handlers.UnmarshalError.Run(r)
+			r.Handlers.Retry.Run(r)
+			r.Handlers.AfterRetry.Run(r)
+			if r.Error != nil {
+				debugLogReqError(r, "Validate Response", false, r.Error)
+				return r.Error
+			}
+			debugLogReqError(r, "Validate Response", true, err)
+			continue
+		}
+
+		r.Handlers.Unmarshal.Run(r)
+		if r.Error != nil {
+			err := r.Error
+			r.Handlers.Retry.Run(r)
+			r.Handlers.AfterRetry.Run(r)
+			if r.Error != nil {
+				debugLogReqError(r, "Unmarshal Response", false, r.Error)
+				return r.Error
+			}
+			debugLogReqError(r, "Unmarshal Response", true, err)
+			continue
+		}
+
+		break
+	}
+
+	return nil
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+	curUA := r.HTTPRequest.Header.Get("User-Agent")
+	if len(curUA) > 0 {
+		s = curUA + " " + s
+	}
+	r.HTTPRequest.Header.Set("User-Agent", s)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000..2939ec4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,104 @@
+package request
+
+import (
+	"reflect"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+//type Paginater interface {
+//	HasNextPage() bool
+//	NextPage() *Request
+//	EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
+//}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+	return len(r.nextPageTokens()) > 0
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+	if r.Operation.Paginator == nil {
+		return nil
+	}
+
+	if r.Operation.TruncationToken != "" {
+		tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+		if len(tr) == 0 {
+			return nil
+		}
+
+		switch v := tr[0].(type) {
+		case *bool:
+			if !aws.BoolValue(v) {
+				return nil
+			}
+		case bool:
+			if v == false {
+				return nil
+			}
+		}
+	}
+
+	tokens := []interface{}{}
+	tokenAdded := false
+	for _, outToken := range r.Operation.OutputTokens {
+		v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+		if len(v) > 0 {
+			tokens = append(tokens, v[0])
+			tokenAdded = true
+		} else {
+			tokens = append(tokens, nil)
+		}
+	}
+	if !tokenAdded {
+		return nil
+	}
+
+	return tokens
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
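+//
+// A manual pagination sketch (req is an assumed paginated *Request):
+//
+//	for page := req; page != nil; page = page.NextPage() {
+//		if err := page.Send(); err != nil {
+//			break
+//		}
+//		// consume page.Data
+//	}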
+func (r *Request) NextPage() *Request {
+	tokens := r.nextPageTokens()
+	if len(tokens) == 0 {
+		return nil
+	}
+
+	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+	for i, intok := range nr.Operation.InputTokens {
+		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+	}
+	return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//	func(page *T, lastPage bool) bool {
+//		return true // return false to stop iterating
+//	}
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000..8cc8b01
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,101 @@
+package request
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the service.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+	RetryRules(*Request) time.Duration
+	ShouldRetry(*Request) bool
+	MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	cfg.Retryer = retryer
+	return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+	"RequestError":   {},
+	"RequestTimeout": {},
+}
+
+var throttleCodes = map[string]struct{}{
+	"ProvisionedThroughputExceededException": {},
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"RequestLimitExceeded":                   {},
+	"RequestThrottled":                       {},
+	"LimitExceededException":                 {}, // Deleting 10+ DynamoDb tables at once
+	"TooManyRequestsException":               {}, // Lambda functions
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	"RequestExpired":        {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+	_, ok := throttleCodes[code]
+	return ok
+}
+
+func isCodeRetryable(code string) bool {
+	if _, ok := retryableCodes[code]; ok {
+		return true
+	}
+
+	return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+	_, ok := credsExpiredCodes[code]
+	return ok
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorRetryable() bool {
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeRetryable(err.Code())
+		}
+	}
+	return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorThrottle() bool {
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeThrottle(err.Code())
+		}
+	}
+	return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorExpired() bool {
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeExpiredCreds(err.Code())
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 0000000..2520286
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,234 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// InvalidParameterErrCode is the error code for invalid parameters errors
+	InvalidParameterErrCode = "InvalidParameter"
+	// ParamRequiredErrCode is the error code for required parameter errors
+	ParamRequiredErrCode = "ParamRequiredError"
+	// ParamMinValueErrCode is the error code for fields with too low of a
+	// number value.
+	ParamMinValueErrCode = "ParamMinValueError"
+	// ParamMinLenErrCode is the error code for fields without enough elements.
+	ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+	Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+	// Context is the base context of the invalid parameter group.
+	Context string
+	errs    []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+	err.SetContext(e.Context)
+	e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated, and their base context set to reflect the merging.
+//
+// Use for nested validation errors.
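+//
+// A sketch of typical use inside a generated Validate method, where Child is
+// an assumed nested member with its own validation:
+//
+//	if s.Child != nil {
+//		if err := s.Child.Validate(); err != nil {
+//			invalidParams.AddNested("Child", err.(ErrInvalidParams))
+//		}
+//	}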
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+	return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+	return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Message())
+	}
+
+	return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+	return awserr.NewBatchError(
+		InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+	awserr.Error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+	context       string
+	nestedContext string
+	field         string
+	code          string
+	msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+	return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+	return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented for the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+	return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+	field := e.context
+	if len(field) > 0 {
+		field += "."
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
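+//
+// Typically used when building up an ErrInvalidParams collection (a sketch;
+// s.Name is an assumed required field):
+//
+//	if s.Name == nil {
+//		invalidParams.Add(NewErrParamRequired("Name"))
+//	}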
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   "missing required field",
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 0000000..d3dc840
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,223 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using NewSession, which will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials).
+If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the
+Session will be created from the configuration values from the shared config
+(~/.aws/config) and shared credentials (~/.aws/credentials) files. See the
+section Sessions from Shared Config for more information.
+
+Create a Session with the default config and request handlers. With
+credentials, region, and profile loaded from the environment and shared config
+automatically. Requires the AWS_PROFILE to be set, or "default" is used.
+
+	// Create Session
+	sess, err := session.NewSession()
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
+
+	// Create an S3 client instance from a session
+	sess, err := session.NewSession()
+	if err != nil {
+		// Handle Session creation error
+	}
+	svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess, err := session.NewSessionWithOptions(session.Options{})
+
+	// Specify profile to load for the session's config
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: "profile_name",
+	})
+
+	// Specify profile for config and region for requests
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Config:  aws.Config{Region: aws.String("us-east-1")},
+		Profile: "profile_name",
+	})
+
+	// Force enable Shared Config support
+	sess, err := session.NewSessionWithOptions(session.Options{
+		SharedConfigState: SharedConfigEnable,
+	})
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess, err := session.NewSession()
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Payload: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read.
+The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config
+file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials from a configuration file must include both
+aws_access_key_id and aws_secret_access_key, provided together in the same
+file, to be considered valid. The values will be ignored if not a complete
+group. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK does not support
+assuming a role with MFA token via the Session's constructor. You can use the
+stscreds.AssumeRoleProvider credentials provider to specify custom
+configuration and support for MFA.
+
+	role_arn = arn:aws:iam:::role/
+	source_profile = profile_with_creds
+	external_id = 1234
+	mfa_serial = not supported!
+	role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+	region = us-east-1
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials.
+If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 0000000..d2f0c84
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,188 @@
+package session
+
+import (
+	"os"
+	"path/filepath"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// envConfig is a collection of environment values the SDK will read its
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret
+	// Access Key must be provided. A Session Token can optionally also be
+	// provided, but is not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to.
+	// If it is not provided in the environment the region must be provided before
+	// a service client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+ // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = "EnvConfigCredentials" + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + cfg.SharedCredentialsFile = sharedCredentialsFilename() + cfg.SharedConfigFile = sharedConfigFilename() + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func sharedCredentialsFilename() string { + if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "credentials") +} + +func sharedConfigFilename() string { + if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "config") +} + +func userHomeDir() string { + homeDir := os.Getenv("HOME") // *nix + if len(homeDir) == 0 { // windows + homeDir = os.Getenv("USERPROFILE") + } + + return homeDir +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 0000000..602f4e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,393 @@ +package session + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + 
"github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ClientConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occured while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functiions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + s, err := newSession(envCfg, cfgs...) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occuring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + return s + } + + return oldNewSession(cfgs...) +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. 
Enabling the Shared Config will also allow the Session
+// to be built with credentials retrieved via AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ envCfg := loadEnvConfig()
+
+ return newSession(envCfg, cfgs...)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+ // Provides config values for the SDK to use when creating service clients
+ // and making API requests to services. Any value set in this field
+ // will override the associated value provided by the SDK defaults,
+ // environment or config files where relevant.
+ //
+ // If not set, configuration values from SDK defaults, environment, and
+ // config files will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with credentials retrieved via AssumeRole set in the config.
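+//
+// As a sketch, shared config support can be forced on while also overriding
+// the region and profile in code (the region and profile names here are
+// illustrative only):
+//
+// sess, err := session.NewSessionWithOptions(session.Options{
+//     Config:            aws.Config{Region: aws.String("eu-west-1")},
+//     Profile:           "my_profile",
+//     SharedConfigState: SharedConfigEnable,
+// })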
+// +// // Equivalent to session.New +// sess, err := session.NewSessionWithOptions(session.Options{}) +// +// // Specify profile to load for the session's config +// sess, err := session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// }) +// +// // Specify profile for config and region for requests +// sess, err := session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// }) +// +// // Force enable Shared Config support +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: SharedConfigEnable, +// }) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func oldNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Order config files will be loaded in with later files overwriting + // previous config file values. + cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). 
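+ // Dropping the first entry here leaves only the shared credentials
+ // file (~/.aws/credentials) to be loaded below.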
+ cfgFiles = cfgFiles[1:] + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + cfg.Credentials = stscreds.NewCredentials( + &Session{ + Config: &cfgCp, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // MFA not supported + }, + ) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. + cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + } +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), + serviceName, + aws.StringValue(s.Config.Region), + aws.BoolValue(s.Config.DisableSSL), + aws.BoolValue(s.Config.UseDualStack), + ) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 0000000..b58076f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,295 @@ +package session + +import ( + "fmt" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. 
If the order
+// of the files is A then B, B's credential values will be used instead of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ if err = cfg.setFromIniFiles(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+
+ if len(cfg.AssumeRole.SourceProfile) > 0 {
+ if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ }
+
+ f, err := ini.Load(b)
+ if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: f,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+ var assumeRoleSrc sharedConfig
+
+ // Multiple-level assume role chains are not supported
+ if cfg.AssumeRole.SourceProfile == origProfile {
+ assumeRoleSrc = *cfg
+ assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+ } else {
+ err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+ return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+ }
+
+ cfg.AssumeRoleSource = &assumeRoleSrc
+
+ return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+ // Set the config from each file, skipping over files the profile
+ // does not exist in.
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ // Ignore profiles missing from individual files
+ continue
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
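+//
+// For example, given an illustrative profile body like the following, the
+// complete credentials group would be loaded, while a section containing only
+// aws_access_key_id would have the lone key ignored:
+//
+// aws_access_key_id = AKID
+// aws_secret_access_key = SECRET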
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+ section, err := file.IniData.GetSection(profile)
+ if err != nil {
+ // Fall back to the alternate profile section name: "profile <name>"
+ section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+ if err != nil {
+ return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
+ }
+ }
+
+ // Shared Credentials
+ akid := section.Key(accessKeyIDKey).String()
+ secret := section.Key(secretAccessKey).String()
+ if len(akid) > 0 && len(secret) > 0 {
+ cfg.Creds = credentials.Value{
+ AccessKeyID: akid,
+ SecretAccessKey: secret,
+ SessionToken: section.Key(sessionTokenKey).String(),
+ ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+ }
+ }
+
+ // Assume Role
+ roleArn := section.Key(roleArnKey).String()
+ srcProfile := section.Key(sourceProfileKey).String()
+ if len(roleArn) > 0 && len(srcProfile) > 0 {
+ cfg.AssumeRole = assumeRoleConfig{
+ RoleARN: roleArn,
+ SourceProfile: srcProfile,
+ ExternalID: section.Key(externalIDKey).String(),
+ MFASerial: section.Key(mfaSerialKey).String(),
+ RoleSessionName: section.Key(roleSessionNameKey).String(),
+ }
+ }
+
+ // Region
+ if v := section.Key(regionKey).String(); len(v) > 0 {
+ cfg.Region = v
+ }
+
+ return nil
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+ return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+ return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+ Profile string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+ return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+ return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+ return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+ return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+ e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
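+// For SharedConfigAssumeRoleError no underlying error is retained, so this
+// always returns nil.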
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 0000000..244c86d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and just simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value and supports nested rules
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule checks whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+ rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+ rule
+}
+
+// IsValid for blacklist checks that the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 0000000..bd082e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
new file mode 100644
index 0000000..7966041
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
@@ -0,0 +1,24 @@
+// +build !go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.Path
+ }
+
+ if len(uri) == 0 {
+
uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 0000000..986530b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,713 @@
+// Package v4 implements signing for the AWS V4 signer.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "//<hostname>/<path>"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing. This will help
+// prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of headers that may be hoisted into the
+// request's query string when presigning.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ // The authentication credentials the request will be signed against.
+ // This value must be set to sign requests.
+ Credentials *credentials.Credentials
+
+ // Sets the log level the signer should use when reporting information to
+ // the logger. If the logger is nil nothing will be logged. See
+ // aws.LogLevelType for more information on available logging levels
+ //
+ // By default nothing will be logged.
+ Debug aws.LogLevelType
+
+ // The logger logging information will be written to. If the logger
+ // is nil, nothing will be logged.
+ Logger aws.Logger
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need additional
+ // escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
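+ //
+ // For example, a test might pin the signing clock (a sketch):
+ //
+ //   signer.currentTimeFn = func() time.Time { return time.Unix(0, 0) }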
+ currentTimeFn func() time.Time
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
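+//
+// A sketch of presigning a GET request (the URL, bucket, and credentials
+// value creds are illustrative assumptions, not part of this file):
+//
+// signer := v4.NewSigner(creds)
+// req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
+// hdrs, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())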
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: exp != 0,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.Get()
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.assignAmzQueryValues()
+ ctx.build(v4.DisableHeaderHoisting)
+
+ // If the request is not presigned the body should be attached to it. This
+ // prevents the confusion of wanting to send a signed request without
+ // the body the request was signed for attached.
+ if !ctx.isPresign {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+ // The request has already been signed, so the previous presign values
+ // are no longer valid and must be removed before the request is
+ // signed again; otherwise the request would fail.
+ ctx.removePresign()
+
+ // Update the request's query string to ensure the values stay in
+ // sync in case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature.
This
+// request handler is best used only with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now)
+}
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
+ // If the request does not need to be signed ignore the signing of the
+ // request if the AnonymousCredentials object is used.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+ })
+
+ signingTime := req.Time
+ if !req.LastSignedAt.IsZero() {
+ signingTime = req.LastSignedAt
+ }
+
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, signingTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTimeFn()
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildBodyDigest()
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ "Signature=" + ctx.signature,
+ }
+ ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+}
+
+func (ctx *signingCtx) buildTime() {
+ ctx.formattedTime =
ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if 
ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000..fa014b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,106 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. 
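+//
+// For example (a sketch), wrapping a bytes.Buffer, which implements io.Reader
+// but not io.Seeker, makes Seek a no-op reporting offset 0:
+//
+// r := aws.ReadSeekCloser(bytes.NewBufferString("payload"))
+// off, err := r.Seek(0, 2) // off == 0, err == nil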
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
+// Can be used with the s3manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position provided.
+// The number of bytes written, or an error, will be returned. Previously
+// written slices can be overwritten if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf[:len(b.buf):len(b.buf)]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 0000000..b01cd70
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.4.22"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/README.md b/vendor/github.com/aws/aws-sdk-go/private/README.md
new file mode 100644
index 0000000..5bdb4c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/README.md
@@ -0,0 +1,4 @@
+## AWS SDK for Go Private packages ##
+`private` is a collection of packages used internally by the SDK, and is subject to breaking changes. This package is not `internal` so that if you really need to use its functionality, and understand that breaking changes will be made, you are able to.
+
+These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own, making it easier for you to generate your own code based on the API models.
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
new file mode 100644
index 0000000..19d9756
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -0,0 +1,70 @@
+// Package endpoints validates regional endpoints for services.
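+//
+// For example (a sketch using the static endpoints map in this package):
+//
+// ep, sr := endpoints.NormalizeEndpoint("", "sts", "us-west-2", false, false)
+// // ep == "https://sts.amazonaws.com", sr == "us-east-1"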
+package endpoints
+
+//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information to return a
+// normalized endpoint and signing region. If the endpoint is an empty string,
+// the service name and region will be used to look up the service's API
+// endpoint. If the endpoint is provided, the scheme will be added if it is
+// not present.
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
+	if endpoint == "" {
+		return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
+	}
+
+	return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service
+// and region. If the service and region pair is not found, endpoint and
+// signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
+	dualStackField := ""
+	if useDualStack {
+		dualStackField = "/dualstack"
+	}
+
+	derivedKeys := []string{
+		region + "/" + svcName + dualStackField,
+		region + "/*" + dualStackField,
+		"*/" + svcName + dualStackField,
+		"*/*" + dualStackField,
+	}
+
+	for _, key := range derivedKeys {
+		if val, ok := endpointsMap.Endpoints[key]; ok {
+			ep := val.Endpoint
+			ep = strings.Replace(ep, "{region}", region, -1)
+			ep = strings.Replace(ep, "{service}", svcName, -1)
+
+			endpoint = ep
+			signingRegion = val.SigningRegion
+			break
+		}
+	}
+
+	return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// schemeRE matches an endpoint string that is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be added instead of the default
+// HTTPS.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if endpoint != "" && !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
new file mode 100644
index 0000000..e79e678
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
@@ -0,0 +1,95 @@
+package endpoints
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
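+
+// The map keys below follow the "{region}/{service}" shape probed by
+// EndpointForRegion; for service "s3" in region "eu-central-1" the derived
+// keys are tried in order:
+//
+//	"eu-central-1/s3", "eu-central-1/*", "*/s3", "*/*"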
+
+type endpointStruct struct {
+	Version   int
+	Endpoints map[string]endpointEntry
+}
+
+type endpointEntry struct {
+	Endpoint      string
+	SigningRegion string
+}
+
+var endpointsMap = endpointStruct{
+	Version: 2,
+	Endpoints: map[string]endpointEntry{
+		"*/*": {
+			Endpoint: "{service}.{region}.amazonaws.com",
+		},
+		"*/budgets": {
+			Endpoint:      "budgets.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/cloudfront": {
+			Endpoint:      "cloudfront.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/cloudsearchdomain": {
+			Endpoint:      "",
+			SigningRegion: "us-east-1",
+		},
+		"*/data.iot": {
+			Endpoint:      "",
+			SigningRegion: "us-east-1",
+		},
+		"*/ec2metadata": {
+			Endpoint: "http://169.254.169.254/latest",
+		},
+		"*/iam": {
+			Endpoint:      "iam.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/importexport": {
+			Endpoint:      "importexport.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/route53": {
+			Endpoint:      "route53.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"*/s3/dualstack": {
+			Endpoint: "s3.dualstack.{region}.amazonaws.com",
+		},
+		"*/sts": {
+			Endpoint:      "sts.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"*/waf": {
+			Endpoint:      "waf.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"cn-north-1/*": {
+			Endpoint: "{service}.{region}.amazonaws.com.cn",
+		},
+		"cn-north-1/ec2metadata": {
+			Endpoint: "http://169.254.169.254/latest",
+		},
+		"eu-central-1/s3": {
+			Endpoint: "{service}.{region}.amazonaws.com",
+		},
+		"us-east-1/s3": {
+			Endpoint: "s3.amazonaws.com",
+		},
+		"us-east-1/sdb": {
+			Endpoint:      "sdb.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
+		"us-gov-west-1/ec2metadata": {
+			Endpoint: "http://169.254.169.254/latest",
+		},
+		"us-gov-west-1/iam": {
+			Endpoint: "iam.us-gov.amazonaws.com",
+		},
+		"us-gov-west-1/s3": {
+			Endpoint: "s3-{region}.amazonaws.com",
+		},
+		"us-gov-west-1/sts": {
+			Endpoint: "sts.us-gov-west-1.amazonaws.com",
+		},
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 0000000..53831df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields tagged with idempotencyToken that are
+// not already set can be auto-filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an idempotency token,
+// given that the value can be set. It will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided.
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 0000000..4504efe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,254 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+
+	err := buildAny(reflect.ValueOf(v), &buf, "")
+	return buf.Bytes(), err
+}
+
+func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	value = reflect.Indirect(value)
+	if !value.IsValid() {
+		return nil
+	}
+
+	vtype := value.Type()
+
+	t := tag.Get("type")
+	if t == "" {
+		switch vtype.Kind() {
+		case reflect.Struct:
+			// also it can't be a time object
+			if value.Type() != timeType {
+				t = "structure"
+			}
+		case reflect.Slice:
+			// also it can't be a byte slice
+			if _, ok := value.Interface().([]byte); !ok {
+				t = "list"
+			}
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := vtype.FieldByName("_"); ok {
+			tag = field.Tag
+		}
+		return buildStruct(value, buf, tag)
+	case "list":
+		return buildList(value, buf, tag)
+	case "map":
+		return buildMap(value, buf, tag)
+	default:
+		return buildScalar(value, buf, tag)
+	}
+}
+
+func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	buf.WriteByte('{')
+
+	t := value.Type()
+	first := true
+	for i := 0; i < t.NumField(); i++ {
+		member := value.Field(i)
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+		if field.Tag.Get("json") == "-" {
+			continue
+		}
+		if field.Tag.Get("location") != "" {
+			continue // ignore non-body elements
+		}
+
+		if protocol.CanSetIdempotencyToken(member, field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(&token)
+		}
+
+		if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map)
&& member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + switch value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + buf.WriteString(strconv.FormatBool(value.Bool())) + case reflect.Int64: + buf.WriteString(strconv.FormatInt(value.Int(), 10)) + case reflect.Float64: + buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) + default: + switch value.Type() { + case timeType: + converted := value.Interface().(time.Time) + buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) + case byteSliceType: + if !value.IsNil() { + converted := value.Interface().([]byte) + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for _, r := range s { + if r == '"' { + buf.WriteString(`\"`) + } else if r == '\\' { + buf.WriteString(`\\`) + } else if r == '\b' { + buf.WriteString(`\b`) + } else if r == '\f' { + buf.WriteString(`\f`) + } else if r == '\r' { + buf.WriteString(`\r`) + } else if r == '\t' { + buf.WriteString(`\t`) + } else if r == '\n' { + buf.WriteString(`\n`) + } else if r < 32 { + fmt.Fprintf(buf, "\\u%0.4x", r) + } else { + buf.WriteRune(r) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. 
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 0000000..fea5356 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,213 @@ +package jsonutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + "time" +) + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + b, err := ioutil.ReadAll(stream) + if err != nil { + return err + } + + if len(b) == 0 { + return nil + } + + if err := json.Unmarshal(b, &out); err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == 
nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + errf := func() error { + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + default: + return errf() + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return errf() + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return errf() + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 0000000..56af4dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,111 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} + +// Build builds a JSON payload for a JSON RPC request. 
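+//
+// For example, a client whose metadata carries TargetPrefix
+// "DynamoDB_20120810" and JSONVersion "1.0" would, for an operation named
+// "GetItem", produce headers of the form:
+//
+//	X-Amz-Target: DynamoDB_20120810.GetItem
+//	Content-Type: application/x-amz-json-1.0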
+func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + if req.ClientInfo.JSONVersion != "" { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. +func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", req.HTTPResponse.Status, nil), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 0000000..18169f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
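+//
+// The operation parameters are serialized into a form-encoded body (or into
+// the query string for presigned requests), always including the operation
+// name and API version; e.g. an IAM GetUser call would serialize roughly as:
+//
+//	Action=GetUser&UserName=jdoe&Version=2010-05-08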
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000..60ea0bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000..e0f4d5a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..f214296
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,66 @@
+package query
+
+import (
+	"encoding/xml"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+	XMLName   xml.Name `xml:"ErrorResponse"`
+	Code      string   `xml:"Error>Code"`
+	Message   string   `xml:"Error>Message"`
+	RequestID string   `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+	XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
+		return
+	}
+
+	// First check for a specific error
+	resp := xmlErrorResponse{}
+	decodeErr := xml.Unmarshal(bodyBytes, &resp)
+	if decodeErr == nil {
+		reqID := resp.RequestID
+		if reqID == "" {
+			reqID = r.RequestID
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(resp.Code, resp.Message, nil),
+			r.HTTPResponse.StatusCode,
+			reqID,
+		)
+		return
+	}
+
+	// Check for an unhandled error
+	servUnavailResp := xmlServiceUnavailableResponse{}
+	unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+	if unavailErr == nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Failed to retrieve any error message from the response body
+	r.Error = awserr.New("SerializationError",
+		"failed to decode query XML error response", decodeErr)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 0000000..5f41251
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,256 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RFC822 is the RFC822 timestamp format used by AWS protocols.
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// noEscape records whether each byte value can be sent without escaping in AWS URLs.
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.'
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + 
query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000..4366de2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
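+//
+// For example, given a shape whose metadata field tags "Body" as the payload
+// (a sketch in the style of S3's GetObjectOutput):
+//
+//	type GetObjectOutput struct {
+//		_    struct{}      `type:"structure" payload:"Body"`
+//		Body io.ReadCloser `type:"blob"`
+//	}
+//
+// PayloadType(&GetObjectOutput{}) would return "blob".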
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000..2cba1d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,198 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := 
field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+
+			switch field.Tag.Get("location") {
+			case "statusCode":
+				unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+			case "header":
+				err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
+				if err != nil {
+					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+					break
+				}
+			case "headers":
+				prefix := field.Tag.Get("locationName")
+				err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+				if err != nil {
+					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+					break
+				}
+			}
+		}
+		if r.Error != nil {
+			return
+		}
+	}
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+	if !v.IsValid() {
+		return
+	}
+
+	switch v.Interface().(type) {
+	case *int64:
+		s := int64(statusCode)
+		v.Set(reflect.ValueOf(&s))
+	}
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+	switch r.Interface().(type) {
+	case map[string]*string: // we only support string map value types
+		out := map[string]*string{}
+		for k, v := range headers {
+			k = http.CanonicalHeaderKey(k)
+			if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+				out[k[len(prefix):]] = &v[0]
+			}
+		}
+		r.Set(reflect.ValueOf(out))
+	}
+	return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string) error {
+	if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+		return nil
+	}
+
+	switch v.Interface().(type) {
+	case *string:
+		v.Set(reflect.ValueOf(&header))
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *bool:
+		b, err := strconv.ParseBool(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *int64:
+		i, err := strconv.ParseInt(header, 10, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&i))
+	case *float64:
+		f, err := strconv.ParseFloat(header, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&f))
+	case *time.Time:
+		t, err := time.Parse(RFC822, header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&t))
+	default:
+		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 0000000..da1a681
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and
+// close it.
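+//
+// It is typically installed on operations whose responses carry no body,
+// for example:
+//
+//	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)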
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..221029b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,293 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. An error will be
+// returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+	root := NewXMLElement(xml.Name{})
+	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+		return err
+	}
+	for _, c := range root.Children {
+		for _, v := range c {
+			return StructToXML(e, v, false)
+		}
+	}
+	return nil
+}
+
+// elemOf returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
+
+// An xmlBuilder serializes values from Go code to XML.
+type xmlBuilder struct {
+	encoder    *xml.Encoder
+	namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It will build the
+// value for its specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted
+// to an XMLNode as. If the type is not provided, reflection will be used to
+// determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("_"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields
+// and any nested types are converted to XMLNodes also.
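+//
+// For example, a struct field tagged `locationName:"Bucket"` holding the
+// string "demo" is emitted as a child element:
+//
+//	<Bucket>demo</Bucket>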
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	fieldAdded := false
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+	// there is an xmlNamespace associated with this struct
+	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+		ns := xml.Attr{
+			Name:  xml.Name{Local: "xmlns"},
+			Value: uri,
+		}
+		if prefix != "" {
+			b.namespaces[prefix] = uri // register the namespace
+			ns.Name.Local = "xmlns:" + prefix
+		}
+
+		child.Attr = append(child.Attr, ns)
+	}
+
+	t := value.Type()
+	for i := 0; i < value.NumField(); i++ {
+		member := elemOf(value.Field(i))
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+
+		mTag := field.Tag
+		if mTag.Get("location") != "" { // skip non-body members
+			continue
+		}
+
+		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(token)
+		}
+
+		memberName := mTag.Get("locationName")
+		if memberName == "" {
+			memberName = field.Name
+			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+		}
+		if err := b.buildValue(member, child, mTag); err != nil {
+			return err
+		}
+
+		fieldAdded = true
+	}
+
+	if fieldAdded { // only append this child if we have one or more valid members
+		current.AddChild(child)
+	}
+
+	return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as child
+// nodes. All nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check for unflattened list member
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as child
+// nodes. All nested values in the map are converted to XMLNodes also.
+//
+// An error will be returned if it is unable to build the map's values into XMLNodes.
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an
+// attribute or child of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an
+// "xmlAttribute" attribute value.
+//
+// An error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	var str string
+	switch converted := value.Interface().(type) {
+	case string:
+		str = converted
+	case []byte:
+		if !value.IsNil() {
+			str = base64.StdEncoding.EncodeToString(converted)
+		}
+	case bool:
+		str = strconv.FormatBool(converted)
+	case int64:
+		str = strconv.FormatInt(converted, 10)
+	case int:
+		str = strconv.Itoa(converted)
+	case float64:
+		str = strconv.FormatFloat(converted, 'f', -1, 64)
+	case float32:
+		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+	case time.Time:
+		const ISO8601UTC = "2006-01-02T15:04:05Z"
+		str = converted.UTC().Format(ISO8601UTC)
+	default:
+		return fmt.Errorf("unsupported value for param %s: %v (%s)",
+			tag.Get("locationName"), value.Interface(), value.Type().Name())
+	}
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+		attr := xml.Attr{Name: xname, Value: str}
+		current.Attr = append(current.Attr, attr)
+	} else { // regular text node
+		current.AddChild(&XMLNode{Name: xname, Text: str})
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000..49f291a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,260 @@
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. V needs to
+// match the shape of the XML expected to be decoded. If the shape doesn't
+// match, unmarshaling will fail.
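+//
+// The wrapper argument names a wrapping element to unwrap before decoding;
+// the query protocol, for example, passes "<OperationName>Result" (see
+// query/unmarshal.go above):
+//
+//	err := xmlutil.UnmarshalXML(out, xml.NewDecoder(body), opName+"Result")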
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
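+//
+// A non-flattened list arrives wrapped in "member" (or locationNameList)
+// elements, while a flattened list repeats the field's own element:
+//
+//	<Values><member>a</member><member>b</member></Values>
+//	<Values>a</Values><Values>b</Values>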
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+    t := r.Type()
+
+    if tag.Get("flattened") == "" { // look at all item entries
+        mname := "member"
+        if name := tag.Get("locationNameList"); name != "" {
+            mname = name
+        }
+
+        if children, ok := node.Children[mname]; ok {
+            if r.IsNil() {
+                r.Set(reflect.MakeSlice(t, len(children), len(children)))
+            }
+
+            for i, c := range children {
+                err := parse(r.Index(i), c, "")
+                if err != nil {
+                    return err
+                }
+            }
+        }
+    } else { // flattened list means this is a single element
+        if r.IsNil() {
+            r.Set(reflect.MakeSlice(t, 0, 0))
+        }
+
+        childR := reflect.Zero(t.Elem())
+        r.Set(reflect.Append(r, childR))
+        err := parse(r.Index(r.Len()-1), node, "")
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+    if r.IsNil() {
+        r.Set(reflect.MakeMap(r.Type()))
+    }
+
+    if tag.Get("flattened") == "" { // look at all child entries
+        for _, entry := range node.Children["entry"] {
+            if err := parseMapEntry(r, entry, tag); err != nil {
+                return err
+            }
+        }
+    } else { // this element is itself an entry
+        if err := parseMapEntry(r, node, tag); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+    kname, vname := "key", "value"
+    if n := tag.Get("locationNameKey"); n != "" {
+        kname = n
+    }
+    if n := tag.Get("locationNameValue"); n != "" {
+        vname = n
+    }
+
+    keys, ok := node.Children[kname]
+    values := node.Children[vname]
+    if ok {
+        for i, key := range keys {
+            keyR := reflect.ValueOf(key.Text)
+            value := values[i]
+            valueR := reflect.New(r.Type().Elem()).Elem()
+
+            if err := parse(valueR, value, ""); err != nil {
+                return err
+            }
+            r.SetMapIndex(keyR, valueR)
+        }
+    }
+    return nil
+}
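+
+// Illustrative note (editor's sketch, not part of the vendored SDK source):
+// a non-flattened map arrives as repeated <entry> children,
+//
+//    <MyMap>
+//        <entry><key>k1</key><value>v1</value></entry>
+//    </MyMap>
+//
+// which parseMap walks via node.Children["entry"], while a flattened map is
+// passed to parseMapEntry one entry node at a time. The element names default
+// to "key" and "value" unless overridden by the locationNameKey and
+// locationNameValue tags.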
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// An error is returned if the deserialization fails due to an invalid type
+// conversion, or an unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+    switch r.Interface().(type) {
+    case *string:
+        r.Set(reflect.ValueOf(&node.Text))
+        return nil
+    case []byte:
+        b, err := base64.StdEncoding.DecodeString(node.Text)
+        if err != nil {
+            return err
+        }
+        r.Set(reflect.ValueOf(b))
+    case *bool:
+        v, err := strconv.ParseBool(node.Text)
+        if err != nil {
+            return err
+        }
+        r.Set(reflect.ValueOf(&v))
+    case *int64:
+        v, err := strconv.ParseInt(node.Text, 10, 64)
+        if err != nil {
+            return err
+        }
+        r.Set(reflect.ValueOf(&v))
+    case *float64:
+        v, err := strconv.ParseFloat(node.Text, 64)
+        if err != nil {
+            return err
+        }
+        r.Set(reflect.ValueOf(&v))
+    case *time.Time:
+        const ISO8601UTC = "2006-01-02T15:04:05Z"
+        t, err := time.Parse(ISO8601UTC, node.Text)
+        if err != nil {
+            return err
+        }
+        r.Set(reflect.ValueOf(&t))
+    default:
+        return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+    }
+    return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000..72c198a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,105 @@
+package xmlutil
+
+import (
+    "encoding/xml"
+    "io"
+    "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+    Name     xml.Name              `json:",omitempty"`
+    Children map[string][]*XMLNode `json:",omitempty"`
+    Text     string                `json:",omitempty"`
+    Attr     []xml.Attr            `json:",omitempty"`
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+    return &XMLNode{
+        Name:     name,
+        Children: map[string][]*XMLNode{},
+        Attr:     []xml.Attr{},
+    }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+    if _, ok := n.Children[child.Name.Local]; !ok {
+        n.Children[child.Name.Local] = []*XMLNode{}
+    }
+    n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream into an XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+    out := &XMLNode{}
+    for {
+        tok, err := d.Token()
+        if tok == nil || err == io.EOF {
+            break
+        }
+        if err != nil {
+            return out, err
+        }
+
+        switch typed := tok.(type) {
+        case xml.CharData:
+            out.Text = string(typed.Copy())
+        case xml.StartElement:
+            el := typed.Copy()
+            out.Attr = el.Attr
+            if out.Children == nil {
+                out.Children = map[string][]*XMLNode{}
+            }
+
+            name := typed.Name.Local
+            slice := out.Children[name]
+            if slice == nil {
+                slice = []*XMLNode{}
+            }
+            node, e := XMLToStruct(d, &el)
+            if e != nil {
+                return out, e
+            }
+            node.Name = typed.Name
+            slice = append(slice, node)
+            out.Children[name] = slice
+        case xml.EndElement:
+            if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+                return out, nil
+            }
+        }
+    }
+    return out, nil
+}
+
+// StructToXML writes an XMLNode to an xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go new file mode 100644 index 0000000..ba48157 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -0,0 +1,4516 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. +package cloudwatchlogs + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CancelExportTask for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CancelExportTaskOutput{} + req.Data = output + return +} + +// CancelExportTask API operation for Amazon CloudWatch Logs. +// +// Cancels an export task if it is in PENDING or RUNNING state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CancelExportTask for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. 
+// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * InvalidOperationException +// Returned if the operation is not valid on the specified resource +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateExportTask = "CreateExportTask" + +// CreateExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateExportTask for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateExportTaskRequest method. +// req, resp := client.CreateExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { + op := &request.Operation{ + Name: opCreateExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateExportTaskInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateExportTaskOutput{} + req.Data = output + return +} + +// CreateExportTask API operation for Amazon CloudWatch Logs. +// +// Creates an ExportTask which allows you to efficiently export data from a +// Log Group to your Amazon S3 bucket. +// +// This is an asynchronous call. If all the required information is provided, +// this API will initiate an export task and respond with the task Id. Once +// started, DescribeExportTasks can be used to get the status of an export task. +// You can only have one active (RUNNING or PENDING) export task at a time, +// per account. +// +// You can export logs from multiple log groups or multiple time ranges to the +// same Amazon S3 bucket. To separate out log data for each export task, you +// can specify a prefix that will be used as the Amazon S3 key prefix for all +// exported objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateExportTask for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * LimitExceededException +// Returned if you have reached the maximum number of resources that can be +// created. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. 
+// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * ResourceAlreadyExistsException +// Returned if the specified resource already exists. +// +func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogGroup = "CreateLogGroup" + +// CreateLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateLogGroup for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLogGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLogGroupRequest method. +// req, resp := client.CreateLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { + op := &request.Operation{ + Name: opCreateLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateLogGroupOutput{} + req.Data = output + return +} + +// CreateLogGroup API operation for Amazon CloudWatch Logs. +// +// Creates a new log group with the specified name. The name of the log group +// must be unique within a region for an AWS account. You can create up to 500 +// log groups per account. +// +// You must use the following guidelines when naming a log group: +// +// * Log group names can be between 1 and 512 characters long. +// +// * Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), +// '/' (forward slash), and '.' (period). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateLogGroup for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceAlreadyExistsException +// Returned if the specified resource already exists. +// +// * LimitExceededException +// Returned if you have reached the maximum number of resources that can be +// created. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. 
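+//
+// A hypothetical usage sketch (editor's note, not generated documentation)
+// showing how such an error code can be inspected with a type assertion;
+// aws.String and awserr come from the SDK's aws and aws/awserr packages:
+//
+//    _, err := svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
+//        LogGroupName: aws.String("my-log-group"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ResourceAlreadyExistsException" {
+//        // the log group already exists; treat as success if that is acceptable
+//    }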
+// +func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + err := req.Send() + return out, err +} + +const opCreateLogStream = "CreateLogStream" + +// CreateLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateLogStream for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLogStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLogStreamRequest method. +// req, resp := client.CreateLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { + op := &request.Operation{ + Name: opCreateLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateLogStreamOutput{} + req.Data = output + return +} + +// CreateLogStream API operation for Amazon CloudWatch Logs. +// +// Creates a new log stream in the specified log group. The name of the log +// stream must be unique within the log group. There is no limit on the number +// of log streams that can exist in a log group. +// +// You must use the following guidelines when naming a log stream: +// +// * Log stream names can be between 1 and 512 characters long. +// +// * The ':' colon character is not allowed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateLogStream for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceAlreadyExistsException +// Returned if the specified resource already exists. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDestination = "DeleteDestination" + +// DeleteDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDestination operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// See DeleteDestination for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDestination method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDestinationRequest method. +// req, resp := client.DeleteDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDestinationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDestinationOutput{} + req.Data = output + return +} + +// DeleteDestination API operation for Amazon CloudWatch Logs. +// +// Deletes the destination with the specified name and eventually disables all +// the subscription filters that publish to it. This will not delete the physical +// resource encapsulated by the destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDestination for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLogGroup = "DeleteLogGroup" + +// DeleteLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteLogGroup for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLogGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLogGroupRequest method. 
+// req, resp := client.DeleteLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { + op := &request.Operation{ + Name: opDeleteLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogGroupInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLogGroupOutput{} + req.Data = output + return +} + +// DeleteLogGroup API operation for Amazon CloudWatch Logs. +// +// Deletes the log group with the specified name and permanently deletes all +// the archived log events associated with it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogGroup for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + err := req.Send() + return out, err +} + +const opDeleteLogStream = "DeleteLogStream" + +// DeleteLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteLogStream for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLogStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLogStreamRequest method. +// req, resp := client.DeleteLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { + op := &request.Operation{ + Name: opDeleteLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogStreamInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteLogStreamOutput{} + req.Data = output + return +} + +// DeleteLogStream API operation for Amazon CloudWatch Logs. 
+// +// Deletes a log stream and permanently deletes all the archived log events +// associated with it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogStream for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + err := req.Send() + return out, err +} + +const opDeleteMetricFilter = "DeleteMetricFilter" + +// DeleteMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteMetricFilter for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMetricFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMetricFilterRequest method. +// req, resp := client.DeleteMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { + op := &request.Operation{ + Name: opDeleteMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteMetricFilterOutput{} + req.Data = output + return +} + +// DeleteMetricFilter API operation for Amazon CloudWatch Logs. +// +// Deletes a metric filter associated with the specified log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteMetricFilter for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. 
+// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteRetentionPolicy = "DeleteRetentionPolicy" + +// DeleteRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRetentionPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteRetentionPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRetentionPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRetentionPolicyRequest method. +// req, resp := client.DeleteRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRetentionPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteRetentionPolicyOutput{} + req.Data = output + return +} + +// DeleteRetentionPolicy API operation for Amazon CloudWatch Logs. +// +// Deletes the retention policy of the specified log group. Log events would +// not expire if they belong to log groups without a retention policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteRetentionPolicy for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" + +// DeleteSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubscriptionFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// See DeleteSubscriptionFilter for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSubscriptionFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSubscriptionFilterRequest method. +// req, resp := client.DeleteSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opDeleteSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubscriptionFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteSubscriptionFilterOutput{} + req.Data = output + return +} + +// DeleteSubscriptionFilter API operation for Amazon CloudWatch Logs. +// +// Deletes a subscription filter associated with the specified log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteSubscriptionFilter for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + err := req.Send() + return out, err +} + +const opDescribeDestinations = "DescribeDestinations" + +// DescribeDestinationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDestinations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeDestinations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDestinations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDestinationsRequest method. 
+// req, resp := client.DescribeDestinationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDestinationsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDestinationsOutput{} + req.Data = output + return +} + +// DescribeDestinations API operation for Amazon CloudWatch Logs. +// +// Returns all the destinations that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by destination +// name. +// +// By default, this operation returns up to 50 destinations. If there are more +// destinations to list, the response would contain a nextToken value in the +// response body. You can also limit the number of destinations returned in +// the response by specifying the limit parameter in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeDestinations for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + err := req.Send() + return out, err +} + +// DescribeDestinationsPages iterates over the pages of a DescribeDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDestinations operation. +// pageNum := 0 +// err := client.DescribeDestinationsPages(params, +// func(page *DescribeDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeDestinationsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeDestinationsOutput), lastPage) + }) +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// See DescribeExportTasks for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeExportTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeExportTasksOutput{} + req.Data = output + return +} + +// DescribeExportTasks API operation for Amazon CloudWatch Logs. +// +// Returns all the export tasks that are associated with the AWS account making +// the request. The export tasks can be filtered based on TaskId or TaskStatus. +// +// By default, this operation returns up to 50 export tasks that satisfy the +// specified filters. If there are more export tasks to list, the response would +// contain a nextToken value in the response body. You can also limit the number +// of export tasks returned in the response by specifying the limit parameter +// in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeExportTasks for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + err := req.Send() + return out, err +} + +const opDescribeLogGroups = "DescribeLogGroups" + +// DescribeLogGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeLogGroups for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLogGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLogGroupsRequest method. 
+// req, resp := client.DescribeLogGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLogGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogGroupsOutput{} + req.Data = output + return +} + +// DescribeLogGroups API operation for Amazon CloudWatch Logs. +// +// Returns all the log groups that are associated with the AWS account making +// the request. The list returned in the response is ASCII-sorted by log group +// name. +// +// By default, this operation returns up to 50 log groups. If there are more +// log groups to list, the response would contain a nextToken value in the response +// body. You can also limit the number of log groups returned in the response +// by specifying the limit parameter in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeLogGroups for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + err := req.Send() + return out, err +} + +// DescribeLogGroupsPages iterates over the pages of a DescribeLogGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogGroups operation. +// pageNum := 0 +// err := client.DescribeLogGroupsPages(params, +// func(page *DescribeLogGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeLogGroupsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeLogGroupsOutput), lastPage) + }) +} + +const opDescribeLogStreams = "DescribeLogStreams" + +// DescribeLogStreamsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeLogStreams for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLogStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLogStreamsRequest method. +// req, resp := client.DescribeLogStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { + op := &request.Operation{ + Name: opDescribeLogStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogStreamsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLogStreamsOutput{} + req.Data = output + return +} + +// DescribeLogStreams API operation for Amazon CloudWatch Logs. +// +// Returns all the log streams that are associated with the specified log group. +// The list returned in the response is ASCII-sorted by log stream name. +// +// By default, this operation returns up to 50 log streams. If there are more +// log streams to list, the response would contain a nextToken value in the +// response body. You can also limit the number of log streams returned in the +// response by specifying the limit parameter in the request. This operation +// has a limit of five transactions per second, after which transactions are +// throttled. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeLogStreams for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + err := req.Send() + return out, err +} + +// DescribeLogStreamsPages iterates over the pages of a DescribeLogStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogStreams operation. 
+//    pageNum := 0
+//    err := client.DescribeLogStreamsPages(params,
+//        func(page *DescribeLogStreamsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error {
+    page, _ := c.DescribeLogStreamsRequest(input)
+    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+    return page.EachPage(func(p interface{}, lastPage bool) bool {
+        return fn(p.(*DescribeLogStreamsOutput), lastPage)
+    })
+}
+
+const opDescribeMetricFilters = "DescribeMetricFilters"
+
+// DescribeMetricFiltersRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeMetricFilters operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DescribeMetricFilters for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DescribeMetricFilters method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DescribeMetricFiltersRequest method.
+//    req, resp := client.DescribeMetricFiltersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) {
+    op := &request.Operation{
+        Name:       opDescribeMetricFilters,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+        Paginator: &request.Paginator{
+            InputTokens:     []string{"nextToken"},
+            OutputTokens:    []string{"nextToken"},
+            LimitToken:      "limit",
+            TruncationToken: "",
+        },
+    }
+
+    if input == nil {
+        input = &DescribeMetricFiltersInput{}
+    }
+
+    req = c.newRequest(op, input, output)
+    output = &DescribeMetricFiltersOutput{}
+    req.Data = output
+    return
+}
+
+// DescribeMetricFilters API operation for Amazon CloudWatch Logs.
+//
+// Returns all the metric filters associated with the specified log group.
+// The list returned in the response is ASCII-sorted by filter name.
+//
+// By default, this operation returns up to 50 metric filters. If there are
+// more metric filters to list, the response would contain a nextToken value
+// in the response body. You can also limit the number of metric filters returned
+// in the response by specifying the limit parameter in the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation DescribeMetricFilters for usage and error information.
+//
+// Returned Error Codes:
+//    * InvalidParameterException
+//    Returned if a parameter of the request is incorrectly specified.
+//
+//    * ResourceNotFoundException
+//    Returned if the specified resource does not exist.
+//
+//    * ServiceUnavailableException
+//    Returned if the service cannot complete the request.
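+//
+// A hypothetical usage sketch (editor's note, not generated documentation) of
+// capping the page size, assuming the Limit and LogGroupName fields from the
+// service model:
+//
+//    resp, err := svc.DescribeMetricFilters(&cloudwatchlogs.DescribeMetricFiltersInput{
+//        LogGroupName: aws.String("my-log-group"),
+//        Limit:        aws.Int64(10),
+//    })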
+// +func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { + req, out := c.DescribeMetricFiltersRequest(input) + err := req.Send() + return out, err +} + +// DescribeMetricFiltersPages iterates over the pages of a DescribeMetricFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMetricFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMetricFilters operation. +// pageNum := 0 +// err := client.DescribeMetricFiltersPages(params, +// func(page *DescribeMetricFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeMetricFiltersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeMetricFiltersOutput), lastPage) + }) +} + +const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" + +// DescribeSubscriptionFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubscriptionFilters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeSubscriptionFilters for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSubscriptionFilters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSubscriptionFiltersRequest method. +// req, resp := client.DescribeSubscriptionFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { + op := &request.Operation{ + Name: opDescribeSubscriptionFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubscriptionFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSubscriptionFiltersOutput{} + req.Data = output + return +} + +// DescribeSubscriptionFilters API operation for Amazon CloudWatch Logs. +// +// Returns all the subscription filters associated with the specified log group. +// The list returned in the response is ASCII-sorted by filter name. +// +// By default, this operation returns up to 50 subscription filters. 
If there +// are more subscription filters to list, the response would contain a nextToken +// value in the response body. You can also limit the number of subscription +// filters returned in the response by specifying the limit parameter in the +// request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeSubscriptionFilters for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + err := req.Send() + return out, err +} + +// DescribeSubscriptionFiltersPages iterates over the pages of a DescribeSubscriptionFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubscriptionFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubscriptionFilters operation. +// pageNum := 0 +// err := client.DescribeSubscriptionFiltersPages(params, +// func(page *DescribeSubscriptionFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSubscriptionFiltersRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage) + }) +} + +const opFilterLogEvents = "FilterLogEvents" + +// FilterLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the FilterLogEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See FilterLogEvents for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FilterLogEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FilterLogEventsRequest method. 
+//    req, resp := client.FilterLogEventsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) {
+	op := &request.Operation{
+		Name:       opFilterLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &FilterLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &FilterLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// FilterLogEvents API operation for Amazon CloudWatch Logs.
+//
+// Retrieves log events, optionally filtered by a filter pattern, from the
+// specified log group. You can provide an optional time range to filter the
+// results on the event timestamp. You can limit the streams searched to an
+// explicit list of logStreamNames.
+//
+// By default, this operation returns as many matching log events as can fit
+// in a response size of 1MB, up to 10,000 log events, or all the events found
+// within a time-bounded scan window. If the response includes a nextToken,
+// then there is more data to search, and the search can be resumed with a new
+// request providing the nextToken. The response will contain a list of searchedLogStreams
+// that contains information about which streams were searched in the request
+// and whether they have been searched completely or require further pagination.
+// The limit parameter in the request can be used to specify the maximum number
+// of events to return in a page.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation FilterLogEvents for usage and error information.
+//
+// Returned Error Codes:
+//   * InvalidParameterException
+//   Returned if a parameter of the request is incorrectly specified.
+//
+//   * ResourceNotFoundException
+//   Returned if the specified resource does not exist.
+//
+//   * ServiceUnavailableException
+//   Returned if the service cannot complete the request.
+//
+func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) {
+	req, out := c.FilterLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// FilterLogEventsPages iterates over the pages of a FilterLogEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See FilterLogEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a FilterLogEvents operation.
+//    pageNum := 0
+//    err := client.FilterLogEventsPages(params,
+//        func(page *FilterLogEventsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.FilterLogEventsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*FilterLogEventsOutput), lastPage)
+	})
+}
+
+const opGetLogEvents = "GetLogEvents"
+
+// GetLogEventsRequest generates a "aws/request.Request" representing the
+// client's request for the GetLogEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetLogEvents for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetLogEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetLogEventsRequest method.
+//    req, resp := client.GetLogEventsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) {
+	op := &request.Operation{
+		Name:       opGetLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"nextToken"},
+			OutputTokens:    []string{"nextForwardToken"},
+			LimitToken:      "limit",
+			TruncationToken: "",
+		},
+	}
+
+	if input == nil {
+		input = &GetLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// GetLogEvents API operation for Amazon CloudWatch Logs.
+//
+// Retrieves log events from the specified log stream. You can provide an optional
+// time range to filter the results on the event timestamp.
+//
+// By default, this operation returns as many log events as can fit in a response
+// size of 1MB, up to 10,000 log events. The response will always include a
+// nextForwardToken and a nextBackwardToken in the response body. You can use
+// either of these tokens in subsequent GetLogEvents requests to paginate through
+// events in the forward or backward direction. You can also limit the number
+// of log events returned in the response by specifying the limit parameter
+// in the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation GetLogEvents for usage and error information.
+//
+// Returned Error Codes:
+//   * InvalidParameterException
+//   Returned if a parameter of the request is incorrectly specified.
+//
+//   * ResourceNotFoundException
+//   Returned if the specified resource does not exist.
+//
+//   * ServiceUnavailableException
+//   Returned if the service cannot complete the request.
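+//
+// A minimal sketch of the forward paging described above (the log group and
+// stream names are hypothetical, and the aws and cloudwatchlogs imports are
+// assumed). The service indicates the end of the stream by returning the same
+// token that was passed in:
+//
+//    params := &cloudwatchlogs.GetLogEventsInput{
+//        LogGroupName:  aws.String("my-group"),
+//        LogStreamName: aws.String("my-stream"),
+//    }
+//    for {
+//        resp, err := client.GetLogEvents(params)
+//        if err != nil {
+//            break
+//        }
+//        for _, e := range resp.Events {
+//            fmt.Println(*e.Message)
+//        }
+//        if params.NextToken != nil && *resp.NextForwardToken == *params.NextToken {
+//            break // no newer events yet
+//        }
+//        params.NextToken = resp.NextForwardToken
+//    }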
+//
+func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) {
+	req, out := c.GetLogEventsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+// GetLogEventsPages iterates over the pages of a GetLogEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetLogEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a GetLogEvents operation.
+//    pageNum := 0
+//    err := client.GetLogEventsPages(params,
+//        func(page *GetLogEventsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
+	page, _ := c.GetLogEventsRequest(input)
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*GetLogEventsOutput), lastPage)
+	})
+}
+
+const opPutDestination = "PutDestination"
+
+// PutDestinationRequest generates a "aws/request.Request" representing the
+// client's request for the PutDestination operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutDestination for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutDestination method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutDestinationRequest method.
+//    req, resp := client.PutDestinationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) {
+	op := &request.Operation{
+		Name:       opPutDestination,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutDestinationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutDestinationOutput{}
+	req.Data = output
+	return
+}
+
+// PutDestination API operation for Amazon CloudWatch Logs.
+//
+// Creates or updates a Destination. A destination encapsulates a physical resource
+// (such as a Kinesis stream) and allows you to subscribe to a real-time stream
+// of log events of a different account, ingested through PutLogEvents requests.
+// Currently, the only supported physical resource is an Amazon Kinesis stream
+// belonging to the same account as the destination.
+//
+// A destination controls what is written to its Amazon Kinesis stream through
+// an access policy. By default, PutDestination does not set any access policy
+// with the destination, which means a cross-account user will not be able to
+// call PutSubscriptionFilter against this destination. To enable that, the
+// destination owner must call PutDestinationPolicy after PutDestination.
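+//
+// A minimal sketch of that two-step flow (all names, ARNs, and the policyJSON
+// value are hypothetical placeholders):
+//
+//    dest, err := client.PutDestination(&cloudwatchlogs.PutDestinationInput{
+//        DestinationName: aws.String("my-destination"),
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/cwl-to-kinesis"),
+//        TargetArn:       aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
+//    })
+//    if err == nil {
+//        _, err = client.PutDestinationPolicy(&cloudwatchlogs.PutDestinationPolicyInput{
+//            DestinationName: dest.Destination.DestinationName,
+//            AccessPolicy:    aws.String(policyJSON),
+//        })
+//    }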
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDestination for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + err := req.Send() + return out, err +} + +const opPutDestinationPolicy = "PutDestinationPolicy" + +// PutDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDestinationPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutDestinationPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutDestinationPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutDestinationPolicyRequest method. +// req, resp := client.PutDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { + op := &request.Operation{ + Name: opPutDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutDestinationPolicyOutput{} + req.Data = output + return +} + +// PutDestinationPolicy API operation for Amazon CloudWatch Logs. +// +// Creates or updates an access policy associated with an existing Destination. +// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) +// that is used to authorize claims to register a subscription filter against +// a given destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDestinationPolicy for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. 
+//
+//   * ServiceUnavailableException
+//   Returned if the service cannot complete the request.
+//
+func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) {
+	req, out := c.PutDestinationPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutLogEvents = "PutLogEvents"
+
+// PutLogEventsRequest generates a "aws/request.Request" representing the
+// client's request for the PutLogEvents operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutLogEvents for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutLogEvents method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutLogEventsRequest method.
+//    req, resp := client.PutLogEventsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) {
+	op := &request.Operation{
+		Name:       opPutLogEvents,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &PutLogEventsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutLogEventsOutput{}
+	req.Data = output
+	return
+}
+
+// PutLogEvents API operation for Amazon CloudWatch Logs.
+//
+// Uploads a batch of log events to the specified log stream.
+//
+// Every PutLogEvents request must include the sequenceToken obtained from the
+// response of the previous request. An upload in a newly created log stream
+// does not require a sequenceToken. You can also get the sequenceToken using
+// DescribeLogStreams.
+//
+// The batch of events must satisfy the following constraints:
+//
+//   * The maximum batch size is 1,048,576 bytes, and this size is calculated
+//   as the sum of all event messages in UTF-8, plus 26 bytes for each log
+//   event.
+//
+//   * None of the log events in the batch can be more than 2 hours in the
+//   future.
+//
+//   * None of the log events in the batch can be older than 14 days or the
+//   retention period of the log group.
+//
+//   * The log events in the batch must be in chronological order by their
+//   timestamp.
+//
+//   * The maximum number of log events in a batch is 10,000.
+//
+//   * A batch of log events in a single PutLogEvents request cannot span more
+//   than 24 hours. Otherwise, the PutLogEvents operation will fail.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation PutLogEvents for usage and error information.
+//
+// Returned Error Codes:
+//   * InvalidParameterException
+//   Returned if a parameter of the request is incorrectly specified.
+//
+//   * InvalidSequenceTokenException
+//
+//   * DataAlreadyAcceptedException
+//
+//   * ResourceNotFoundException
+//   Returned if the specified resource does not exist.
+// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { + req, out := c.PutLogEventsRequest(input) + err := req.Send() + return out, err +} + +const opPutMetricFilter = "PutMetricFilter" + +// PutMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutMetricFilter for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMetricFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMetricFilterRequest method. +// req, resp := client.PutMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) { + op := &request.Operation{ + Name: opPutMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutMetricFilterOutput{} + req.Data = output + return +} + +// PutMetricFilter API operation for Amazon CloudWatch Logs. +// +// Creates or updates a metric filter and associates it with the specified log +// group. Metric filters allow you to configure rules to extract metric data +// from log events ingested through PutLogEvents requests. +// +// The maximum number of metric filters that can be associated with a log group +// is 100. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutMetricFilter for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * LimitExceededException +// Returned if you have reached the maximum number of resources that can be +// created. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + err := req.Send() + return out, err +} + +const opPutRetentionPolicy = "PutRetentionPolicy" + +// PutRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutRetentionPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRetentionPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRetentionPolicyRequest method. +// req, resp := client.PutRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { + op := &request.Operation{ + Name: opPutRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRetentionPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutRetentionPolicyOutput{} + req.Data = output + return +} + +// PutRetentionPolicy API operation for Amazon CloudWatch Logs. +// +// Sets the retention of the specified log group. A retention policy allows +// you to configure the number of days you want to retain log events in the +// specified log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutRetentionPolicy for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutSubscriptionFilter = "PutSubscriptionFilter" + +// PutSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutSubscriptionFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutSubscriptionFilter for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutSubscriptionFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the PutSubscriptionFilterRequest method. +// req, resp := client.PutSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opPutSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutSubscriptionFilterInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutSubscriptionFilterOutput{} + req.Data = output + return +} + +// PutSubscriptionFilter API operation for Amazon CloudWatch Logs. +// +// Creates or updates a subscription filter and associates it with the specified +// log group. Subscription filters allow you to subscribe to a real-time stream +// of log events ingested through PutLogEvents requests and have them delivered +// to a specific destination. Currently, the supported destinations are: +// +// * An Amazon Kinesis stream belonging to the same account as the subscription +// filter, for same-account delivery. +// +// * A logical destination (used via an ARN of Destination) belonging to +// a different account, for cross-account delivery. +// +// * An Amazon Kinesis Firehose stream belonging to the same account as the +// subscription filter, for same-account delivery. +// +// * An AWS Lambda function belonging to the same account as the subscription +// filter, for same-account delivery. +// +// Currently there can only be one subscription filter associated with a log +// group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutSubscriptionFilter for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ResourceNotFoundException +// Returned if the specified resource does not exist. +// +// * OperationAbortedException +// Returned if multiple requests to update the same resource were in conflict. +// +// * LimitExceededException +// Returned if you have reached the maximum number of resources that can be +// created. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { + req, out := c.PutSubscriptionFilterRequest(input) + err := req.Send() + return out, err +} + +const opTestMetricFilter = "TestMetricFilter" + +// TestMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the TestMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See TestMetricFilter for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the TestMetricFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestMetricFilterRequest method. +// req, resp := client.TestMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { + op := &request.Operation{ + Name: opTestMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestMetricFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &TestMetricFilterOutput{} + req.Data = output + return +} + +// TestMetricFilter API operation for Amazon CloudWatch Logs. +// +// Tests the filter pattern of a metric filter against a sample of log event +// messages. You can use this operation to validate the correctness of a metric +// filter pattern. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation TestMetricFilter for usage and error information. +// +// Returned Error Codes: +// * InvalidParameterException +// Returned if a parameter of the request is incorrectly specified. +// +// * ServiceUnavailableException +// Returned if the service cannot complete the request. +// +func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + err := req.Send() + return out, err +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // Id of the export task to cancel. + // + // TaskId is a required field + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CreateExportTaskInput struct { + _ struct{} `type:"structure"` + + // Name of Amazon S3 bucket to which the log data will be exported. + // + // Note: Only buckets in the same AWS region are supported. 
+ // + // Destination is a required field + Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` + + // Prefix that will be used as the start of Amazon S3 key for every object exported. + // If not specified, this defaults to 'exportedlogs'. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the start time of the range for the request. Events + // with a timestamp prior to this time will not be exported. + // + // From is a required field + From *int64 `locationName:"from" type:"long" required:"true"` + + // The name of the log group to export. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only export log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. It indicates the end time of the range for the request. Events + // with a timestamp later than this time will not be exported. + // + // To is a required field + To *int64 `locationName:"to" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateExportTaskInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Destination != nil && len(*s.Destination) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Destination", 1)) + } + if s.From == nil { + invalidParams.Add(request.NewErrParamRequired("From")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.TaskName != nil && len(*s.TaskName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskName", 1)) + } + if s.To == nil { + invalidParams.Add(request.NewErrParamRequired("To")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Id of the export task that got created. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskOutput) GoString() string { + return s.String() +} + +type CreateLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to create. 
+	//
+	// LogGroupName is a required field
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateLogGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogGroupInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateLogGroupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateLogGroupInput"}
+	if s.LogGroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
+	}
+	if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateLogGroupOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLogGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogGroupOutput) GoString() string {
+	return s.String()
+}
+
+type CreateLogStreamInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the log group under which the log stream is to be created.
+	//
+	// LogGroupName is a required field
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+
+	// The name of the log stream to create.
+	//
+	// LogStreamName is a required field
+	LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateLogStreamInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogStreamInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateLogStreamInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateLogStreamInput"}
+	if s.LogGroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
+	}
+	if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
+	}
+	if s.LogStreamName == nil {
+		invalidParams.Add(request.NewErrParamRequired("LogStreamName"))
+	}
+	if s.LogStreamName != nil && len(*s.LogStreamName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type CreateLogStreamOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateLogStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateLogStreamOutput) GoString() string {
+	return s.String()
+}
+
+type DeleteDestinationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the destination to delete.
+ // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDestinationInput"} + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationOutput) GoString() string { + return s.String() +} + +type DeleteLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to delete. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupOutput) GoString() string { + return s.String() +} + +type DeleteLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group under which the log stream to delete belongs. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to delete. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
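+//
+// For example (names are hypothetical; note that the SDK also runs Validate
+// automatically before a request is sent):
+//
+//    in := &cloudwatchlogs.DeleteLogStreamInput{
+//        LogGroupName:  aws.String("my-group"),
+//        LogStreamName: aws.String("my-stream"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports missing or too-short required fields
+//    }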
+func (s *DeleteLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteMetricFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the metric filter to delete. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the metric filter to delete. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterOutput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group that is associated with the retention policy to + // delete. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the subscription filter to delete. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group that is associated with the subscription filter + // to delete. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriptionFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type DescribeDestinationsInput struct { + _ struct{} `type:"structure"` + + // Will only return destinations that match the provided destinationNamePrefix. + // If you don't specify a value, no prefix is applied. + DestinationNamePrefix *string `min:"1" type:"string"` + + // The maximum number of results to return. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. 
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDestinationsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDestinationsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeDestinationsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeDestinationsInput"}
+	if s.DestinationNamePrefix != nil && len(*s.DestinationNamePrefix) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("DestinationNamePrefix", 1))
+	}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type DescribeDestinationsOutput struct {
+	_ struct{} `type:"structure"`
+
+	Destinations []*Destination `locationName:"destinations" type:"list"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous request. The
+	// token expires after 24 hours.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeDestinationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeDestinationsOutput) GoString() string {
+	return s.String()
+}
+
+type DescribeExportTasksInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of items returned in the response. If you don't specify
+	// a value, the request would return up to 50 items.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// A string token used for pagination that points to the next page of results.
+	// It must be a value obtained from the response of the previous DescribeExportTasks
+	// request.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// All export tasks that match the specified status code will be returned.
+	// This can return zero or more export tasks.
+	StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"`
+
+	// The export task that matches the specified task Id will be returned. This
+	// can result in zero or one export task.
+	TaskId *string `locationName:"taskId" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeExportTasksInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeExportTasksInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
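+//
+// A sketch combining an explicit Validate call with the manual nextToken loop
+// this input supports (the status-code constant is illustrative; no *Pages
+// helper is generated for DescribeExportTasks in this file):
+//
+//    in := &cloudwatchlogs.DescribeExportTasksInput{
+//        StatusCode: aws.String(cloudwatchlogs.ExportTaskStatusCodeCompleted),
+//    }
+//    if err := in.Validate(); err == nil {
+//        for {
+//            out, err := client.DescribeExportTasks(in)
+//            if err != nil {
+//                break
+//            }
+//            fmt.Println(out.ExportTasks)
+//            if out.NextToken == nil {
+//                break
+//            }
+//            in.NextToken = out.NextToken
+//        }
+//    }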
+func (s *DescribeExportTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of export tasks. + ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +type DescribeLogGroupsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // Will only return log groups that match the provided logGroupNamePrefix. If + // you don't specify a value, no prefix filter is applied. + LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeLogGroups + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLogGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogGroupsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupNamePrefix != nil && len(*s.LogGroupNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLogGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of log groups. + LogGroups []*LogGroup `locationName:"logGroups" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogGroupsOutput) GoString() string { + return s.String() +} + +type DescribeLogStreamsInput struct { + _ struct{} `type:"structure"` + + // If set to true, results are returned in descending order. If you don't specify + // a value or set it to false, results are returned in ascending order. + Descending *bool `locationName:"descending" type:"boolean"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which log streams are to be listed. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Will only return log streams that match the provided logStreamNamePrefix. + // If you don't specify a value, no prefix filter is applied. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeLogStreams + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Specifies what to order the returned log streams by. Valid arguments are + // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results + // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot + // also contain a logStreamNamePrefix. + OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` +} + +// String returns the string representation +func (s DescribeLogStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLogStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogStreamsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeLogStreamsOutput struct { + _ struct{} `type:"structure"` + + // A list of log streams. + LogStreams []*LogStream `locationName:"logStreams" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsOutput) GoString() string { + return s.String() +} + +type DescribeMetricFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return metric filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned in the response. If you don't specify + // a value, the request would return up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group name for which metric filters are to be listed. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous DescribeMetricFilters + // request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMetricFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMetricFiltersInput"} + if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeMetricFiltersOutput struct { + _ struct{} `type:"structure"` + + MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersOutput) GoString() string { + return s.String() +} + +type DescribeSubscriptionFiltersInput struct { + _ struct{} `type:"structure"` + + // Will only return subscription filters that match the provided filterNamePrefix. + // If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of results to return. 
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+ // The log group name for which subscription filters are to be listed.
+ //
+ // LogGroupName is a required field
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+
+ // A string token used for pagination that points to the next page of results.
+ // It must be a value obtained from the response of the previous request. The
+ // token expires after 24 hours.
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeSubscriptionFiltersInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeSubscriptionFiltersInput"}
+ if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.LogGroupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
+ }
+ if s.LogGroupName != nil && len(*s.LogGroupName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1))
+ }
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DescribeSubscriptionFiltersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A string token used for pagination that points to the next page of results.
+ // It must be a value obtained from the response of the previous request. The
+ // token expires after 24 hours.
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+ SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeSubscriptionFiltersOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeSubscriptionFiltersOutput) GoString() string {
+ return s.String()
+}
+
+// A cross-account destination that is the recipient of subscription log events.
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+ // An IAM policy document that governs which AWS accounts can create subscription
+ // filters against this destination.
+ AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"`
+
+ // ARN of this destination.
+ Arn *string `locationName:"arn" type:"string"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC specifying when this destination was created.
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+ // Name of the destination.
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string"`
+
+ // A role used for impersonation when delivering log events to the target.
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
+
+ // ARN of the physical target where the log events will be delivered (e.g. the
+ // ARN of a Kinesis stream).
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+ return s.String()
+}
+
+// Represents an export task.
+type ExportTask struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Amazon S3 bucket to which the log data was exported.
+ Destination *string `locationName:"destination" min:"1" type:"string"`
+
+ // Prefix that was used as the start of the Amazon S3 key for every object
+ // exported.
+ DestinationPrefix *string `locationName:"destinationPrefix" type:"string"`
+
+ // Execution info about the export task.
+ ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC. Events with a timestamp prior to this time are not exported.
+ From *int64 `locationName:"from" type:"long"`
+
+ // The name of the log group from which log data was exported.
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+ // Status of the export task.
+ Status *ExportTaskStatus `locationName:"status" type:"structure"`
+
+ // ID of the export task.
+ TaskId *string `locationName:"taskId" min:"1" type:"string"`
+
+ // The name of the export task.
+ TaskName *string `locationName:"taskName" min:"1" type:"string"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC. Events with a timestamp later than this time are not exported.
+ To *int64 `locationName:"to" type:"long"`
+}
+
+// String returns the string representation
+func (s ExportTask) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTask) GoString() string {
+ return s.String()
+}
+
+// Represents the execution info of an export task.
+type ExportTaskExecutionInfo struct {
+ _ struct{} `type:"structure"`
+
+ // A point in time when the export task was completed.
+ CompletionTime *int64 `locationName:"completionTime" type:"long"`
+
+ // A point in time when the export task was created.
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+}
+
+// String returns the string representation
+func (s ExportTaskExecutionInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTaskExecutionInfo) GoString() string {
+ return s.String()
+}
+
+// Represents the status of an export task.
+type ExportTaskStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Status code of the export task.
+ Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"`
+
+ // Status message related to the code.
+ Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportTaskStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTaskStatus) GoString() string {
+ return s.String()
+}
+
+type FilterLogEventsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC. If provided, events with a timestamp later than this time are
+ // not returned.
+ EndTime *int64 `locationName:"endTime" type:"long"`
+
+ // A valid CloudWatch Logs filter pattern to use for filtering the response.
+ // If not provided, all the events are matched. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // If provided, the API will make a best effort to provide responses that contain + // events from multiple log streams within the log group interleaved in a single + // response. If not provided, all the matched log events in the first log stream + // will be searched first, then those in the next log stream, etc. + Interleaved *bool `locationName:"interleaved" type:"boolean"` + + // The maximum number of events to return in a page of results. Default is 10,000 + // events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Optional list of log stream names within the specified log group to search. + // Defaults to all the log streams in the log group. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. If provided, events with a timestamp prior to this time are + // not returned. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s FilterLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type FilterLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A list of FilteredLogEvent objects representing the matched events from the + // request. + Events []*FilteredLogEvent `locationName:"events" type:"list"` + + // A pagination token obtained from a FilterLogEvents response to continue paginating + // the FilterLogEvents results. This token is omitted from the response when + // there are no other events to display. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // A list of SearchedLogStream objects indicating which log streams have been + // searched in this request and whether each has been searched completely or + // still has more to be paginated. 
+ SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` +} + +// String returns the string representation +func (s FilterLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsOutput) GoString() string { + return s.String() +} + +// Represents a matched event from a FilterLogEvents request. +type FilteredLogEvent struct { + _ struct{} `type:"structure"` + + // A unique identifier for this event. + EventId *string `locationName:"eventId" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name of the log stream this event belongs to. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The data contained in the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s FilteredLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilteredLogEvent) GoString() string { + return s.String() +} + +type GetLogEventsInput struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The maximum number of log events returned in the response. If you don't specify + // a value, the request would return as many log events as can fit in a response + // size of 1MB, up to 10,000 log events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to query. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to query. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the nextForwardToken or nextBackwardToken + // fields in the response of the previous GetLogEvents request. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If set to true, the earliest log events would be returned first. The default + // is false (the latest log events are returned first). + StartFromHead *bool `locationName:"startFromHead" type:"boolean"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s GetLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
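+//
+// For example (an illustrative sketch; svc is assumed to be a *CloudWatchLogs
+// client and GetLogEvents the generated operation method), validate once and
+// then page forward until the returned token stops changing:
+//
+// input := &GetLogEventsInput{
+// LogGroupName: aws.String("my-group"),
+// LogStreamName: aws.String("my-stream"),
+// StartFromHead: aws.Bool(true),
+// }
+// if err := input.Validate(); err != nil {
+// return err
+// }
+// for {
+// out, err := svc.GetLogEvents(input)
+// if err != nil {
+// return err
+// }
+// // consume out.Events here
+// if aws.StringValue(out.NextForwardToken) == aws.StringValue(input.NextToken) {
+// break // getting the same token back means the end of the stream
+// }
+// input.NextToken = out.NextForwardToken
+// }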
+func (s *GetLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetLogEventsOutput struct { + _ struct{} `type:"structure"` + + Events []*OutputLogEvent `locationName:"events" type:"list"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` + + // A string token used for pagination that points to the next page of results. + // It must be a value obtained from the response of the previous request. The + // token expires after 24 hours. + NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsOutput) GoString() string { + return s.String() +} + +// A log event is a record of some activity that was recorded by the application +// or resource being monitored. The log event record that CloudWatch Logs understands +// contains two properties: the timestamp of when the event occurred, and the +// raw event message. +type InputLogEvent struct { + _ struct{} `type:"structure"` + + // Message is a required field + Message *string `locationName:"message" min:"1" type:"string" required:"true"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + // + // Timestamp is a required field + Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"` +} + +// String returns the string representation +func (s InputLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputLogEvent) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputLogEvent) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputLogEvent"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Message != nil && len(*s.Message) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Message", 1)) + } + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type LogGroup struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. 
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+ // The number of metric filters associated with the log group.
+ MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"`
+
+ // Specifies the number of days you want to retain log events in the specified
+ // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180,
+ // 365, 400, 545, 731, 1827, 3653.
+ RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"`
+
+ StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+}
+
+// String returns the string representation
+func (s LogGroup) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogGroup) GoString() string {
+ return s.String()
+}
+
+// A log stream is a sequence of log events from a single emitter of logs.
+type LogStream struct {
+ _ struct{} `type:"structure"`
+
+ Arn *string `locationName:"arn" type:"string"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"`
+
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"`
+
+ StoredBytes *int64 `locationName:"storedBytes" type:"long"`
+
+ // A string token used for making PutLogEvents requests. A sequenceToken can
+ // only be used once, and PutLogEvents requests must include the sequenceToken
+ // obtained from the response of the previous request.
+ UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s LogStream) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LogStream) GoString() string {
+ return s.String()
+}
+
+// Metric filters can be used to express how CloudWatch Logs would extract metric
+// observations from ingested log events and transform them into metric data
+// in a CloudWatch metric.
+type MetricFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A point in time expressed as the number of milliseconds since Jan 1, 1970
+ // 00:00:00 UTC.
+ CreationTime *int64 `locationName:"creationTime" type:"long"`
+
+ // A name for a metric or subscription filter.
+ FilterName *string `locationName:"filterName" min:"1" type:"string"`
+
+ // A symbolic description of how CloudWatch Logs should interpret the data in
+ // each log event. For example, a log event may contain timestamps, IP addresses,
+ // strings, and so on. You use the filter pattern to specify what to look for
+ // in the log event message.
+ FilterPattern *string `locationName:"filterPattern" type:"string"` + + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` +} + +// String returns the string representation +func (s MetricFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilter) GoString() string { + return s.String() +} + +type MetricFilterMatchRecord struct { + _ struct{} `type:"structure"` + + EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` + + EventNumber *int64 `locationName:"eventNumber" type:"long"` + + ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` +} + +// String returns the string representation +func (s MetricFilterMatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilterMatchRecord) GoString() string { + return s.String() +} + +type MetricTransformation struct { + _ struct{} `type:"structure"` + + // (Optional) A default value to emit when a filter pattern does not match a + // log event. Can be null. + DefaultValue *float64 `locationName:"defaultValue" type:"double"` + + // Name of the metric. + // + // MetricName is a required field + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // Namespace to which the metric belongs. + // + // MetricNamespace is a required field + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // A string representing a value to publish to this metric when a filter pattern + // matches a log event. + // + // MetricValue is a required field + MetricValue *string `locationName:"metricValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricTransformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricTransformation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricTransformation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricTransformation"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("MetricNamespace")) + } + if s.MetricValue == nil { + invalidParams.Add(request.NewErrParamRequired("MetricValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type OutputLogEvent struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + Message *string `locationName:"message" min:"1" type:"string"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s OutputLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLogEvent) GoString() string { + return s.String() +} + +type PutDestinationInput struct { + _ struct{} `type:"structure"` + + // A name for the destination. 
+ //
+ // DestinationName is a required field
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
+
+ // The ARN of an IAM role that grants CloudWatch Logs permissions to do Amazon
+ // Kinesis PutRecord requests on the destination stream.
+ //
+ // RoleArn is a required field
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"`
+
+ // The ARN of an Amazon Kinesis stream to deliver matching log events to.
+ //
+ // TargetArn is a required field
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutDestinationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutDestinationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutDestinationInput"}
+ if s.DestinationName == nil {
+ invalidParams.Add(request.NewErrParamRequired("DestinationName"))
+ }
+ if s.DestinationName != nil && len(*s.DestinationName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1))
+ }
+ if s.TargetArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetArn"))
+ }
+ if s.TargetArn != nil && len(*s.TargetArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutDestinationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A cross-account destination that is the recipient of subscription log events.
+ Destination *Destination `locationName:"destination" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutDestinationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationOutput) GoString() string {
+ return s.String()
+}
+
+type PutDestinationPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // An IAM policy document that authorizes cross-account users to deliver their
+ // log events to the associated destination.
+ //
+ // AccessPolicy is a required field
+ AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"`
+
+ // A name for an existing destination.
+ //
+ // DestinationName is a required field
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutDestinationPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutDestinationPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
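+//
+// For example (an illustrative sketch; policyJSON is an assumed variable holding
+// the IAM policy document, and the destination name is a placeholder):
+//
+// input := &PutDestinationPolicyInput{
+// AccessPolicy: aws.String(policyJSON),
+// DestinationName: aws.String("my-destination"),
+// }
+// if err := input.Validate(); err != nil {
+// return err
+// }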
+func (s *PutDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDestinationPolicyInput"} + if s.AccessPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("AccessPolicy")) + } + if s.AccessPolicy != nil && len(*s.AccessPolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessPolicy", 1)) + } + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutDestinationPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyOutput) GoString() string { + return s.String() +} + +type PutLogEventsInput struct { + _ struct{} `type:"structure"` + + // A list of log events belonging to a log stream. + // + // LogEvents is a required field + LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` + + // The name of the log group to put log events to. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream to put log events to. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // A string token that must be obtained from the response of the previous PutLogEvents + // request. + SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLogEventsInput"} + if s.LogEvents == nil { + invalidParams.Add(request.NewErrParamRequired("LogEvents")) + } + if s.LogEvents != nil && len(s.LogEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEvents", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.SequenceToken != nil && len(*s.SequenceToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SequenceToken", 1)) + } + if s.LogEvents != nil { + for i, v := range s.LogEvents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogEvents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutLogEventsOutput struct { + _ struct{} `type:"structure"` + + // A string token used for making PutLogEvents requests. 
A sequenceToken can + // only be used once, and PutLogEvents requests must include the sequenceToken + // obtained from the response of the previous request. + NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` + + RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` +} + +// String returns the string representation +func (s PutLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsOutput) GoString() string { + return s.String() +} + +type PutMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A name for the metric filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for extracting metric data out of + // ingested log events. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the metric filter with. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A collection of information needed to define how metric data gets emitted. + // + // MetricTransformations is a required field + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
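+//
+// For example (an illustrative sketch with placeholder names), note that Validate
+// also descends into the nested MetricTransformations:
+//
+// input := &PutMetricFilterInput{
+// FilterName: aws.String("errors"),
+// FilterPattern: aws.String("ERROR"),
+// LogGroupName: aws.String("my-group"),
+// MetricTransformations: []*MetricTransformation{{
+// MetricName: aws.String("ErrorCount"),
+// MetricNamespace: aws.String("MyApp"),
+// MetricValue: aws.String("1"),
+// }},
+// }
+// if err := input.Validate(); err != nil {
+// return err // a missing MetricName, for example, surfaces as a nested error
+// }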
+func (s *PutMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.MetricTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("MetricTransformations")) + } + if s.MetricTransformations != nil && len(s.MetricTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricTransformations", 1)) + } + if s.MetricTransformations != nil { + for i, v := range s.MetricTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterOutput) GoString() string { + return s.String() +} + +type PutRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to associate the retention policy with. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Specifies the number of days you want to retain log events in the specified + // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, + // 365, 400, 545, 731, 1827, 3653. + // + // RetentionInDays is a required field + RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` +} + +// String returns the string representation +func (s PutRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
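+//
+// For example (an illustrative sketch with a placeholder log group name):
+//
+// input := &PutRetentionPolicyInput{
+// LogGroupName: aws.String("my-group"),
+// RetentionInDays: aws.Int64(14), // must be one of the values listed above
+// }
+// if err := input.Validate(); err != nil {
+// return err
+// }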
+func (s *PutRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RetentionInDays == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionInDays")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyOutput) GoString() string { + return s.String() +} + +type PutSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the destination to deliver matching log events to. Currently, + // the supported destinations are: + // + // * An Amazon Kinesis stream belonging to the same account as the subscription + // filter, for same-account delivery. + // + // * A logical destination (used via an ARN of Destination) belonging to + // a different account, for cross-account delivery. + // + // * An Amazon Kinesis Firehose stream belonging to the same account as the + // subscription filter, for same-account delivery. + // + // * An AWS Lambda function belonging to the same account as the subscription + // filter, for same-account delivery. + // + // DestinationArn is a required field + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` + + // A name for the subscription filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream + // of log events. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group to associate the subscription filter with. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants CloudWatch Logs permissions to deliver + // ingested log events to the destination stream. You don't need to provide + // the ARN when you are working with a logical destination (used via an ARN + // of Destination) for cross-account delivery. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
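+//
+// For example (an illustrative sketch; the ARNs and names are placeholders),
+// subscribing a log group to a same-account Amazon Kinesis stream:
+//
+// input := &PutSubscriptionFilterInput{
+// DestinationArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
+// FilterName: aws.String("all-events"),
+// FilterPattern: aws.String(""), // an empty pattern matches every log event
+// LogGroupName: aws.String("my-group"),
+// RoleArn: aws.String("arn:aws:iam::123456789012:role/cwl-to-kinesis"),
+// }
+// if err := input.Validate(); err != nil {
+// return err
+// }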
+func (s *PutSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutSubscriptionFilterInput"} + if s.DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationArn")) + } + if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) + } + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type RejectedLogEventsInfo struct { + _ struct{} `type:"structure"` + + ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` + + TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` + + TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` +} + +// String returns the string representation +func (s RejectedLogEventsInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectedLogEventsInfo) GoString() string { + return s.String() +} + +// An object indicating the search status of a log stream in a FilterLogEvents +// request. +type SearchedLogStream struct { + _ struct{} `type:"structure"` + + // The name of the log stream. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // Indicates whether all the events in this log stream were searched or more + // data exists to search by paginating further. + SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` +} + +// String returns the string representation +func (s SearchedLogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchedLogStream) GoString() string { + return s.String() +} + +type SubscriptionFilter struct { + _ struct{} `type:"structure"` + + // A point in time expressed as the number of milliseconds since Jan 1, 1970 + // 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + + // A name for a metric or subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event may contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. 
+ FilterPattern *string `locationName:"filterPattern" type:"string"`
+
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"`
+
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s SubscriptionFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SubscriptionFilter) GoString() string {
+ return s.String()
+}
+
+type TestMetricFilterInput struct {
+ _ struct{} `type:"structure"`
+
+ // A symbolic description of how CloudWatch Logs should interpret the data in
+ // each log event. For example, a log event may contain timestamps, IP addresses,
+ // strings, and so on. You use the filter pattern to specify what to look for
+ // in the log event message.
+ //
+ // FilterPattern is a required field
+ FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"`
+
+ // A list of log event messages to test.
+ //
+ // LogEventMessages is a required field
+ LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TestMetricFilterInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestMetricFilterInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TestMetricFilterInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TestMetricFilterInput"}
+ if s.FilterPattern == nil {
+ invalidParams.Add(request.NewErrParamRequired("FilterPattern"))
+ }
+ if s.LogEventMessages == nil {
+ invalidParams.Add(request.NewErrParamRequired("LogEventMessages"))
+ }
+ if s.LogEventMessages != nil && len(s.LogEventMessages) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("LogEventMessages", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type TestMetricFilterOutput struct {
+ _ struct{} `type:"structure"`
+
+ Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"`
+}
+
+// String returns the string representation
+func (s TestMetricFilterOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TestMetricFilterOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // ExportTaskStatusCodeCancelled is an ExportTaskStatusCode enum value
+ ExportTaskStatusCodeCancelled = "CANCELLED"
+
+ // ExportTaskStatusCodeCompleted is an ExportTaskStatusCode enum value
+ ExportTaskStatusCodeCompleted = "COMPLETED"
+
+ // ExportTaskStatusCodeFailed is an ExportTaskStatusCode enum value
+ ExportTaskStatusCodeFailed = "FAILED"
+
+ // ExportTaskStatusCodePending is an ExportTaskStatusCode enum value
+ ExportTaskStatusCodePending = "PENDING"
+
+ // ExportTaskStatusCodePendingCancel is an ExportTaskStatusCode enum value
+ ExportTaskStatusCodePendingCancel = "PENDING_CANCEL"
+
+ // ExportTaskStatusCodeRunning is an ExportTaskStatusCode enum value
+ ExportTaskStatusCodeRunning = "RUNNING"
+)
+
+const (
+ // OrderByLogStreamName is an OrderBy enum value
+ OrderByLogStreamName = "LogStreamName"
+
+ // OrderByLastEventTime is an OrderBy enum value
+ OrderByLastEventTime = "LastEventTime"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go
new file mode 100644
index 0000000..0c7563a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go
@@ -0,0 +1,117 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package cloudwatchlogs
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// You can use Amazon CloudWatch Logs to monitor, store, and access your log
+// files from Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon CloudTrail,
+// or other sources. You can then retrieve the associated log data from CloudWatch
+// Logs using the Amazon CloudWatch console, the CloudWatch Logs commands in
+// the AWS CLI, the CloudWatch Logs API, or the CloudWatch Logs SDK.
+//
+// You can use CloudWatch Logs to:
+//
+// * Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch
+// Logs to monitor applications and systems using log data. For example,
+// CloudWatch Logs can track the number of errors that occur in your application
+// logs and send you a notification whenever the rate of errors exceeds a
+// threshold you specify. CloudWatch Logs uses your log data for monitoring,
+// so no code changes are required. For example, you can monitor application
+// logs for specific literal terms (such as "NullReferenceException") or
+// count the number of occurrences of a literal term at a particular position
+// in log data (such as "404" status codes in an Apache access log). When
+// the term you are searching for is found, CloudWatch Logs reports the data
+// to an Amazon CloudWatch metric that you specify.
+//
+// * Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon
+// CloudWatch and receive notifications of particular API activity as captured
+// by CloudTrail and use the notification to perform troubleshooting.
+//
+// * Archive Log Data: You can use CloudWatch Logs to store your log data
+// in highly durable storage. You can change the log retention setting so
+// that any log events older than this setting are automatically deleted.
+// The CloudWatch Logs agent makes it easy to quickly send both rotated and
+// non-rotated log data off a host and into the log service. You can then
+// access the raw log data when you need it.
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type CloudWatchLogs struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "logs"
+
+// New creates a new instance of the CloudWatchLogs client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a CloudWatchLogs client from just a session.
+// svc := cloudwatchlogs.New(mySession)
+//
+// // Create a CloudWatchLogs client with additional configuration
+// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes, and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs {
+ svc := &CloudWatchLogs{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2014-03-28",
+ JSONVersion: "1.1",
+ TargetPrefix: "Logs_20140328",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a CloudWatchLogs operation and runs any
+// custom request initialization.
+func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 0000000..e10ca8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,1894 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package sts provides a client for AWS Security Token Service.
+package sts
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates an "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRole for usage and error information.
+//
+// Create a request object using this method when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or when
+// you want to access properties on the request object before or after sending
+// the request. If you just want the service response, call the AssumeRole
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req, resp := client.AssumeRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+ op := &request.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AssumeRoleOutput{}
+ req.Data = output
+ return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to.
Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. 
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. +// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the previous different account user), +// or you can add the user as a principal directly in the role's trust policy. +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * MalformedPolicyDocument +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * PackedPolicyTooLarge +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * RegionDisabledException +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide.
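A hedged sketch of the MFA-protected AssumeRole flow described above; svc is assumed to be an *sts.STS client, and the ARNs, serial number, and TOTP code are placeholders.

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder role ARN
		RoleSessionName: aws.String("example-session"),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example"), // virtual MFA device
		TokenCode:       aws.String("123456"),                                // current TOTP from the device
		DurationSeconds: aws.Int64(900),                                      // 15 minutes, the minimum
	})
	if err != nil {
		log.Fatal(err) // an awserr.Error carrying one of the codes listed above
	}
	fmt.Println(*out.Credentials.AccessKeyId)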
+// +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithSAML for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithSAMLOutput{} + req.Data = output + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRoleWithSAML, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed.
If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * MalformedPolicyDocument +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * PackedPolicyTooLarge +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * IDPRejectedClaim +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. 
+// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * InvalidIdentityToken +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ExpiredTokenException +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * RegionDisabledException +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithWebIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithWebIdentityOutput{} + req.Data = output + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. 
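A minimal sketch of the AssumeRoleWithSAML call completed above, assuming samlResponse already holds the base64-encoded assertion from the IdP; no AWS credentials are needed, because the assertion itself authenticates the caller. Both ARNs are placeholders.

	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),           // placeholder
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"), // placeholder
		SAMLAssertion: aws.String(samlResponse),
	})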
+// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. +// +// The credentials are valid for the duration that you specified when calling +// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to +// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs.
The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * MalformedPolicyDocument +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * PackedPolicyTooLarge +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * IDPRejectedClaim +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * IDPCommunicationError +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * InvalidIdentityToken +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. 
+// +// * ExpiredTokenException +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * RegionDisabledException +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DecodeAuthorizationMessage for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &DecodeAuthorizationMessageOutput{} + req.Data = output + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the action +// should not see. 
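A sketch of the AssumeRoleWithWebIdentity exchange documented above, assuming idToken was obtained from a provider such as Amazon Cognito or Google; like the SAML variant, the call is made without AWS credentials. Identifiers are placeholders.

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder
		RoleSessionName:  aws.String("app-user-42"),
		WebIdentityToken: aws.String(idToken), // token issued by the identity provider
	})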
To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * InvalidAuthorizationMessageException +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + err := req.Send() + return out, err +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetCallerIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCallerIdentityOutput{} + req.Data = output + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM identity whose credentials are used to call +// the API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetFederationToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetFederationTokenOutput{} + req.Data = output + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. Because +// you must call the GetFederationToken action using the long-term security +// credentials of an IAM user, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other APIs that produce temporary +// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// If you are creating a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider, we recommend that you +// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// The GetFederationToken action must be called by using the long-term AWS security +// credentials of an IAM user. 
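Since GetCallerIdentity, completed above, takes no meaningful input and requires no special permissions, it works as a quick sanity check of whichever credentials the client resolved. A sketch:

	id, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err == nil {
		fmt.Println(*id.Account, *id.Arn) // account ID and ARN of the calling identity
	}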
You can also call GetFederationToken using the +// security credentials of an AWS root account, but we do not recommend it. +// Instead, we recommend that you create an IAM user for the purpose of the +// proxy application and then attach a policy to the IAM user that limits federated +// users to only the actions and resources that they need access to. For more +// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// The temporary security credentials that are obtained by using the long-term +// credentials of an IAM user are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default +// is 43200 seconds (12 hours). Temporary credentials that are obtained by using +// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). +// +// The temporary security credentials created by GetFederationToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot use these credentials to call any IAM APIs. +// +// * You cannot call any STS APIs. +// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// * The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * MalformedPolicyDocument +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * PackedPolicyTooLarge +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * RegionDisabledException +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSessionToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSessionTokenOutput{} + req.Data = output + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. If you do not supply a correct MFA +// code, then the API returns an access denied error. 
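A sketch of the proxy-application pattern that the GetFederationToken documentation above describes: the calling IAM user's policy grants the superset of permissions, and the inline policy passed here scopes each federated user down to the intersection. The name, policy JSON, and duration are illustrative.

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-fed-user"), // placeholder federated user name
		Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
		DurationSeconds: aws.Int64(43200), // the 12-hour default, stated explicitly
	})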
For a comparison of GetSessionToken +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The GetSessionToken action must be called by using the long-term AWS security +// credentials of the AWS account or an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify, from 900 seconds +// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default +// of 43200 seconds (12 hours); credentials that are created by using account +// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 +// seconds (1 hour), with a default of 1 hour. +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM APIs unless MFA authentication information is +// included in the request. +// +// * You cannot call any STS API except AssumeRole. +// +// We recommend that you do not call GetSessionToken with root account credentials. +// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The permissions associated with the temporary security credentials returned +// by GetSessionToken are based on the permissions associated with the account or +// IAM user whose credentials are used to call the action. If GetSessionToken +// is called using root account credentials, the temporary credentials have +// root account permissions. Similarly, if GetSessionToken is called using the +// credentials of an IAM user, the temporary credentials have the same permissions +// as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * RegionDisabledException +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + err := req.Send() + return out, err +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session.
The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that is used by third parties when assuming roles in + // their customers' accounts. For each role that the third party can assume, + // they should instruct their customers to ensure the role's trust policy checks + // for the external ID that the third party generated. Each time the third party + // assumes the role, they should pass the customer's external ID. The external + // ID is useful in order to help third parties bind a role to the customer who + // created it. For more information about the external ID, see How to Use an + // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@:\/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. 
+ // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. 
The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Enabling SAML 2.0 Federated + // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the Using IAM guide. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. 
If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. 
Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. 
+ // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. 
+ // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. 
+ // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. 
The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. 
The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. 
+type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000..4010cc7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 0000000..a9b9b32 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,130 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. 
For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sts"
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000..339177b --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md new file mode 100644 index 0000000..fc05777 --- /dev/null +++ b/vendor/github.com/beorn7/perks/README.md @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. 
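+
+As a quick orientation, here is a minimal usage sketch of the quantile package.
+This is hypothetical example code, not from the package's own docs, but it uses
+only the NewTargeted, Insert, Query, and Count APIs defined in quantile/stream.go:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/beorn7/perks/quantile"
+)
+
+func main() {
+	// Track the median and the 99th percentile, each with an absolute
+	// error tolerance supplied a priori.
+	q := quantile.NewTargeted(map[float64]float64{
+		0.50: 0.005,
+		0.99: 0.001,
+	})
+	for i := 1; i <= 1000; i++ {
+		q.Insert(float64(i))
+	}
+	fmt.Println("count:", q.Count())
+	fmt.Println("p50:", q.Query(0.50))
+	fmt.Println("p99:", q.Query(0.99))
+}
+```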
+ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000..f4cabd6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. 
the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE new file mode 100644 index 0000000..004e77f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md new file mode 100644 index 0000000..2a69d95 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/README.md @@ -0,0 +1,857 @@ +Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. 
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also roll back the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+You also get a consistent view of the database within this closure; however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+	// Find last key in bucket, decode as bigendian uint64, increment
+	// by one, encode back to []byte, and add new key.
+	...
+	id = newValue
+	return nil
+})
+if err != nil {
+	return ...
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+	return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+	return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+	return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b, err := tx.CreateBucket([]byte("MyBucket"))
+	if err != nil {
+		return fmt.Errorf("create bucket: %s", err)
+	}
+	return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	err := b.Put([]byte("answer"), []byte("42"))
+	return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	v := b.Get([]byte("answer"))
+	fmt.Printf("The answer is: %s\n", v)
+	return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+
+### Autoincrementing integer for the bucket
+
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
+		// Retrieve the users bucket.
+		// This should be created when the DB is first opened.
+		b := tx.Bucket([]byte("users"))
+
+		// Generate ID for the user.
+		// This returns an error only if the Tx is closed or not writeable.
+		// That can't happen in an Update() call, so we ignore the error check.
+		id, _ := b.NextSequence()
+		u.ID = int(id)
+
+		// Marshal user data into bytes.
+		buf, err := json.Marshal(u)
+		if err != nil {
+			return err
+		}
+
+		// Persist bytes to users bucket.
+		return b.Put(itob(u.ID), buf)
+	})
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, uint64(v))
+	return b
+}
+
+type User struct {
+	ID int
+	...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	b := tx.Bucket([]byte("MyBucket"))
+
+	c := b.Cursor()
+
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First()  Move to the first key.
+Last()   Move to the last key.
+Seek()   Move to a specific key.
+Next()   Move to the next key.
+Prev()   Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor, `Next()` will return a
+`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
+access the sub-bucket.
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+	prefix := []byte("1234")
+	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume our events bucket exists and has RFC3339 encoded time keys.
+	c := tx.Bucket([]byte("Events")).Cursor()
+
+	// Our time range spans the '90s.
+	min := []byte("1990-01-01T00:00:00Z")
+	max := []byte("2000-01-01T00:00:00Z")
+
+	// Iterate over the '90s.
+	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+		fmt.Printf("%s: %s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+Note that, while RFC3339 is sortable, the Go implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
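+
+If you need sub-second precision in your keys, one approach (shown here as a
+sketch; the `eventKey` helper is not part of Bolt's API) is to format the time
+with a fixed number of fractional digits. Go's `.000000000` layout pads with
+trailing zeros, so every key has the same width and byte order matches
+chronological order:
+
+```go
+// eventKey formats t as a fixed-width, byte-sortable key with
+// nanosecond precision. Unlike time.RFC3339Nano, the ".000000000"
+// layout never drops trailing zeros, so the key width is constant.
+func eventKey(t time.Time) []byte {
+	return []byte(t.UTC().Format("2006-01-02T15:04:05.000000000Z"))
+}
+```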
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	b := tx.Bucket([]byte("MyBucket"))
+
+	return b.ForEach(func(k, v []byte) error {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+		return nil
+	})
+})
+```
+
+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `Tx` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+
+### Database backups
+
+Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to back up over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+	err := db.View(func(tx *bolt.Tx) error {
+		w.Header().Set("Content-Type", "application/octet-stream")
+		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+		_, err := tx.WriteTo(w)
+		return err
+	})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+```
+
+Then you can back up using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to back up to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+	// Grab the initial stats.
+	prev := db.Stats()
+
+	for {
+		// Wait for 10s.
+		time.Sleep(10 * time.Second)
+
+		// Grab the current stats and diff them.
+		stats := db.Stats()
+		diff := stats.Sub(&prev)
+
+		// Encode stats to JSON and print to STDERR.
+		json.NewEncoder(os.Stderr).Encode(diff)
+
+		// Save stats for the next loop.
+		prev = stats
+	}
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a file path where the database file will be stored.
+Neither Android nor iOS requires extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return &BoltDB{db}
+}
+
+type BoltDB struct {
+	db *bolt.DB
+	...
+}
+
+func (b *BoltDB) Path() string {
+	return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+	b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language (both platforms now sync
+their local storage to the cloud, so these snippets disable that functionality
+for the database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else {
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+                                                          NSUserDomainMask,
+                                                          YES) objectAtIndex:0];
+    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+    [self addSkipBackupAttributeToItemAtPath:demo.path];
+    // Some DB logic would go here
+    [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *)filePathString
+{
+    NSURL* URL = [NSURL fileURLWithPath: filePathString];
+    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+    NSError *error = nil;
+    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
+    if (!success) {
+        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+    }
+    return success;
+}
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your system the
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network.
Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application; however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write-ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read-intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long-running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed.
Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory as needed to other processes. This means
+  that Bolt can show very high memory usage when working with large databases.
+  However, this is expected and the OS will release memory as needed. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. This may be a problem
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian-specific. This means that you cannot copy a Bolt file from a
+  little-endian machine to a big-endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk. Instead, Bolt maintains a free list
+  of unused pages within its data file. These free pages can be reused by later
+  transactions. This works well for many use cases as databases generally tend
+  to grow. However, it's important to note that deleting large chunks of data
+  will not allow you to reclaim that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward.
The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web-based GUI for BoltDB files.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as an optional storage engine and testing it against Basho-tuned leveldb.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed.
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high-performance NoSQL store, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a Linux server.
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, with a JSON-over-HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+  backed by boltdb.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+  simple tx and key scans.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go
new file mode 100644
index 0000000..820d533
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_386.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go
new file mode 100644
index 0000000..98fafdb
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 0000000..7e5cb4b --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 0000000..b26d84f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 0000000..2b67666 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 0000000..7058c3d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 0000000..645ddc3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go
new file mode 100644
index 0000000..2dc6be0
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go
@@ -0,0 +1,9 @@
+// +build ppc64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
new file mode 100644
index 0000000..8c143bc
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
@@ -0,0 +1,12 @@
+// +build ppc64le
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go
new file mode 100644
index 0000000..d7c39af
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go
@@ -0,0 +1,12 @@
+// +build s390x
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go
new file mode 100644
index 0000000..cad62dd
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_unix.go
@@ -0,0 +1,89 @@
+// +build !windows,!plan9,!solaris
+
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+		flag := syscall.LOCK_SH
+		if exclusive {
+			flag = syscall.LOCK_EX
+		}
+
+		// Otherwise attempt to obtain the shared or exclusive lock.
+		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
+		if err == nil {
+			return nil
+		} else if err != syscall.EWOULDBLOCK {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Map the data file to memory.
+	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+	if err != nil {
+		return err
+	}
+
+	// Advise the kernel that the mmap is accessed randomly.
+	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+		return fmt.Errorf("madvise: %s", err)
+	}
+
+	// Save the original byte slice and convert to a byte array pointer.
+	db.dataref = b
+	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+	db.datasz = sz
+	return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 0000000..307bf2b --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 0000000..d538e6a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. 
+ sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 0000000..f504425 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 0000000..511ce72 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,778 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. 
+func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } else { + return nil, ErrIncompatibleValue + } + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. 
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, bucketLeafFlag)
+
+	// Since subbuckets are not allowed on inline buckets, we need to
+	// dereference the inline page, if it exists. This will cause the bucket
+	// to be treated as a regular, non-inline bucket for the rest of the tx.
+	b.page = nil
+
+	return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+	child, err := b.CreateBucket(key)
+	if err == ErrBucketExists {
+		return b.Bucket(key), nil
+	} else if err != nil {
+		return nil, err
+	}
+	return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if bucket doesn't exist or is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	child.nodes = nil
+	child.rootNode = nil
+	child.free()
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
+	// If our target node isn't the same key as what's passed in then return nil.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+	return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrKeyRequired
+	} else if len(key) > MaxKeySize {
+		return ErrKeyTooLarge
+	} else if int64(len(value)) > MaxValueSize {
+		return ErrValueTooLarge
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key with a bucket value.
+	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, 0)
+
+	return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	_, _, flags := c.seek(key)
+
+	// Return an error if there is an existing bucket value.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Set the sequence.
+	b.bucket.sequence = v
+	return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+	if b.tx.db == nil {
+		return 0, ErrTxClosed
+	} else if !b.Writable() {
+		return 0, ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence++
+	return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	}
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if err := fn(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+	var s, subStats BucketStats
+	pageSize := b.tx.db.pageSize
+	s.BucketN += 1
+	if b.root == 0 {
+		s.InlineBucketN += 1
+	}
+	b.forEachPage(func(p *page, depth int) {
+		if (p.flags & leafPageFlag) != 0 {
+			s.KeyN += int(p.count)
+
+			// used totals the used bytes for the page
+			used := pageHeaderSize
+
+			if p.count != 0 {
+				// If page has any elements, add all element headers.
+				used += leafPageElementSize * int(p.count-1)
+
+				// Add all element key, value sizes.
+				// The computation takes advantage of the fact that the position
+				// of the last element's key/value equals the total of the sizes
+				// of all previous elements' keys and values.
+				// It also includes the last element's header.
+				lastElement := p.leafPageElement(p.count - 1)
+				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+			}
+
+			if b.root == 0 {
+				// For inlined buckets just update the inline stats
+				s.InlineBucketInuse += used
+			} else {
+				// For non-inlined buckets update all the leaf stats
+				s.LeafPageN++
+				s.LeafInuse += used
+				s.LeafOverflowN += int(p.overflow)
+
+				// Collect stats from sub-buckets.
+				// Do that by iterating over all element headers
+				// looking for the ones with the bucketLeafFlag.
+				for i := uint16(0); i < p.count; i++ {
+					e := p.leafPageElement(i)
+					if (e.flags & bucketLeafFlag) != 0 {
+						// For any bucket element, open the element value
+						// and recursively call Stats on the contained bucket.
+						subStats.Add(b.openBucket(e.value()).Stats())
+					}
+				}
+			}
+		} else if (p.flags & branchPageFlag) != 0 {
+			s.BranchPageN++
+			lastElement := p.branchPageElement(p.count - 1)
+
+			// used totals the used bytes for the page
+			// Add header and all element headers.
+			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+
+			// Add size of all keys and values.
+			// Again, use the fact that the last element's position equals the
+			// total of the key and value sizes of all previous elements.
+			used += int(lastElement.pos + lastElement.ksize)
+			s.BranchInuse += used
+			s.BranchOverflowN += int(p.overflow)
+		}
+
+		// Keep track of maximum page depth.
+		if depth+1 > s.Depth {
+			s.Depth = (depth + 1)
+		}
+	})
+
+	// Alloc stats can be computed from page counts and pageSize.
+	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+	// Add the max depth of sub-buckets to get total nested depth.
+	s.Depth += subStats.Depth
+	// Add the stats for all sub-buckets
+	s.Add(subStats)
+	return s
+}
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*page, int)) {
+	// If we have an inline page then just use that.
+	if b.page != nil {
+		fn(b.page, 0)
+		return
+	}
+
+	// Otherwise traverse the page hierarchy.
+	b.tx.forEachPage(b.root, 0, fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
+	// If we have an inline page or root node then just use that.
+	if b.page != nil {
+		fn(b.page, nil, 0)
+		return
+	}
+	b._forEachPageNode(b.root, 0, fn)
+}
+
+func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
+	var p, n = b.pageNode(pgid)
+
+	// Execute function.
+	fn(p, n, depth)
+
+	// Recursively loop over children.
+	if p != nil {
+		if (p.flags & branchPageFlag) != 0 {
+			for i := 0; i < int(p.count); i++ {
+				elem := p.branchPageElement(uint16(i))
+				b._forEachPageNode(elem.pgid, depth+1, fn)
+			}
+		}
+	} else {
+		if !n.isLeaf {
+			for _, inode := range n.inodes {
+				b._forEachPageNode(inode.pgid, depth+1, fn)
+			}
+		}
+	}
+}
+
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+	// Spill all child buckets first.
+	for name, child := range b.buckets {
+		// If the child bucket is small enough and it has no child buckets then
+		// write it inline into the parent bucket's page. Otherwise spill it
+		// like a normal bucket and make the parent value a pointer to the page.
+		var value []byte
+		if child.inlineable() {
+			child.free()
+			value = child.write()
+		} else {
+			if err := child.spill(); err != nil {
+				return err
+			}
+
+			// Update the child bucket header in this bucket.
+ value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. 
+func (b *Bucket) free() {
+ if b.root == 0 {
+ return
+ }
+
+ var tx = b.tx
+ b.forEachPageNode(func(p *page, n *node, _ int) {
+ if p != nil {
+ tx.db.freelist.free(tx.meta.txid, p)
+ } else {
+ n.free()
+ }
+ })
+ b.root = 0
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ if b.rootNode != nil {
+ b.rootNode.root().dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.root == 0 {
+ if id != 0 {
+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+ return b.page, nil
+ }
+
+ // Check the node cache for non-inline buckets.
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+
+ // Finally lookup the page from the transaction if no node is materialized.
+ return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+ // Page count statistics.
+ BranchPageN int // number of logical branch pages
+ BranchOverflowN int // number of physical branch overflow pages
+ LeafPageN int // number of logical leaf pages
+ LeafOverflowN int // number of physical leaf overflow pages
+
+ // Tree statistics.
+ KeyN int // number of key/value pairs
+ Depth int // number of levels in B+tree
+
+ // Page size utilization.
+ BranchAlloc int // bytes allocated for physical branch pages
+ BranchInuse int // bytes actually used for branch data
+ LeafAlloc int // bytes allocated for physical leaf pages
+ LeafInuse int // bytes actually used for leaf data
+
+ // Bucket statistics
+ BucketN int // total number of buckets including the top bucket
+ InlineBucketN int // total number of inlined buckets
+ InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+ s.BranchPageN += other.BranchPageN
+ s.BranchOverflowN += other.BranchOverflowN
+ s.LeafPageN += other.LeafPageN
+ s.LeafOverflowN += other.LeafOverflowN
+ s.KeyN += other.KeyN
+ if s.Depth < other.Depth {
+ s.Depth = other.Depth
+ }
+ s.BranchAlloc += other.BranchAlloc
+ s.BranchInuse += other.BranchInuse
+ s.LeafAlloc += other.LeafAlloc
+ s.LeafInuse += other.LeafInuse
+
+ s.BucketN += other.BucketN
+ s.InlineBucketN += other.InlineBucketN
+ s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+ var clone = make([]byte, len(v))
+ copy(clone, v)
+ return clone
+}
diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go
new file mode 100644
index 0000000..1be9f35
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/cursor.go
@@ -0,0 +1,400 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
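+//
+// A minimal iteration sketch (assumes an open read transaction tx and an
+// existing bucket named "widgets"; both names are illustrative):
+//
+//	c := tx.Bucket([]byte("widgets")).Cursor()
+//	for k, v := c.First(); k != nil; k, v = c.Next() {
+//		fmt.Printf("key=%s, value=%s\n", k, v)
+//	}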
+// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. 
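+//
+// A small sketch of a prefix scan built on Seek (c is a Cursor; the prefix
+// is illustrative):
+//
+//	prefix := []byte("user:")
+//	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+//		fmt.Printf("%s => %s\n", k, v)
+//	}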
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. 
+ c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. 
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 0000000..1223493 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,1036 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. 
+ // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. 
+ // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) + } + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + if err := mmap(db, size); err != nil { + return err + } + + // Save references to the meta pages. + db.meta0 = db.page(0).meta() + db.meta1 = db.page(1).meta() + + // Validate the meta pages. We only return an error if both meta pages fail + // validation, since meta0 failing validation means that it wasn't saved + // properly -- but we can recover using meta1. And vice-versa. + err0 := db.meta0.validate() + err1 := db.meta1.validate() + if err0 != nil && err1 != nil { + return err0 + } + + return nil +} + +// munmap unmaps the data file from memory. +func (db *DB) munmap() error { + if err := munmap(db); err != nil { + return fmt.Errorf("unmap error: " + err.Error()) + } + return nil +} + +// mmapSize determines the appropriate size for the mmap given the current size +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. +// Returns an error if the new mmap size is greater than the max allowed. 
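+// For example, under these rules a 100KB data file maps to 128KB (the next
+// power of two), while sizes above 1GB are rounded up in 1GB steps and then
+// aligned to the page size.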
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return 1 << i, nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > maxMapSize {
+ return 0, fmt.Errorf("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+ // This should always be true since we're incrementing in MBs.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize {
+ sz = maxMapSize
+ }
+
+ return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Set the page size to the OS page size.
+ db.pageSize = os.Getpagesize()
+
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf[:], pgid(i))
+ p.id = pgid(i)
+ p.flags = metaPageFlag
+
+ // Initialize the meta page.
+ m := p.meta()
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+ m.txid = txid(i)
+ m.checksum = m.sum64()
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf[:], pgid(2))
+ p.id = pgid(2)
+ p.flags = freelistPageFlag
+ p.count = 0
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3))
+ p.id = pgid(3)
+ p.flags = leafPageFlag
+ p.count = 0
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.RLock()
+ defer db.mmaplock.RUnlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Close file handles.
+ if db.file != nil {
+ // No need to unlock read-only file.
+ if !db.readOnly {
+ // Unlock the file.
+ if err := funlock(db); err != nil {
+ log.Printf("bolt.Close(): funlock error: %s", err)
+ }
+ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+ return fmt.Errorf("db file close: %s", err)
+ }
+ db.file = nil
+ }
+
+ db.path = ""
+ return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
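+//
+// A minimal sketch of manual transaction management (the bucket name is
+// illustrative):
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//	if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()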
+// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. 
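+//
+// A minimal sketch (the bucket name is illustrative); returning an error
+// from the function rolls the transaction back:
+//
+//	err := db.Update(func(tx *bolt.Tx) error {
+//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+//		if err != nil {
+//			return err // rolls back
+//		}
+//		return b.Put([]byte("foo"), []byte("bar"))
+//	})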
+func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. 
it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. 
+ if p.id = db.freelist.allocate(count); p.id != 0 {
+ return p, nil
+ }
+
+ // Resize mmap() if we're at the end.
+ p.id = db.rwtx.meta.pgid
+ var minsz = int((p.id+pgid(count))+1) * db.pageSize
+ if minsz >= db.datasz {
+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
+ }
+
+ // Move the page id high water mark.
+ db.rwtx.meta.pgid += pgid(count)
+
+ return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ if sz <= db.filesz {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
+ // grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block write transaction
+ // if the InitialMmapSize is large enough to hold database mmap
+ // size. (See DB.Begin for more information)
+ //
+ // If <=0, the initial map size is 0.
+ // If initialMmapSize is smaller than the previous database size,
+ // it has no effect.
+ InitialMmapSize int
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+ Timeout: 0,
+ NoGrowSync: false,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+ // Freelist stats
+ FreePageN int // total number of free pages on the freelist
+ PendingPageN int // total number of pending pages on the freelist
+ FreeAlloc int // total bytes allocated in free pages
+ FreelistInuse int // total bytes used by the freelist
+
+ // Transaction stats
+ TxN int // total number of started read transactions
+ OpenTxN int // number of currently open read transactions
+
+ TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
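+//
+// A small sketch of sampling a window (the interval is illustrative):
+//
+//	prev := db.Stats()
+//	time.Sleep(10 * time.Second)
+//	stats := db.Stats()
+//	diff := stats.Sub(&prev)
+//	fmt.Printf("read txs started in window: %d\n", diff.TxN)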
+func (s *Stats) Sub(other *Stats) Stats {
+ if other == nil {
+ return *s
+ }
+ var diff Stats
+ diff.FreePageN = s.FreePageN
+ diff.PendingPageN = s.PendingPageN
+ diff.FreeAlloc = s.FreeAlloc
+ diff.FreelistInuse = s.FreelistInuse
+ diff.TxN = s.TxN - other.TxN
+ diff.TxStats = s.TxStats.Sub(&other.TxStats)
+ return diff
+}
+
+func (s *Stats) add(other *Stats) {
+ s.TxStats.add(&other.TxStats)
+}
+
+type Info struct {
+ Data uintptr
+ PageSize int
+}
+
+type meta struct {
+ magic uint32
+ version uint32
+ pageSize uint32
+ flags uint32
+ root bucket
+ freelist pgid
+ pgid pgid
+ txid txid
+ checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+ if m.magic != magic {
+ return ErrInvalid
+ } else if m.version != version {
+ return ErrVersionMismatch
+ } else if m.checksum != 0 && m.checksum != m.sum64() {
+ return ErrChecksum
+ }
+ return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+ *dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+ if m.root.root >= m.pgid {
+ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+ } else if m.freelist >= m.pgid {
+ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+ }
+
+ // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ p.id = pgid(m.txid % 2)
+ p.flags |= metaPageFlag
+
+ // Calculate the checksum.
+ m.checksum = m.sum64()
+
+ m.copy(p.meta())
+}
+
+// generates the checksum for the meta.
+func (m *meta) sum64() uint64 {
+ var h = fnv.New64a()
+ _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+ return h.Sum64()
+}
+
+// _assert will panic with a given formatted message if the given condition is false.
+func _assert(condition bool, msg string, v ...interface{}) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
+
+func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
+func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
+
+func printstack() {
+ stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
+ fmt.Fprintln(os.Stderr, stack)
+}
diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go
new file mode 100644
index 0000000..cc93784
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/doc.go
@@ -0,0 +1,44 @@
+/*
+Package bolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions which have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+
+Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
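+
+A minimal sketch of typical use (the path and names here are illustrative):
+
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		if err != nil {
+			return err
+		}
+		return b.Put([]byte("answer"), []byte("42"))
+	})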
+ +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go new file mode 100644 index 0000000..a3620a3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/errors.go @@ -0,0 +1,71 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. 
+ ErrKeyTooLarge = errors.New("key too large")
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ ErrValueTooLarge = errors.New("value too large")
+
+ // ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100644
index 0000000..d32f6cd
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,248 @@
+package bolt
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+ ids []pgid // all free and available free page ids.
+ pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+ cache map[pgid]bool // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+ return &freelist{
+ pending: make(map[txid][]pgid),
+ cache: make(map[pgid]bool),
+ }
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+ return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+ return f.free_count() + f.pending_count()
+}
+
+// free_count returns count of free pages
+func (f *freelist) free_count() int {
+ return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+ var count int
+ for _, list := range f.pending {
+ count += len(list)
+ }
+ return count
+}
+
+// all returns a list of all free ids and all pending ids in one sorted list.
+func (f *freelist) all() []pgid {
+ m := make(pgids, 0)
+
+ for _, list := range f.pending {
+ m = append(m, list...)
+ }
+
+ sort.Sort(m)
+ return pgids(f.ids).merge(m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+ if len(f.ids) == 0 {
+ return 0
+ }
+
+ var initial, previd pgid
+ for i, id := range f.ids {
+ if id <= 1 {
+ panic(fmt.Sprintf("invalid page allocation: %d", id))
+ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+ initial = id
+ }
+
+ // If we found a contiguous block then remove it and return it.
+ if (id-initial)+1 == pgid(n) {
+ // If we're allocating off the beginning then take the fast path
+ // and just adjust the existing slice. This will use extra memory
+ // temporarily but the append() in free() will realloc the slice
+ // as is necessary.
+ if (i + 1) == n {
+ f.ids = f.ids[i+1:]
+ } else {
+ copy(f.ids[i-n+1:], f.ids[i+1:])
+ f.ids = f.ids[:len(f.ids)-n]
+ }
+
+ // Remove from the free cache.
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, initial+i)
+ }
+
+ return initial
+ }
+
+ previd = id
+ }
+ return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+ if p.id <= 1 {
+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+ }
+
+ // Free page and all its overflow pages.
+ var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + ids := f.all() + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + if len(ids) == 0 { + p.count = uint16(len(ids)) + } else if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. 
+func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 0000000..159318b --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. 
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. 
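+ // The key is copied first, then the value; b is re-sliced past each
+ // copy so the next element's data lands immediately after.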
+ copy(b[0:], item.key)
+ b = b[klen:]
+ copy(b[0:], item.value)
+ b = b[vlen:]
+ }
+
+ // DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+ var nodes []*node
+
+ node := n
+ for {
+ // Split node into two.
+ a, b := node.splitTwo(pageSize)
+ nodes = append(nodes, a)
+
+ // If we can't split then exit the loop.
+ if b == nil {
+ break
+ }
+
+ // Set node to b so it gets split on the next iteration.
+ node = b
+ }
+
+ return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+ // Ignore the split if the page doesn't have at least enough nodes for
+ // two pages or if the nodes can fit in a single page.
+ if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ return n, nil
+ }
+
+ // Determine the threshold before starting a new node.
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.Split++
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This should only be called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+ sz = pageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ index = i
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if i >= minKeysPerPage && sz+elsize > threshold {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the children size on every loop iteration.
+ sort.Sort(n.children)
+ for i := 0; i < len(n.children); i++ {
+ if err := n.children[i].spill(); err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the child list because it's only used for spill tracking.
+ n.children = nil
+
+ // Split nodes into appropriate sizes. The first node will always be n.
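+ // Each node returned by split() is written to its own freshly allocated,
+ // contiguous set of pages in the loop below.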
+ var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. 
+ for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. 
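+// For branch elements, pgid references the child page and key holds the
+// separator key; for leaf elements, flags, key and value carry the element
+// itself.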
+type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 0000000..7651a6b --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,178 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. 
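+// Like key(), the returned slice aliases the mmap'd page data, so it is only
+// valid for the lifetime of the transaction.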
+func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) + + return merged +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 0000000..1cfb4cd --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,682 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. 
+func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. 
This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. 
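+ // Only the page id, txid and checksum differ between the two writes below.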
+ buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. 
+ if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. 
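+// The consistency check uses it to walk every page reachable from a bucket
+// root (see checkBucket above).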
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. + PageCount int // number of page allocations + PageAlloc int // total bytes allocated + + // Cursor statistics. + CursorCount int // number of cursors created + + // Node statistics + NodeCount int // number of node allocations + NodeDeref int // number of node dereferences + + // Rebalance statistics. + Rebalance int // number of node rebalances + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + Split int // number of nodes split + Spill int // number of nodes spilled + SpillTime time.Duration // total time spent spilling + + // Write statistics. + Write int // number of writes performed + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.PageCount += other.PageCount + s.PageAlloc += other.PageAlloc + s.CursorCount += other.CursorCount + s.NodeCount += other.NodeCount + s.NodeDeref += other.NodeDeref + s.Rebalance += other.Rebalance + s.RebalanceTime += other.RebalanceTime + s.Split += other.Split + s.Spill += other.Spill + s.SpillTime += other.SpillTime + s.Write += other.Write + s.WriteTime += other.WriteTime +} + +// Sub calculates and returns the difference between two sets of transaction stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. 
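+//
+// A sketch of the intended use (db.Stats is defined in db.go, not in this hunk):
+//
+//	prev := db.Stats().TxStats
+//	// ... perform some transactions ...
+//	cur := db.Stats().TxStats
+//	delta := cur.Sub(&prev)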
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/bsphere/le_go/LICENSE b/vendor/github.com/bsphere/le_go/LICENSE new file mode 100644 index 0000000..03f6677 --- /dev/null +++ b/vendor/github.com/bsphere/le_go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Gal Ben-Haim + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bsphere/le_go/README.md b/vendor/github.com/bsphere/le_go/README.md new file mode 100644 index 0000000..9d2ca9a --- /dev/null +++ b/vendor/github.com/bsphere/le_go/README.md @@ -0,0 +1,37 @@ +le_go +===== + +Golang client library for logentries.com + +It is compatible with http://golang.org/pkg/log/#Logger +and also implements http://golang.org/pkg/io/#Writer + +[![GoDoc](https://godoc.org/github.com/bsphere/le_go?status.png)](https://godoc.org/github.com/bsphere/le_go) + +[![Build Status](https://travis-ci.org/bsphere/le_go.svg)](https://travis-ci.org/bsphere/le_go) + +Usage +----- +Add a new manual TCP token log at [logentries.com](https://logentries.com/quick-start/) and copy the [token](https://logentries.com/doc/input-token/). + +Installation: `go get github.com/bsphere/le_go` + +**Note:** The Logger is blocking, it can be easily run in a goroutine by calling `go le.Println(...)` + +```go +package main + +import "github.com/bsphere/le_go" + +func main() { + le, err := le_go.Connect("XXXX-XXXX-XXXX-XXXX") // replace with token + if err != nil { + panic(err) + } + + defer le.Close() + + le.Println("another test message") +} +``` + diff --git a/vendor/github.com/bsphere/le_go/le.go b/vendor/github.com/bsphere/le_go/le.go new file mode 100644 index 0000000..553e4c7 --- /dev/null +++ b/vendor/github.com/bsphere/le_go/le.go @@ -0,0 +1,216 @@ +// Package le_go provides a Golang client library for logging to +// logentries.com over a TCP connection. +// +// it uses an access token for sending log events. 
+package le_go
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Logger represents a Logentries logger; it holds the open TCP connection,
+// access token, prefix and flags.
+//
+// All Logger operations are thread-safe and blocking; log operations can be
+// invoked in a non-blocking way by calling them from a goroutine.
+type Logger struct {
+ conn net.Conn
+ flag int
+ mu sync.Mutex
+ prefix string
+ token string
+ buf []byte
+}
+
+const lineSep = "\n"
+
+// Connect creates a new Logger instance and opens a TCP connection to
+// logentries.com.
+// The token can be generated at logentries.com by adding a new log,
+// choosing manual configuration and a token-based TCP connection.
+func Connect(token string) (*Logger, error) {
+ logger := Logger{
+ token: token,
+ }
+
+ if err := logger.openConnection(); err != nil {
+ return nil, err
+ }
+
+ return &logger, nil
+}
+
+// Close closes the TCP connection to logentries.com
+func (logger *Logger) Close() error {
+ if logger.conn != nil {
+ return logger.conn.Close()
+ }
+
+ return nil
+}
+
+// openConnection opens a TCP connection to logentries.com
+func (logger *Logger) openConnection() error {
+ conn, err := tls.Dial("tcp", "data.logentries.com:443", &tls.Config{})
+ if err != nil {
+ return err
+ }
+ logger.conn = conn
+ return nil
+}
+
+// isOpenConnection reports whether the TCP connection to logentries.com is open
+func (logger *Logger) isOpenConnection() bool {
+ if logger.conn == nil {
+ return false
+ }
+
+ buf := make([]byte, 1)
+
+ logger.conn.SetReadDeadline(time.Now())
+
+ _, err := logger.conn.Read(buf)
+
+ switch err.(type) {
+ case net.Error:
+ if err.(net.Error).Timeout() == true {
+ logger.conn.SetReadDeadline(time.Time{})
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// ensureOpenConnection ensures that the TCP connection to logentries.com is
+// open; if the connection is closed, a new one is opened.
+func (logger *Logger) ensureOpenConnection() error {
+ if !logger.isOpenConnection() {
+ if err := logger.openConnection(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Fatal is the same as Print() but also calls os.Exit(1)
+func (logger *Logger) Fatal(v ...interface{}) {
+ logger.Output(2, fmt.Sprint(v...))
+ os.Exit(1)
+}
+
+// Fatalf is the same as Printf() but also calls os.Exit(1)
+func (logger *Logger) Fatalf(format string, v ...interface{}) {
+ logger.Output(2, fmt.Sprintf(format, v...))
+ os.Exit(1)
+}
+
+// Fatalln is the same as Println() but also calls os.Exit(1)
+func (logger *Logger) Fatalln(v ...interface{}) {
+ logger.Output(2, fmt.Sprintln(v...))
+ os.Exit(1)
+}
+
+// Flags returns the logger flags
+func (logger *Logger) Flags() int {
+ return logger.flag
+}
+
+// Output does the actual writing to the TCP connection
+func (logger *Logger) Output(calldepth int, s string) error {
+ _, err := logger.Write([]byte(s))
+
+ return err
+}
+
+// Panic is the same as Print() but also calls panic
+func (logger *Logger) Panic(v ...interface{}) {
+ s := fmt.Sprint(v...)
+ logger.Output(2, s)
+ panic(s)
+}
+
+// Panicf is the same as Printf() but also calls panic
+func (logger *Logger) Panicf(format string, v ...interface{}) {
+ s := fmt.Sprintf(format, v...)
+ logger.Output(2, s)
+ panic(s)
+}
+
+// Panicln is the same as Println() but also calls panic
+func (logger *Logger) Panicln(v ...interface{}) {
+ s := fmt.Sprintln(v...)
+ logger.Output(2, s) + panic(s) +} + +// Prefix returns the logger prefix +func (logger *Logger) Prefix() string { + return logger.prefix +} + +// Print logs a message +func (logger *Logger) Print(v ...interface{}) { + logger.Output(2, fmt.Sprint(v...)) +} + +// Printf logs a formatted message +func (logger *Logger) Printf(format string, v ...interface{}) { + logger.Output(2, fmt.Sprintf(format, v...)) +} + +// Println logs a message with a linebreak +func (logger *Logger) Println(v ...interface{}) { + logger.Output(2, fmt.Sprintln(v...)) +} + +// SetFlags sets the logger flags +func (logger *Logger) SetFlags(flag int) { + logger.flag = flag +} + +// SetPrefix sets the logger prefix +func (logger *Logger) SetPrefix(prefix string) { + logger.prefix = prefix +} + +// Write writes a bytes array to the Logentries TCP connection, +// it adds the access token and prefix and also replaces +// line breaks with the unicode \u2028 character +func (logger *Logger) Write(p []byte) (n int, err error) { + if err := logger.ensureOpenConnection(); err != nil { + return 0, err + } + + logger.mu.Lock() + defer logger.mu.Unlock() + + logger.makeBuf(p) + + return logger.conn.Write(logger.buf) +} + +// makeBuf constructs the logger buffer +// it is not safe to be used from within multiple concurrent goroutines +func (logger *Logger) makeBuf(p []byte) { + count := strings.Count(string(p), lineSep) + p = []byte(strings.Replace(string(p), lineSep, "\u2028", count-1)) + + logger.buf = logger.buf[:0] + logger.buf = append(logger.buf, (logger.token + " ")...) + logger.buf = append(logger.buf, (logger.prefix + " ")...) + logger.buf = append(logger.buf, p...) + + if !strings.HasSuffix(string(logger.buf), lineSep) { + logger.buf = append(logger.buf, (lineSep)...) + } +} diff --git a/vendor/github.com/cloudflare/cfssl/LICENSE b/vendor/github.com/cloudflare/cfssl/LICENSE new file mode 100644 index 0000000..bc5841f --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2014 CloudFlare Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/cloudflare/cfssl/README.md b/vendor/github.com/cloudflare/cfssl/README.md new file mode 100644 index 0000000..7285eee --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/README.md @@ -0,0 +1,438 @@ +# CFSSL + +[![Build Status](https://travis-ci.org/cloudflare/cfssl.svg?branch=master)](https://travis-ci.org/cloudflare/cfssl) +[![Coverage Status](http://codecov.io/github/cloudflare/cfssl/coverage.svg?branch=master)](http://codecov.io/github/cloudflare/cfssl?branch=master) +[![GoDoc](https://godoc.org/github.com/cloudflare/cfssl?status.svg)](https://godoc.org/github.com/cloudflare/cfssl) + +## CloudFlare's PKI/TLS toolkit + +CFSSL is CloudFlare's PKI/TLS swiss army knife. It is both a command line +tool and an HTTP API server for signing, verifying, and bundling TLS +certificates. It requires Go 1.5+ to build. + +Note that certain linux distributions have certain algorithms removed +(RHEL-based distributions in particular), so the golang from the +official repositories will not work. Users of these distributions should +[install go manually](//golang.org/dl) to install CFSSL. + +CFSSL consists of: + +* a set of packages useful for building custom TLS PKI tools +* the `cfssl` program, which is the canonical command line utility + using the CFSSL packages. +* the `multirootca` program, which is a certificate authority server + that can use multiple signing keys. +* the `mkbundle` program is used to build certificate pool bundles. +* the `cfssljson` program, which takes the JSON output from the + `cfssl` and `multirootca` programs and writes certificates, keys, + CSRs, and bundles to disk. + +### Building + +See [BUILDING](BUILDING.md) + +### Installation + +Installation requires a +[working Go 1.5+ installation](http://golang.org/doc/install) and a +properly set `GOPATH`. + +``` +$ go get -u github.com/cloudflare/cfssl/cmd/cfssl +``` + +will download and build the CFSSL tool, installing it in +`$GOPATH/bin/cfssl`. To install the other utility programs that are in +this repo: + +``` +$ go get -u github.com/cloudflare/cfssl/cmd/... +``` + +This will download, build, and install `cfssl`, `cfssljson`, and +`mkbundle` into `$GOPATH/bin/`. + +Note that CFSSL makes use of vendored packages; in Go 1.5, the +`GO15VENDOREXPERIMENT` environment variable will need to be set, e.g. + +``` +export GO15VENDOREXPERIMENT=1 +``` + +In Go 1.6, this works out of the box. + +#### Installing pre-Go 1.5 +With a Go 1.4 or earlier installation, you won't be able to install the latest version of CFSSL. However, you can checkout the `1.1.0` release and build that. + +``` +git clone -b 1.1.0 https://github.com/cloudflare/cfssl.git $GOPATH/src/github.com/cloudflare/cfssl +go get github.com/cloudflare/cfssl/cmd/cfssl +``` + +### Using the Command Line Tool + +The `cfssl` command line tool takes a command to specify what +operation it should carry out: + + sign signs a certificate + bundle build a certificate bundle + genkey generate a private key and a certificate request + gencert generate a private key and a certificate + serve start the API server + version prints out the current version + selfsign generates a self-signed certificate + print-defaults print default configurations + +Use "cfssl [command] -help" to find out more about a command. +The version command takes no arguments. + +#### Signing + +``` +cfssl sign [-ca cert] [-ca-key key] [-hostname comma,separated,hostnames] csr [subject] +``` + +The csr is the client's certificate request. 
The `-ca` and `-ca-key`
+flags are the CA's certificate and private key, respectively. By
+default, they are "ca.pem" and "ca_key.pem". The `-hostname` is
+a comma-separated hostname list that overrides the DNS names and
+IP addresses in the certificate SAN extension.
+For example, assuming the CA's private key is in
+`/etc/ssl/private/cfssl_key.pem` and the CA's certificate is in
+`/etc/ssl/certs/cfssl.pem`, to sign the `cloudflare.pem` certificate
+for cloudflare.com:
+
+```
+cfssl sign -ca /etc/ssl/certs/cfssl.pem \
+ -ca-key /etc/ssl/private/cfssl_key.pem \
+ -hostname cloudflare.com ./cloudflare.pem
+```
+
+It is also possible to specify the csr through the '-csr' flag. By doing
+so, the flag value takes precedence and overwrites the argument.
+
+The subject is an optional file that contains subject information that
+should be used in place of the information from the CSR. It should be
+a JSON file with the type:
+
+```json
+{
+ "CN": "example.com",
+ "names": [
+ {
+ "C": "US",
+ "L": "San Francisco",
+ "O": "Internet Widgets, Inc.",
+ "OU": "WWW",
+ "ST": "California"
+ }
+ ]
+}
+```
+
+#### Bundling
+
+```
+cfssl bundle [-ca-bundle bundle] [-int-bundle bundle] \
+ [-metadata metadata_file] [-flavor bundle_flavor] \
+ -cert certificate_file [-key key_file]
+```
+
+The bundles are used for the root and intermediate certificate
+pools. In addition, platform metadata is specified through '-metadata'.
+The bundle files, metadata file (and auxiliary files) can be
+found at [cfssl_trust](https://github.com/cloudflare/cfssl_trust).
+
+Specify the PEM-encoded client certificate and key through '-cert' and
+'-key' respectively. If a key is specified, the bundle will be built
+and verified with the key; otherwise the bundle will be built
+without a private key. Instead of a file path, use '-' to read the
+certificate PEM from stdin. It is also acceptable for the certificate
+file to contain a (partial) certificate bundle.
+
+Specify the bundling flavor through '-flavor'. There are three flavors:
+'optimal' to generate a bundle with the shortest chain and the most
+advanced cryptographic algorithms, 'ubiquitous' to generate a bundle
+with the widest acceptance across different browsers and OS platforms,
+and 'force' to find an acceptable bundle whose content is identical to
+the input certificate file.
+
+Alternatively, the client certificate can be pulled directly from
+a domain. It is also possible to connect to the remote address
+through '-ip'.
+
+```
+cfssl bundle [-ca-bundle bundle] [-int-bundle bundle] \
+ [-metadata metadata_file] [-flavor bundle_flavor] \
+ -domain domain_name [-ip ip_address]
+```
+
+The bundle output should follow this example:
+
+```json
+{
+ "bundle": "CERT_BUNDLE_IN_PEM",
+ "crt": "LEAF_CERT_IN_PEM",
+ "crl_support": true,
+ "expires": "2015-12-31T23:59:59Z",
+ "hostnames": ["example.com"],
+ "issuer": "ISSUER CERT SUBJECT",
+ "key": "KEY_IN_PEM",
+ "key_size": 2048,
+ "key_type": "2048-bit RSA",
+ "ocsp": ["http://ocsp.example-ca.com"],
+ "ocsp_support": true,
+ "root": "ROOT_CA_CERT_IN_PEM",
+ "signature": "SHA1WithRSA",
+ "subject": "LEAF CERT SUBJECT",
+ "status": {
+ "rebundled": false,
+ "expiring_SKIs": [],
+ "untrusted_root_stores": [],
+ "messages": [],
+ "code": 0
+ }
+}
+```
+
+#### Generating certificate signing request and private key
+
+```
+cfssl genkey csr.json
+```
+
+To generate a private key and corresponding certificate request, specify
+the key request as a JSON file.
This file should follow the form:
+
+```json
+{
+ "hosts": [
+ "example.com",
+ "www.example.com"
+ ],
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "US",
+ "L": "San Francisco",
+ "O": "Internet Widgets, Inc.",
+ "OU": "WWW",
+ "ST": "California"
+ }
+ ]
+}
+```
+
+#### Generating self-signed root CA certificate and private key
+
+```
+cfssl genkey -initca csr.json | cfssljson -bare ca
+```
+
+To generate a self-signed root CA certificate, specify the key request as
+a JSON file in the same format as in 'genkey'. Three PEM-encoded entities
+will appear in the output: the private key, the csr, and the self-signed
+certificate.
+
+#### Generating a remote-issued certificate and private key
+
+```
+cfssl gencert -remote=remote_server [-hostname=comma,separated,hostnames] csr.json
+```
+
+This calls genkey but has a remote CFSSL server sign and issue
+the certificate. You may use `-hostname` to override certificate SANs.
+
+#### Generating a local-issued certificate and private key
+
+```
+cfssl gencert -ca cert -ca-key key [-hostname=comma,separated,hostnames] csr.json
+```
+
+This generates and issues a certificate and private key from a local CA
+via a JSON request. You may use `-hostname` to override certificate SANs.
+
+#### Updating an OCSP responses file with a newly issued certificate
+
+```
+cfssl ocspsign -ca cert -responder key -responder-key key -cert cert \
+ | cfssljson -bare -stdout >> responses
+```
+
+This will generate an OCSP response for the `cert` and add it to the
+`responses` file. You can then pass `responses` to `ocspserve` to start an
+OCSP server.
+
+### Starting the API Server
+
+CFSSL comes with an HTTP-based API server; the endpoints are
+documented in `doc/api.txt`. The server is started with the "serve"
+command:
+
+```
+cfssl serve [-address address] [-ca cert] [-ca-bundle bundle] \
+ [-ca-key key] [-int-bundle bundle] [-int-dir dir] [-port port] \
+ [-metadata file] [-remote remote_host] [-config config] \
+ [-responder cert] [-responder-key key] [-db-config db-config]
+```
+
+Address and port default to "127.0.0.1:8888". The `-ca` and `-ca-key`
+arguments should be the PEM-encoded certificate and private key to use
+for signing; by default, they are "ca.pem" and "ca_key.pem". The
+`-ca-bundle` and `-int-bundle` should be the certificate bundles used
+for the root and intermediate certificate pools, respectively. These
+default to "ca-bundle.crt" and "int-bundle." If the "remote" option is
+provided, all signature operations will be forwarded to the remote CFSSL.
+
+'-int-dir' specifies the intermediates directory. '-metadata' is a file
+for root certificate presence: its content is a JSON dictionary (k,v)
+where each key k is the SHA-1 digest of a root certificate and each
+value v is a list of key store filenames. '-config' specifies the path
+to the configuration file. '-responder' and '-responder-key' are the
+OCSP responder certificate and its private key, respectively.
+
+The amount of logging can be controlled with the `-loglevel` option. This
+comes *after* the serve command:
+
+```
+cfssl serve -loglevel 2
+```
+
+The levels are:
+
+* 0. DEBUG
+* 1. INFO (this is the default level)
+* 2. WARNING
+* 3. ERROR
+* 4. CRITICAL
+
+### The multirootca
+
+The `cfssl` program can act as an online certificate authority, but it
+only uses a single key. If multiple signing keys are needed, the
+`multirootca` program can be used. It only provides the sign,
+authsign, and info endpoints.
+
+### The mkbundle Utility
+
+`mkbundle` is used to build the root and intermediate bundles used in
+verifying certificates. It can be installed with
+
+```
+go get -u github.com/cloudflare/cfssl/cmd/mkbundle
+```
+
+It takes a collection of certificates, checks for CRL revocation (OCSP
+support is planned for the next release) and expired certificates, and
+bundles them into one file. It takes directories of certificates and
+certificate files (which may contain multiple certificates). For example,
+if the directory `intermediates` contains a number of intermediate
+certificates, then
+
+```
+mkbundle -f int-bundle.crt intermediates
+```
+
+will check those certificates and combine the valid ones into a single
+`int-bundle.crt` file.
+
+The `-f` flag specifies the output name; `-loglevel` specifies the verbosity
+of the logging (using the same log levels as above); and `-nw` controls the
+number of revocation-checking workers.
+
+### The cfssljson Utility
+
+Most of the output from `cfssl` is in JSON. The `cfssljson` utility takes
+this output and splits it out into separate key, certificate, CSR, and
+bundle files as appropriate. The tool takes a single flag, `-f`, that
+specifies the input file, and an argument that specifies the base name for
+the files produced. If the input filename is "-" (which is the default),
+`cfssljson` reads from standard input. It maps keys in the JSON file to
+filenames in the following way:
+
+* if there is a "cert" (or, if not, a "certificate") field, the
+  file "basename.pem" will be produced.
+* if there is a "key" (or, if not, a "private_key") field, the
+  file "basename-key.pem" will be produced.
+* if there is a "csr" (or, if not, a "certificate_request") field,
+  the file "basename.csr" will be produced.
+* if there is a "bundle" field, the file "basename-bundle.pem" will
+  be produced.
+* if there is an "ocspResponse" field, the file "basename-response.der" will
+  be produced.
+
+Instead of saving to a file, you can pass `-stdout` to output the encoded
+contents.
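+
+For example (a sketch reusing the `genkey -initca` invocation shown
+earlier), the following writes the CA's certificate, private key, and CSR
+to `ca.pem`, `ca-key.pem`, and `ca.csr`, per the mapping above:
+
+```
+cfssl genkey -initca csr.json | cfssljson -bare ca
+```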
+
+### Static Builds
+
+By default, the web assets are accessed from disk, based on their
+relative locations. If you wish to distribute a single, statically-linked
+cfssl binary, you'll want to embed these resources before building. This
+can be done with the [go.rice](https://github.com/GeertJohan/go.rice) tool.
+
+```
+pushd cli/serve && rice embed-go && popd
+```
+
+Then building with `go build` will use the embedded resources.
+
+### Using a PKCS#11 hardware token / HSM
+
+For better security, you may want to store your private key in an HSM or
+smartcard. The interface to both of these categories of device is described by
+the PKCS#11 spec. If you need to do approximately one signing operation per
+second or fewer, the Yubikey NEO and NEO-n are inexpensive smartcard options:
+https://www.yubico.com/products/yubikey-hardware/yubikey-neo/. In general, you
+are looking for a product that supports PIV (personal identity verification). If
+your signing needs are in the hundreds of signatures per second, you will need
+to purchase an expensive HSM (in the thousands to many thousands of USD).
+
+If you want to try out the PKCS#11 signing modes without a hardware token, you
+can use the [SoftHSM](https://github.com/opendnssec/SoftHSMv1#softhsm)
+implementation. Please note that using SoftHSM simply stores your private key in
+a file on disk and does not increase security.
+
+To get started with your PKCS#11 token, you will need to initialize it with a
+private key, PIN, and token label. The instructions to do this will be specific
+to each hardware device, and you should follow the instructions provided by your
+vendor. You will also need to find the path to your 'module', a shared object
+file (.so). Having initialized your device, you can query it to check your token
+label with:
+
+    pkcs11-tool --module <module path> --list-token-slots
+
+You'll also want to check the label of the private key you imported (or
+generated). Run the following command and look for a 'Private Key Object':
+
+    pkcs11-tool --module <module path> --pin <pin> \
+        --list-token-slots --login --list-objects
+
+You now have all the information you need to use your PKCS#11 token with CFSSL.
+CFSSL supports PKCS#11 for certificate signing and OCSP signing. To create a
+Signer (for certificate signing), import `signer/universal` and call NewSigner
+with a Root object containing the module, PIN, token label, and private label
+from above, plus a path to your certificate. The structure of the Root object is
+documented in universal.go.
+
+Alternatively, you can construct a pkcs11key.Key or pkcs11key.Pool yourself and
+pass it to ocsp.NewSigner (for OCSP) or local.NewSigner (for certificate
+signing). This will be necessary, for example, if you are using a single-session
+token like the Yubikey and need both OCSP signing and certificate signing at the
+same time.
+
+### Additional Documentation
+
+Additional documentation can be found in the "doc/" directory:
+
+* `api.txt`: documents the API endpoints
+* `bootstrap.txt`: a walkthrough from building the package to getting
+  up and running
diff --git a/vendor/github.com/cloudflare/cfssl/api/api.go b/vendor/github.com/cloudflare/cfssl/api/api.go
new file mode 100644
index 0000000..f1040ca
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/api/api.go
@@ -0,0 +1,231 @@
+// Package api implements an HTTP-based API and server for CFSSL.
+package api
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/log"
+)
+
+// Handler is an interface providing a generic mechanism for handling HTTP requests.
+type Handler interface {
+	Handle(w http.ResponseWriter, r *http.Request) error
+}
+
+// HTTPHandler is a wrapper that encapsulates the Handler interface as an http.Handler.
+// HTTPHandler also enforces that the Handler only responds to requests with registered HTTP methods.
+type HTTPHandler struct {
+	Handler          // CFSSL handler
+	Methods []string // The associated HTTP methods
+}
+
+// HandlerFunc is similar to the http.HandlerFunc type; it serves as
+// an adapter allowing the use of ordinary functions as Handlers. If
+// f is a function with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(http.ResponseWriter, *http.Request) error
+
+// Handle calls f(w, r).
+func (f HandlerFunc) Handle(w http.ResponseWriter, r *http.Request) error {
+	w.Header().Set("Content-Type", "application/json")
+	return f(w, r)
+}
+
+// handleError is the centralised error handling and reporting.
+func handleError(w http.ResponseWriter, err error) (code int) {
+	if err == nil {
+		return http.StatusOK
+	}
+	msg := err.Error()
+	httpCode := http.StatusInternalServerError
+
+	// If it is recognized as HttpError emitted from cfssl,
+	// we rewrite the status code accordingly. 
If it is a + // cfssl error, set the http status to StatusBadRequest + switch err := err.(type) { + case *errors.HTTPError: + httpCode = err.StatusCode + code = err.StatusCode + case *errors.Error: + httpCode = http.StatusBadRequest + code = err.ErrorCode + msg = err.Message + } + + response := NewErrorResponse(msg, code) + jsonMessage, err := json.Marshal(response) + if err != nil { + log.Errorf("Failed to marshal JSON: %v", err) + } else { + msg = string(jsonMessage) + } + http.Error(w, msg, httpCode) + return code +} + +// ServeHTTP encapsulates the call to underlying Handler to handle the request +// and return the response with proper HTTP status code +func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var err error + var match bool + // Throw 405 when requested with an unsupported verb. + for _, m := range h.Methods { + if m == r.Method { + match = true + } + } + if match { + err = h.Handle(w, r) + } else { + err = errors.NewMethodNotAllowed(r.Method) + } + status := handleError(w, err) + log.Infof("%s - \"%s %s\" %d", r.RemoteAddr, r.Method, r.URL, status) +} + +// readRequestBlob takes a JSON-blob-encoded response body in the form +// map[string]string and returns it, the list of keywords presented, +// and any error that occurred. +func readRequestBlob(r *http.Request) (map[string]string, error) { + var blob map[string]string + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + r.Body.Close() + + err = json.Unmarshal(body, &blob) + if err != nil { + return nil, err + } + return blob, nil +} + +// ProcessRequestOneOf reads a JSON blob for the request and makes +// sure it contains one of a set of keywords. For example, a request +// might have the ('foo' && 'bar') keys, OR it might have the 'baz' +// key. In either case, we want to accept the request; however, if +// none of these sets shows up, the request is a bad request, and it +// should be returned. +func ProcessRequestOneOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) { + blob, err := readRequestBlob(r) + if err != nil { + return nil, nil, err + } + + var matched []string + for _, set := range keywordSets { + if matchKeywords(blob, set) { + if matched != nil { + return nil, nil, errors.NewBadRequestString("mismatched parameters") + } + matched = set + } + } + if matched == nil { + return nil, nil, errors.NewBadRequestString("no valid parameter sets found") + } + return blob, matched, nil +} + +// ProcessRequestFirstMatchOf reads a JSON blob for the request and returns +// the first match of a set of keywords. For example, a request +// might have one of the following combinations: (foo=1, bar=2), (foo=1), and (bar=2) +// By giving a specific ordering of those combinations, we could decide how to accept +// the request. +func ProcessRequestFirstMatchOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) { + blob, err := readRequestBlob(r) + if err != nil { + return nil, nil, err + } + + for _, set := range keywordSets { + if matchKeywords(blob, set) { + return blob, set, nil + } + } + return nil, nil, errors.NewBadRequestString("no valid parameter sets found") +} + +func matchKeywords(blob map[string]string, keywords []string) bool { + for _, keyword := range keywords { + if _, ok := blob[keyword]; !ok { + return false + } + } + return true +} + +// ResponseMessage implements the standard for response errors and +// messages. A message has a code and a string message. 
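+// A serialized ResponseMessage looks like, for example:
+//
+//	{"code":1002,"message":"Failed to decode certificate"}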
+type ResponseMessage struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// Response implements the CloudFlare standard for API +// responses. +type Response struct { + Success bool `json:"success"` + Result interface{} `json:"result"` + Errors []ResponseMessage `json:"errors"` + Messages []ResponseMessage `json:"messages"` +} + +// NewSuccessResponse is a shortcut for creating new successul API +// responses. +func NewSuccessResponse(result interface{}) Response { + return Response{ + Success: true, + Result: result, + Errors: []ResponseMessage{}, + Messages: []ResponseMessage{}, + } +} + +// NewSuccessResponseWithMessage is a shortcut for creating new successul API +// responses that includes a message. +func NewSuccessResponseWithMessage(result interface{}, message string, code int) Response { + return Response{ + Success: true, + Result: result, + Errors: []ResponseMessage{}, + Messages: []ResponseMessage{{code, message}}, + } +} + +// NewErrorResponse is a shortcut for creating an error response for a +// single error. +func NewErrorResponse(message string, code int) Response { + return Response{ + Success: false, + Result: nil, + Errors: []ResponseMessage{{code, message}}, + Messages: []ResponseMessage{}, + } +} + +// SendResponse builds a response from the result, sets the JSON +// header, and writes to the http.ResponseWriter. +func SendResponse(w http.ResponseWriter, result interface{}) error { + response := NewSuccessResponse(result) + w.Header().Set("Content-Type", "application/json") + enc := json.NewEncoder(w) + err := enc.Encode(response) + return err +} + +// SendResponseWithMessage builds a response from the result and the +// provided message, sets the JSON header, and writes to the +// http.ResponseWriter. +func SendResponseWithMessage(w http.ResponseWriter, result interface{}, message string, code int) error { + response := NewSuccessResponseWithMessage(result, message, code) + w.Header().Set("Content-Type", "application/json") + enc := json.NewEncoder(w) + err := enc.Encode(response) + return err +} diff --git a/vendor/github.com/cloudflare/cfssl/auth/auth.go b/vendor/github.com/cloudflare/cfssl/auth/auth.go new file mode 100644 index 0000000..ecd5e5f --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/auth/auth.go @@ -0,0 +1,94 @@ +// Package auth implements an interface for providing CFSSL +// authentication. This is meant to authenticate a client CFSSL to a +// remote CFSSL in order to prevent unauthorised use of the signature +// capabilities. This package provides both the interface and a +// standard HMAC-based implementation. +package auth + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "strings" +) + +// An AuthenticatedRequest contains a request and authentication +// token. The Provider may determine whether to validate the timestamp +// and remote address. +type AuthenticatedRequest struct { + // An Authenticator decides whether to use this field. + Timestamp int64 `json:"timestamp,omitempty"` + RemoteAddress []byte `json:"remote_address,omitempty"` + Token []byte `json:"token"` + Request []byte `json:"request"` +} + +// A Provider can generate tokens from a request and verify a +// request. The handling of additional authentication data (such as +// the IP address) is handled by the concrete type, as is any +// serialisation and state-keeping. 
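+// The Standard implementation below, for example, computes an
+// HMAC-SHA-256 tag over request || additional-data, and its Verify
+// method compares tags in constant time.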
+type Provider interface { + Token(req []byte) (token []byte, err error) + Verify(aReq *AuthenticatedRequest) bool +} + +// Standard implements an HMAC-SHA-256 authentication provider. It may +// be supplied additional data at creation time that will be used as +// request || additional-data with the HMAC. +type Standard struct { + key []byte + ad []byte +} + +// New generates a new standard authentication provider from the key +// and additional data. The additional data will be used when +// generating a new token. +func New(key string, ad []byte) (*Standard, error) { + if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 { + switch splitKey[0] { + case "env": + key = os.Getenv(splitKey[1]) + case "file": + data, err := ioutil.ReadFile(splitKey[1]) + if err != nil { + return nil, err + } + key = string(data) + default: + return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0]) + } + } + + keyBytes, err := hex.DecodeString(key) + if err != nil { + return nil, err + } + + return &Standard{keyBytes, ad}, nil +} + +// Token generates a new authentication token from the request. +func (p Standard) Token(req []byte) (token []byte, err error) { + h := hmac.New(sha256.New, p.key) + h.Write(req) + h.Write(p.ad) + return h.Sum(nil), nil +} + +// Verify determines whether an authenticated request is valid. +func (p Standard) Verify(ad *AuthenticatedRequest) bool { + if ad == nil { + return false + } + + // Standard token generation returns no error. + token, _ := p.Token(ad.Request) + if len(ad.Token) != len(token) { + return false + } + + return hmac.Equal(token, ad.Token) +} diff --git a/vendor/github.com/cloudflare/cfssl/certdb/README.md b/vendor/github.com/cloudflare/cfssl/certdb/README.md new file mode 100644 index 0000000..fbd941e --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/certdb/README.md @@ -0,0 +1,75 @@ +# certdb usage + +Using a database enables additional functionality for existing commands when a +db config is provided: + + - `sign` and `gencert` add a certificate to the certdb after signing it + - `serve` enables database functionality for the sign and revoke endpoints + +A database is required for the following: + + - `revoke` marks certificates revoked in the database with an optional reason + - `ocsprefresh` refreshes the table of cached OCSP responses + - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format + +## Setup/Migration + +This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends. +Currently supported: + - MySQL in mysql + - PostgreSQL in pg + - SQLite in sqlite + +### Get goose + + go get bitbucket.org/liamstask/goose/cmd/goose + +### Use goose to start and terminate a MySQL DB +To start a MySQL using goose: + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up + +To tear down a MySQL DB using goose + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down + +Note: the administration of MySQL DB is not included. We assume +the databases being connected to are already created and access control +is properly handled. + +### Use goose to start and terminate a PostgreSQL DB +To start a PostgreSQL using goose: + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up + +To tear down a PostgreSQL DB using goose + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down + +Note: the administration of PostgreSQL DB is not included. 
We assume +the databases being connected to are already created and access control +is properly handled. + +### Use goose to start and terminate a SQLite DB +To start a SQLite DB using goose: + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up + +To tear down a SQLite DB using goose + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down + +## CFSSL Configuration + +Several cfssl commands take a -db-config flag. Create a file with a +JSON dictionary: + + {"driver":"sqlite3","data_source":"certs.db"} + +or + + {"driver":"postgres","data_source":"postgres://user:password@host/db"} + +or + + {"driver":"mysql","data_source":"user:password@tcp(hostname:3306)/db?parseTime=true"} diff --git a/vendor/github.com/cloudflare/cfssl/certdb/certdb.go b/vendor/github.com/cloudflare/cfssl/certdb/certdb.go new file mode 100644 index 0000000..96694f7 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/certdb/certdb.go @@ -0,0 +1,40 @@ +package certdb + +import ( + "time" +) + +// CertificateRecord encodes a certificate and its metadata +// that will be recorded in a database. +type CertificateRecord struct { + Serial string `db:"serial_number"` + AKI string `db:"authority_key_identifier"` + CALabel string `db:"ca_label"` + Status string `db:"status"` + Reason int `db:"reason"` + Expiry time.Time `db:"expiry"` + RevokedAt time.Time `db:"revoked_at"` + PEM string `db:"pem"` +} + +// OCSPRecord encodes a OCSP response body and its metadata +// that will be recorded in a database. +type OCSPRecord struct { + Serial string `db:"serial_number"` + AKI string `db:"authority_key_identifier"` + Body string `db:"body"` + Expiry time.Time `db:"expiry"` +} + +// Accessor abstracts the CRUD of certdb objects from a DB. +type Accessor interface { + InsertCertificate(cr CertificateRecord) error + GetCertificate(serial, aki string) ([]CertificateRecord, error) + GetUnexpiredCertificates() ([]CertificateRecord, error) + RevokeCertificate(serial, aki string, reasonCode int) error + InsertOCSP(rr OCSPRecord) error + GetOCSP(serial, aki string) ([]OCSPRecord, error) + GetUnexpiredOCSPs() ([]OCSPRecord, error) + UpdateOCSP(serial, aki, body string, expiry time.Time) error + UpsertOCSP(serial, aki, body string, expiry time.Time) error +} diff --git a/vendor/github.com/cloudflare/cfssl/config/config.go b/vendor/github.com/cloudflare/cfssl/config/config.go new file mode 100644 index 0000000..d5eb7c3 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/config/config.go @@ -0,0 +1,659 @@ +// Package config contains the configuration logic for CFSSL. +package config + +import ( + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" + "time" + + "github.com/cloudflare/cfssl/auth" + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/log" + ocspConfig "github.com/cloudflare/cfssl/ocsp/config" +) + +// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is +// not present in a SigningProfile, all of these fields may be copied from the +// CSR into the signed certificate. If a CSRWhitelist *is* present in a +// SigningProfile, only those fields with a `true` value in the CSRWhitelist may +// be copied from the CSR to the signed certificate. Note that some of these +// fields, like Subject, can be provided or partially provided through the API. 
+// Since API clients are expected to be trusted, but CSRs are not, fields +// provided through the API are not subject to whitelisting through this +// mechanism. +type CSRWhitelist struct { + Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool + DNSNames, IPAddresses, EmailAddresses bool +} + +// OID is our own version of asn1's ObjectIdentifier, so we can define a custom +// JSON marshal / unmarshal. +type OID asn1.ObjectIdentifier + +// CertificatePolicy represents the ASN.1 PolicyInformation structure from +// https://tools.ietf.org/html/rfc3280.html#page-106. +// Valid values of Type are "id-qt-unotice" and "id-qt-cps" +type CertificatePolicy struct { + ID OID + Qualifiers []CertificatePolicyQualifier +} + +// CertificatePolicyQualifier represents a single qualifier from an ASN.1 +// PolicyInformation structure. +type CertificatePolicyQualifier struct { + Type string + Value string +} + +// AuthRemote is an authenticated remote signer. +type AuthRemote struct { + RemoteName string `json:"remote"` + AuthKeyName string `json:"auth_key"` +} + +// CAConstraint specifies various CA constraints on the signed certificate. +// CAConstraint would verify against (and override) the CA +// extensions in the given CSR. +type CAConstraint struct { + IsCA bool `json:"is_ca"` + MaxPathLen int `json:"max_path_len"` + MaxPathLenZero bool `json:"max_path_len_zero"` +} + +// A SigningProfile stores information that the CA needs to store +// signature policy. +type SigningProfile struct { + Usage []string `json:"usages"` + IssuerURL []string `json:"issuer_urls"` + OCSP string `json:"ocsp_url"` + CRL string `json:"crl_url"` + CAConstraint CAConstraint `json:"ca_constraint"` + OCSPNoCheck bool `json:"ocsp_no_check"` + ExpiryString string `json:"expiry"` + BackdateString string `json:"backdate"` + AuthKeyName string `json:"auth_key"` + RemoteName string `json:"remote"` + NotBefore time.Time `json:"not_before"` + NotAfter time.Time `json:"not_after"` + NameWhitelistString string `json:"name_whitelist"` + AuthRemote AuthRemote `json:"auth_remote"` + CTLogServers []string `json:"ct_log_servers"` + AllowedExtensions []OID `json:"allowed_extensions"` + CertStore string `json:"cert_store"` + + Policies []CertificatePolicy + Expiry time.Duration + Backdate time.Duration + Provider auth.Provider + RemoteProvider auth.Provider + RemoteServer string + RemoteCAs *x509.CertPool + ClientCert *tls.Certificate + CSRWhitelist *CSRWhitelist + NameWhitelist *regexp.Regexp + ExtensionWhitelist map[string]bool + ClientProvidesSerialNumbers bool +} + +// UnmarshalJSON unmarshals a JSON string into an OID. +func (oid *OID) UnmarshalJSON(data []byte) (err error) { + if data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("OID JSON string not wrapped in quotes." + string(data)) + } + data = data[1 : len(data)-1] + parsedOid, err := parseObjectIdentifier(string(data)) + if err != nil { + return err + } + *oid = OID(parsedOid) + return +} + +// MarshalJSON marshals an oid into a JSON string. 
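+// For example, OID{2, 5, 29, 19} marshals to the JSON string "2.5.29.19".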
+func (oid OID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil +} + +func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) { + validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString) + if err != nil { + return + } + if !validOID { + err = errors.New("Invalid OID") + return + } + + segments := strings.Split(oidString, ".") + oid = make(asn1.ObjectIdentifier, len(segments)) + for i, intString := range segments { + oid[i], err = strconv.Atoi(intString) + if err != nil { + return + } + } + return +} + +const timeFormat = "2006-01-02T15:04:05" + +// populate is used to fill in the fields that are not in JSON +// +// First, the ExpiryString parameter is needed to parse +// expiration timestamps from JSON. The JSON decoder is not able to +// decode a string time duration to a time.Duration, so this is called +// when loading the configuration to properly parse and fill out the +// Expiry parameter. +// This function is also used to create references to the auth key +// and default remote for the profile. +// It returns true if ExpiryString is a valid representation of a +// time.Duration, and the AuthKeyString and RemoteName point to +// valid objects. It returns false otherwise. +func (p *SigningProfile) populate(cfg *Config) error { + if p == nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile")) + } + + var err error + if p.RemoteName == "" && p.AuthRemote.RemoteName == "" { + log.Debugf("parse expiry in profile") + if p.ExpiryString == "" { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string")) + } + + dur, err := time.ParseDuration(p.ExpiryString) + if err != nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) + } + + log.Debugf("expiry is valid") + p.Expiry = dur + + if p.BackdateString != "" { + dur, err = time.ParseDuration(p.BackdateString) + if err != nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) + } + + p.Backdate = dur + } + + if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) + } + + if len(p.Policies) > 0 { + for _, policy := range p.Policies { + for _, qualifier := range policy.Qualifiers { + if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("invalid policy qualifier type")) + } + } + } + } + } else if p.RemoteName != "" { + log.Debug("match remote in profile to remotes section") + if p.AuthRemote.RemoteName != "" { + log.Error("profile has both a remote and an auth remote specified") + return cferr.New(cferr.PolicyError, cferr.InvalidPolicy) + } + if remote := cfg.Remotes[p.RemoteName]; remote != "" { + if err := p.updateRemote(remote); err != nil { + return err + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find remote in remotes section")) + } + } else { + log.Debug("match auth remote in profile to remotes section") + if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" { + if err := p.updateRemote(remote); err != nil { + return err + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find remote in remotes section")) + } + } + + if p.AuthKeyName != "" { + log.Debug("match auth key in profile to auth_keys section") + if key, ok := 
cfg.AuthKeys[p.AuthKeyName]; ok == true { + if key.Type == "standard" { + p.Provider, err = auth.New(key.Key, nil) + if err != nil { + log.Debugf("failed to create new standard auth provider: %v", err) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to create new standard auth provider")) + } + } else { + log.Debugf("unknown authentication type %v", key.Type) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("unknown authentication type")) + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find auth_key in auth_keys section")) + } + } + + if p.AuthRemote.AuthKeyName != "" { + log.Debug("match auth remote key in profile to auth_keys section") + if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true { + if key.Type == "standard" { + p.RemoteProvider, err = auth.New(key.Key, nil) + if err != nil { + log.Debugf("failed to create new standard auth provider: %v", err) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to create new standard auth provider")) + } + } else { + log.Debugf("unknown authentication type %v", key.Type) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("unknown authentication type")) + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find auth_remote's auth_key in auth_keys section")) + } + } + + if p.NameWhitelistString != "" { + log.Debug("compiling whitelist regular expression") + rule, err := regexp.Compile(p.NameWhitelistString) + if err != nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to compile name whitelist section")) + } + p.NameWhitelist = rule + } + + p.ExtensionWhitelist = map[string]bool{} + for _, oid := range p.AllowedExtensions { + p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true + } + + return nil +} + +// updateRemote takes a signing profile and initializes the remote server object +// to the hostname:port combination sent by remote. 
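+// An empty remote string is a no-op and leaves any previously set
+// RemoteServer value unchanged.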
+func (p *SigningProfile) updateRemote(remote string) error { + if remote != "" { + p.RemoteServer = remote + } + return nil +} + +// OverrideRemotes takes a signing configuration and updates the remote server object +// to the hostname:port combination sent by remote +func (p *Signing) OverrideRemotes(remote string) error { + if remote != "" { + var err error + for _, profile := range p.Profiles { + err = profile.updateRemote(remote) + if err != nil { + return err + } + } + err = p.Default.updateRemote(remote) + if err != nil { + return err + } + } + return nil +} + +// SetClientCertKeyPairFromFile updates the properties to set client certificates for mutual +// authenticated TLS remote requests +func (p *Signing) SetClientCertKeyPairFromFile(certFile string, keyFile string) error { + if certFile != "" && keyFile != "" { + cert, err := helpers.LoadClientCertificate(certFile, keyFile) + if err != nil { + return err + } + for _, profile := range p.Profiles { + profile.ClientCert = cert + } + p.Default.ClientCert = cert + } + return nil +} + +// SetRemoteCAsFromFile reads root CAs from file and updates the properties to set remote CAs for TLS +// remote requests +func (p *Signing) SetRemoteCAsFromFile(caFile string) error { + if caFile != "" { + remoteCAs, err := helpers.LoadPEMCertPool(caFile) + if err != nil { + return err + } + p.SetRemoteCAs(remoteCAs) + } + return nil +} + +// SetRemoteCAs updates the properties to set remote CAs for TLS +// remote requests +func (p *Signing) SetRemoteCAs(remoteCAs *x509.CertPool) { + for _, profile := range p.Profiles { + profile.RemoteCAs = remoteCAs + } + p.Default.RemoteCAs = remoteCAs +} + +// NeedsRemoteSigner returns true if one of the profiles has a remote set +func (p *Signing) NeedsRemoteSigner() bool { + for _, profile := range p.Profiles { + if profile.RemoteServer != "" { + return true + } + } + + if p.Default.RemoteServer != "" { + return true + } + + return false +} + +// NeedsLocalSigner returns true if one of the profiles doe not have a remote set +func (p *Signing) NeedsLocalSigner() bool { + for _, profile := range p.Profiles { + if profile.RemoteServer == "" { + return true + } + } + + if p.Default.RemoteServer == "" { + return true + } + + return false +} + +// Usages parses the list of key uses in the profile, translating them +// to a list of X.509 key usages and extended key usages. The unknown +// uses are collected into a slice that is also returned. +func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) { + for _, keyUse := range p.Usage { + if kuse, ok := KeyUsage[keyUse]; ok { + ku |= kuse + } else if ekuse, ok := ExtKeyUsage[keyUse]; ok { + eku = append(eku, ekuse) + } else { + unk = append(unk, keyUse) + } + } + return +} + +// A valid profile must be a valid local profile or a valid remote profile. +// A valid local profile has defined at least key usages to be used, and a +// valid local default profile has defined at least a default expiration. +// A valid remote profile (default or not) has remote signer initialized. +// In addition, a remote profile must has a valid auth provider if auth +// key defined. 
+func (p *SigningProfile) validProfile(isDefault bool) bool { + if p == nil { + return false + } + + if p.AuthRemote.RemoteName == "" && p.AuthRemote.AuthKeyName != "" { + log.Debugf("invalid auth remote profile: no remote signer specified") + return false + } + + if p.RemoteName != "" { + log.Debugf("validate remote profile") + + if p.RemoteServer == "" { + log.Debugf("invalid remote profile: no remote signer specified") + return false + } + + if p.AuthKeyName != "" && p.Provider == nil { + log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set") + return false + } + + if p.AuthRemote.RemoteName != "" { + log.Debugf("invalid remote profile: auth remote is also specified") + return false + } + } else if p.AuthRemote.RemoteName != "" { + log.Debugf("validate auth remote profile") + if p.RemoteServer == "" { + log.Debugf("invalid auth remote profile: no remote signer specified") + return false + } + + if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil { + log.Debugf("invalid auth remote profile: no auth key is defined") + return false + } + } else { + log.Debugf("validate local profile") + if !isDefault { + if len(p.Usage) == 0 { + log.Debugf("invalid local profile: no usages specified") + return false + } else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) { + log.Debugf("invalid local profile: no valid usages") + return false + } + } else { + if p.Expiry == 0 { + log.Debugf("invalid local profile: no expiry set") + return false + } + } + } + + log.Debugf("profile is valid") + return true +} + +// This checks if the SigningProfile object contains configurations that are only effective with a local signer +// which has access to CA private key. +func (p *SigningProfile) hasLocalConfig() bool { + if p.Usage != nil || + p.IssuerURL != nil || + p.OCSP != "" || + p.ExpiryString != "" || + p.BackdateString != "" || + p.CAConstraint.IsCA != false || + !p.NotBefore.IsZero() || + !p.NotAfter.IsZero() || + p.NameWhitelistString != "" || + len(p.CTLogServers) != 0 { + return true + } + return false +} + +// warnSkippedSettings prints a log warning message about skipped settings +// in a SigningProfile, usually due to remote signer. +func (p *Signing) warnSkippedSettings() { + const warningMessage = `The configuration value by "usages", "issuer_urls", "ocsp_url", "crl_url", "ca_constraint", "expiry", "backdate", "not_before", "not_after", "cert_store" and "ct_log_servers" are skipped` + if p == nil { + return + } + + if (p.Default.RemoteName != "" || p.Default.AuthRemote.RemoteName != "") && p.Default.hasLocalConfig() { + log.Warning("default profile points to a remote signer: ", warningMessage) + } + + for name, profile := range p.Profiles { + if (profile.RemoteName != "" || profile.AuthRemote.RemoteName != "") && profile.hasLocalConfig() { + log.Warningf("Profiles[%s] points to a remote signer: %s", name, warningMessage) + } + } +} + +// Signing codifies the signature configuration policy for a CA. +type Signing struct { + Profiles map[string]*SigningProfile `json:"profiles"` + Default *SigningProfile `json:"default"` +} + +// Config stores configuration information for the CA. +type Config struct { + Signing *Signing `json:"signing"` + OCSP *ocspConfig.Config `json:"ocsp"` + AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"` + Remotes map[string]string `json:"remotes,omitempty"` +} + +// Valid ensures that Config is a valid configuration. It should be +// called immediately after parsing a configuration file. 
+func (c *Config) Valid() bool { + return c.Signing.Valid() +} + +// Valid checks the signature policies, ensuring they are valid +// policies. A policy is valid if it has defined at least key usages +// to be used, and a valid default profile has defined at least a +// default expiration. +func (p *Signing) Valid() bool { + if p == nil { + return false + } + + log.Debugf("validating configuration") + if !p.Default.validProfile(true) { + log.Debugf("default profile is invalid") + return false + } + + for _, sp := range p.Profiles { + if !sp.validProfile(false) { + log.Debugf("invalid profile") + return false + } + } + + p.warnSkippedSettings() + + return true +} + +// KeyUsage contains a mapping of string names to key usages. +var KeyUsage = map[string]x509.KeyUsage{ + "signing": x509.KeyUsageDigitalSignature, + "digital signature": x509.KeyUsageDigitalSignature, + "content committment": x509.KeyUsageContentCommitment, + "key encipherment": x509.KeyUsageKeyEncipherment, + "key agreement": x509.KeyUsageKeyAgreement, + "data encipherment": x509.KeyUsageDataEncipherment, + "cert sign": x509.KeyUsageCertSign, + "crl sign": x509.KeyUsageCRLSign, + "encipher only": x509.KeyUsageEncipherOnly, + "decipher only": x509.KeyUsageDecipherOnly, +} + +// ExtKeyUsage contains a mapping of string names to extended key +// usages. +var ExtKeyUsage = map[string]x509.ExtKeyUsage{ + "any": x509.ExtKeyUsageAny, + "server auth": x509.ExtKeyUsageServerAuth, + "client auth": x509.ExtKeyUsageClientAuth, + "code signing": x509.ExtKeyUsageCodeSigning, + "email protection": x509.ExtKeyUsageEmailProtection, + "s/mime": x509.ExtKeyUsageEmailProtection, + "ipsec end system": x509.ExtKeyUsageIPSECEndSystem, + "ipsec tunnel": x509.ExtKeyUsageIPSECTunnel, + "ipsec user": x509.ExtKeyUsageIPSECUser, + "timestamping": x509.ExtKeyUsageTimeStamping, + "ocsp signing": x509.ExtKeyUsageOCSPSigning, + "microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto, + "netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto, +} + +// An AuthKey contains an entry for a key used for authentication. +type AuthKey struct { + // Type contains information needed to select the appropriate + // constructor. For example, "standard" for HMAC-SHA-256, + // "standard-ip" for HMAC-SHA-256 incorporating the client's + // IP. + Type string `json:"type"` + // Key contains the key information, such as a hex-encoded + // HMAC key. + Key string `json:"key"` +} + +// DefaultConfig returns a default configuration specifying basic key +// usage and a 1 year expiration time. The key usages chosen are +// signing, key encipherment, client auth and server auth. +func DefaultConfig() *SigningProfile { + d := helpers.OneYear + return &SigningProfile{ + Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, + Expiry: d, + ExpiryString: "8760h", + } +} + +// LoadFile attempts to load the configuration file stored at the path +// and returns the configuration. On error, it returns nil. +func LoadFile(path string) (*Config, error) { + log.Debugf("loading configuration file from %s", path) + if path == "" { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path")) + } + + body, err := ioutil.ReadFile(path) + if err != nil { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file")) + } + + return LoadConfig(body) +} + +// LoadConfig attempts to load the configuration from a byte slice. +// On error, it returns nil. 
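+// A minimal configuration accepted by LoadConfig looks like the following
+// (a sketch; the values are illustrative):
+//
+//	{
+//	    "signing": {
+//	        "default": {
+//	            "expiry": "8760h",
+//	            "usages": ["signing", "key encipherment"]
+//	        }
+//	    }
+//	}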
+func LoadConfig(config []byte) (*Config, error) { + var cfg = &Config{} + err := json.Unmarshal(config, &cfg) + if err != nil { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to unmarshal configuration: "+err.Error())) + } + + if cfg.Signing == nil { + return nil, errors.New("No \"signing\" field present") + } + + if cfg.Signing.Default == nil { + log.Debugf("no default given: using default config") + cfg.Signing.Default = DefaultConfig() + } else { + if err := cfg.Signing.Default.populate(cfg); err != nil { + return nil, err + } + } + + for k := range cfg.Signing.Profiles { + if err := cfg.Signing.Profiles[k].populate(cfg); err != nil { + return nil, err + } + } + + if !cfg.Valid() { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration")) + } + + log.Debugf("configuration ok") + return cfg, nil +} diff --git a/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go b/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go new file mode 100644 index 0000000..8db547f --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go @@ -0,0 +1,188 @@ +// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically +// used to package certificates and CRLs. Using openssl, every certificate converted +// to PKCS #7 format from another encoding such as PEM conforms to this implementation. +// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html +// +// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315 +// +// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements, +// for example data can be encrypted and signed and then packaged through pkcs#7 to be +// sent over a network and then verified and decrypted. It is asn1, and the type of +// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is: +// +// ContentInfo ::= SEQUENCE { +// contentType ContentType, +// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL +// } +// +// There are 6 possible ContentTypes, data, signedData, envelopedData, +// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and encrypted +// Data are implemented, as the degenerate case of signedData without a signature is the typical +// format for transferring certificates and CRLS, and Data and encryptedData are used in PKCS #12 +// formats. +// The ContentType signedData has the form: +// +// +// signedData ::= SEQUENCE { +// version Version, +// digestAlgorithms DigestAlgorithmIdentifiers, +// contentInfo ContentInfo, +// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL +// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL, +// signerInfos SignerInfos +// } +// +// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to +// this system's use of PKCS #7 data. Version is an integer type, note that PKCS #7 is +// recursive, this second layer of ContentInfo is similar ignored for our degenerate +// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices +// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting +// of any number of extended certificates is not yet supported in this implementation. +// +// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice. +// +// The ContentType encryptedData is the most complicated and its form can be gathered by +// the go type below. 
It essentially contains a raw octet string of encrypted data and an +// algorithm identifier for use in decrypting this data. +package pkcs7 + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + cferr "github.com/cloudflare/cfssl/errors" +) + +// Types used for asn1 Unmarshaling. + +type signedData struct { + Version int + DigestAlgorithms asn1.RawValue + ContentInfo asn1.RawValue + Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"` + Crls asn1.RawValue `asn1:"optional"` + SignerInfos asn1.RawValue +} + +type initPKCS7 struct { + Raw asn1.RawContent + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +// Object identifier strings of the three implemented PKCS7 types. +const ( + ObjIDData = "1.2.840.113549.1.7.1" + ObjIDSignedData = "1.2.840.113549.1.7.2" + ObjIDEncryptedData = "1.2.840.113549.1.7.6" +) + +// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three +// possible types of Content objects, as denoted by the object identifier in +// the ContentInfo field, the other two being nil. SignedData +// is the degenerate SignedData Content info without signature used +// to hold certificates and crls. Data is raw bytes, and EncryptedData +// is as defined in PKCS #7 standard. +type PKCS7 struct { + Raw asn1.RawContent + ContentInfo string + Content Content +} + +// Content implements three of the six possible PKCS7 data types. Only one is non-nil. +type Content struct { + Data []byte + SignedData SignedData + EncryptedData EncryptedData +} + +// SignedData defines the typical carrier of certificates and crls. +type SignedData struct { + Raw asn1.RawContent + Version int + Certificates []*x509.Certificate + Crl *pkix.CertificateList +} + +// Data contains raw bytes. Used as a subtype in PKCS12. +type Data struct { + Bytes []byte +} + +// EncryptedData contains encrypted data. Used as a subtype in PKCS12. +type EncryptedData struct { + Raw asn1.RawContent + Version int + EncryptedContentInfo EncryptedContentInfo +} + +// EncryptedContentInfo is a subtype of PKCS7EncryptedData. +type EncryptedContentInfo struct { + Raw asn1.RawContent + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +// ParsePKCS7 attempts to parse the DER encoded bytes of a +// PKCS7 structure. 
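+// Usage sketch (der holds DER-encoded PKCS #7 bytes; error handling is
+// elided):
+//
+//	msg, err := ParsePKCS7(der)
+//	if err == nil && msg.ContentInfo == "SignedData" {
+//		certs := msg.Content.SignedData.Certificates
+//		// certs holds the parsed []*x509.Certificate payload
+//	}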
+func ParsePKCS7(raw []byte) (msg *PKCS7, err error) { + + var pkcs7 initPKCS7 + _, err = asn1.Unmarshal(raw, &pkcs7) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + + msg = new(PKCS7) + msg.Raw = pkcs7.Raw + msg.ContentInfo = pkcs7.ContentType.String() + switch { + case msg.ContentInfo == ObjIDData: + msg.ContentInfo = "Data" + _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + case msg.ContentInfo == ObjIDSignedData: + msg.ContentInfo = "SignedData" + var signedData signedData + _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + if len(signedData.Certificates.Bytes) != 0 { + msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + } + if len(signedData.Crls.Bytes) != 0 { + msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + } + msg.Content.SignedData.Version = signedData.Version + msg.Content.SignedData.Raw = pkcs7.Content.Bytes + case msg.ContentInfo == ObjIDEncryptedData: + msg.ContentInfo = "EncryptedData" + var encryptedData EncryptedData + _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + if encryptedData.Version != 0 { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0")) + } + msg.Content.EncryptedData = encryptedData + + default: + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data")) + } + + return msg, nil + +} diff --git a/vendor/github.com/cloudflare/cfssl/csr/csr.go b/vendor/github.com/cloudflare/cfssl/csr/csr.go new file mode 100644 index 0000000..4329b79 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/csr/csr.go @@ -0,0 +1,431 @@ +// Package csr implements certificate requests for CFSSL. +package csr + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "net" + "net/mail" + "strings" + + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/log" +) + +const ( + curveP256 = 256 + curveP384 = 384 + curveP521 = 521 +) + +// A Name contains the SubjectInfo fields. +type Name struct { + C string // Country + ST string // State + L string // Locality + O string // OrganisationName + OU string // OrganisationalUnitName + SerialNumber string +} + +// A KeyRequest is a generic request for a new key. +type KeyRequest interface { + Algo() string + Size() int + Generate() (crypto.PrivateKey, error) + SigAlgo() x509.SignatureAlgorithm +} + +// A BasicKeyRequest contains the algorithm and key size for a new private key. +type BasicKeyRequest struct { + A string `json:"algo"` + S int `json:"size"` +} + +// NewBasicKeyRequest returns a default BasicKeyRequest. 
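+// The default request asks for an ECDSA key on the P-256 curve.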
+func NewBasicKeyRequest() *BasicKeyRequest { + return &BasicKeyRequest{"ecdsa", curveP256} +} + +// Algo returns the requested key algorithm represented as a string. +func (kr *BasicKeyRequest) Algo() string { + return kr.A +} + +// Size returns the requested key size. +func (kr *BasicKeyRequest) Size() int { + return kr.S +} + +// Generate generates a key as specified in the request. Currently, +// only ECDSA and RSA are supported. +func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) { + log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size()) + switch kr.Algo() { + case "rsa": + if kr.Size() < 2048 { + return nil, errors.New("RSA key is too weak") + } + if kr.Size() > 8192 { + return nil, errors.New("RSA key size too large") + } + return rsa.GenerateKey(rand.Reader, kr.Size()) + case "ecdsa": + var curve elliptic.Curve + switch kr.Size() { + case curveP256: + curve = elliptic.P256() + case curveP384: + curve = elliptic.P384() + case curveP521: + curve = elliptic.P521() + default: + return nil, errors.New("invalid curve") + } + return ecdsa.GenerateKey(curve, rand.Reader) + default: + return nil, errors.New("invalid algorithm") + } +} + +// SigAlgo returns an appropriate X.509 signature algorithm given the +// key request's type and size. +func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm { + switch kr.Algo() { + case "rsa": + switch { + case kr.Size() >= 4096: + return x509.SHA512WithRSA + case kr.Size() >= 3072: + return x509.SHA384WithRSA + case kr.Size() >= 2048: + return x509.SHA256WithRSA + default: + return x509.SHA1WithRSA + } + case "ecdsa": + switch kr.Size() { + case curveP521: + return x509.ECDSAWithSHA512 + case curveP384: + return x509.ECDSAWithSHA384 + case curveP256: + return x509.ECDSAWithSHA256 + default: + return x509.ECDSAWithSHA1 + } + default: + return x509.UnknownSignatureAlgorithm + } +} + +// CAConfig is a section used in the requests initialising a new CA. +type CAConfig struct { + PathLength int `json:"pathlen"` + PathLenZero bool `json:"pathlenzero"` + Expiry string `json:"expiry"` +} + +// A CertificateRequest encapsulates the API interface to the +// certificate request functionality. +type CertificateRequest struct { + CN string + Names []Name `json:"names"` + Hosts []string `json:"hosts"` + KeyRequest KeyRequest `json:"key,omitempty"` + CA *CAConfig `json:"ca,omitempty"` + SerialNumber string `json:"serialnumber,omitempty"` +} + +// New returns a new, empty CertificateRequest with a +// BasicKeyRequest. +func New() *CertificateRequest { + return &CertificateRequest{ + KeyRequest: NewBasicKeyRequest(), + } +} + +// appendIf appends to a if s is not an empty string. +func appendIf(s string, a *[]string) { + if s != "" { + *a = append(*a, s) + } +} + +// Name returns the PKIX name for the request. +func (cr *CertificateRequest) Name() pkix.Name { + var name pkix.Name + name.CommonName = cr.CN + + for _, n := range cr.Names { + appendIf(n.C, &name.Country) + appendIf(n.ST, &name.Province) + appendIf(n.L, &name.Locality) + appendIf(n.O, &name.Organization) + appendIf(n.OU, &name.OrganizationalUnit) + } + name.SerialNumber = cr.SerialNumber + return name +} + +// BasicConstraints CSR information RFC 5280, 4.2.1.9 +type BasicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` +} + +// ParseRequest takes a certificate request and generates a key and +// CSR from it. It does no validation -- caveat emptor. 
It will, +// however, fail if the key request is not valid (i.e., an unsupported +// curve or RSA key size). The lack of validation was specifically +// chosen to allow the end user to define a policy and validate the +// request appropriately before calling this function. +func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) { + log.Info("received CSR") + if req.KeyRequest == nil { + req.KeyRequest = NewBasicKeyRequest() + } + + log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size()) + priv, err := req.KeyRequest.Generate() + if err != nil { + err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err) + return + } + + switch priv := priv.(type) { + case *rsa.PrivateKey: + key = x509.MarshalPKCS1PrivateKey(priv) + block := pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: key, + } + key = pem.EncodeToMemory(&block) + case *ecdsa.PrivateKey: + key, err = x509.MarshalECPrivateKey(priv) + if err != nil { + err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err) + return + } + block := pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: key, + } + key = pem.EncodeToMemory(&block) + default: + panic("Generate should have failed to produce a valid key.") + } + + csr, err = Generate(priv.(crypto.Signer), req) + if err != nil { + log.Errorf("failed to generate a CSR: %v", err) + err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err) + } + return +} + +// ExtractCertificateRequest extracts a CertificateRequest from +// x509.Certificate. It is aimed to used for generating a new certificate +// from an existing certificate. For a root certificate, the CA expiry +// length is calculated as the duration between cert.NotAfter and cert.NotBefore. +func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest { + req := New() + req.CN = cert.Subject.CommonName + req.Names = getNames(cert.Subject) + req.Hosts = getHosts(cert) + req.SerialNumber = cert.Subject.SerialNumber + + if cert.IsCA { + req.CA = new(CAConfig) + // CA expiry length is calculated based on the input cert + // issue date and expiry date. 
+ req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String() + req.CA.PathLength = cert.MaxPathLen + req.CA.PathLenZero = cert.MaxPathLenZero + } + + return req +} + +func getHosts(cert *x509.Certificate) []string { + var hosts []string + for _, ip := range cert.IPAddresses { + hosts = append(hosts, ip.String()) + } + for _, dns := range cert.DNSNames { + hosts = append(hosts, dns) + } + for _, email := range cert.EmailAddresses { + hosts = append(hosts, email) + } + + return hosts +} + +// getNames returns an array of Names from the certificate +// It onnly cares about Country, Organization, OrganizationalUnit, Locality, Province +func getNames(sub pkix.Name) []Name { + // anonymous func for finding the max of a list of interger + max := func(v1 int, vn ...int) (max int) { + max = v1 + for i := 0; i < len(vn); i++ { + if vn[i] > max { + max = vn[i] + } + } + return max + } + + nc := len(sub.Country) + norg := len(sub.Organization) + nou := len(sub.OrganizationalUnit) + nl := len(sub.Locality) + np := len(sub.Province) + + n := max(nc, norg, nou, nl, np) + + names := make([]Name, n) + for i := range names { + if i < nc { + names[i].C = sub.Country[i] + } + if i < norg { + names[i].O = sub.Organization[i] + } + if i < nou { + names[i].OU = sub.OrganizationalUnit[i] + } + if i < nl { + names[i].L = sub.Locality[i] + } + if i < np { + names[i].ST = sub.Province[i] + } + } + return names +} + +// A Generator is responsible for validating certificate requests. +type Generator struct { + Validator func(*CertificateRequest) error +} + +// ProcessRequest validates and processes the incoming request. It is +// a wrapper around a validator and the ParseRequest function. +func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) { + + log.Info("generate received request") + err = g.Validator(req) + if err != nil { + log.Warningf("invalid request: %v", err) + return + } + + csr, key, err = ParseRequest(req) + if err != nil { + return nil, nil, err + } + return +} + +// IsNameEmpty returns true if the name has no identifying information in it. +func IsNameEmpty(n Name) bool { + empty := func(s string) bool { return strings.TrimSpace(s) == "" } + + if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) { + return true + } + return false +} + +// Regenerate uses the provided CSR as a template for signing a new +// CSR using priv. +func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) { + req, extra, err := helpers.ParseCSR(csr) + if err != nil { + return nil, err + } else if len(extra) > 0 { + return nil, errors.New("csr: trailing data in certificate request") + } + + return x509.CreateCertificateRequest(rand.Reader, req, priv) +} + +// Generate creates a new CSR from a CertificateRequest structure and +// an existing key. The KeyRequest field is ignored. 
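+// Illustrative usage (error handling elided; the request values are
+// hypothetical):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	csrPEM, _ := Generate(priv, &CertificateRequest{
+//		CN:    "example.com",
+//		Hosts: []string{"example.com", "www.example.com"},
+//	})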
+func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
+	sigAlgo := helpers.SignerAlgo(priv)
+	if sigAlgo == x509.UnknownSignatureAlgorithm {
+		return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
+	}
+
+	var tpl = x509.CertificateRequest{
+		Subject:            req.Name(),
+		SignatureAlgorithm: sigAlgo,
+	}
+
+	for i := range req.Hosts {
+		if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+			tpl.IPAddresses = append(tpl.IPAddresses, ip)
+		} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
+			tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
+		} else {
+			tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+		}
+	}
+
+	if req.CA != nil {
+		err = appendCAInfoToCSR(req.CA, &tpl)
+		if err != nil {
+			err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)
+			return
+		}
+	}
+
+	csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
+	if err != nil {
+		log.Errorf("failed to generate a CSR: %v", err)
+		err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+		return
+	}
+	block := pem.Block{
+		Type:  "CERTIFICATE REQUEST",
+		Bytes: csr,
+	}
+
+	log.Info("encoded CSR")
+	csr = pem.EncodeToMemory(&block)
+	return
+}
+
+// appendCAInfoToCSR appends the CAConfig BasicConstraints extension to a CSR.
+func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error {
+	pathlen := reqConf.PathLength
+	if pathlen == 0 && !reqConf.PathLenZero {
+		pathlen = -1
+	}
+	val, err := asn1.Marshal(BasicConstraints{true, pathlen})
+
+	if err != nil {
+		return err
+	}
+
+	csr.ExtraExtensions = []pkix.Extension{
+		{
+			Id:       asn1.ObjectIdentifier{2, 5, 29, 19},
+			Value:    val,
+			Critical: true,
+		},
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cloudflare/cfssl/errors/doc.go b/vendor/github.com/cloudflare/cfssl/errors/doc.go
new file mode 100644
index 0000000..1910e26
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/errors/doc.go
@@ -0,0 +1,46 @@
+/*
+Package errors provides error types returned in CF SSL.
+
+1. Type Error is intended for errors produced by CF SSL packages.
+It formats to a JSON object that consists of an error message and a 4-digit code for error reasoning.
+
+Example: {"code":1002, "message": "Failed to decode certificate"}
+
+The error codes are listed below:
+	1XXX: CertificateError
+		1000: Unknown
+		1001: ReadFailed
+		1002: DecodeFailed
+		1003: ParseFailed
+		1100: SelfSigned
+		12XX: VerifyFailed
+			121X: CertificateInvalid
+			1210: NotAuthorizedToSign
+			1211: Expired
+			1212: CANotAuthorizedForThisName
+			1213: TooManyIntermediates
+			1214: IncompatibleUsage
+			1220: UnknownAuthority
+	2XXX: PrivateKeyError
+		2000: Unknown
+		2001: ReadFailed
+		2002: DecodeFailed
+		2003: ParseFailed
+		2100: Encrypted
+		2200: NotRSA
+		2300: KeyMismatch
+		2400: GenerationFailed
+		2500: Unavailable
+	3XXX: IntermediatesError
+	4XXX: RootError
+	5XXX: PolicyError
+		5100: NoKeyUsages
+		5200: InvalidPolicy
+		5300: InvalidRequest
+		5400: UnknownProfile
+	6XXX: DialError
+
+2. Type HTTPError is intended for the CF SSL API to consume. It contains an HTTP status code that will be read and returned
+by the API server.
+*/
+package errors
diff --git a/vendor/github.com/cloudflare/cfssl/errors/error.go b/vendor/github.com/cloudflare/cfssl/errors/error.go
new file mode 100644
index 0000000..9913a84
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/errors/error.go
@@ -0,0 +1,424 @@
+package errors
+
+import (
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+)
+
+// Error is the error type usually returned by functions in the CF SSL package.
+// It contains a 4-digit error code where the most significant digit
+// describes the category in which the error occurred and the remaining
+// 3 digits describe the specific error reason.
+type Error struct {
+	ErrorCode int    `json:"code"`
+	Message   string `json:"message"`
+}
+
+// Category is the most significant digit of the error code.
+type Category int
+
+// Reason is the last 3 digits of the error code.
+type Reason int
+
+const (
+	// Success indicates no error occurred.
+	Success Category = 1000 * iota // 0XXX
+
+	// CertificateError indicates a fault in a certificate.
+	CertificateError // 1XXX
+
+	// PrivateKeyError indicates a fault in a private key.
+	PrivateKeyError // 2XXX
+
+	// IntermediatesError indicates a fault in an intermediate.
+	IntermediatesError // 3XXX
+
+	// RootError indicates a fault in a root.
+	RootError // 4XXX
+
+	// PolicyError indicates an error arising from a malformed or
+	// non-existent policy, or a breach of policy.
+	PolicyError // 5XXX
+
+	// DialError indicates a network fault.
+	DialError // 6XXX
+
+	// APIClientError indicates a problem with the API client.
+	APIClientError // 7XXX
+
+	// OCSPError indicates a problem with OCSP signing.
+	OCSPError // 8XXX
+
+	// CSRError indicates a problem with CSR parsing.
+	CSRError // 9XXX
+
+	// CTError indicates a problem with the certificate transparency process.
+	CTError // 10XXX
+
+	// CertStoreError indicates a problem with the certificate store.
+	CertStoreError // 11XXX
+)
+
+// None is a non-specified error.
+const (
+	None Reason = iota
+)
+
+// Warning codes for a success.
+const (
+	BundleExpiringBit      int = 1 << iota // 0x01
+	BundleNotUbiquitousBit                 // 0x02
+)
+
+// Parsing errors
+const (
+	Unknown      Reason = iota // X000
+	ReadFailed                 // X001
+	DecodeFailed               // X002
+	ParseFailed                // X003
+)
+
+// The following represent certificate non-parsing errors, and must be
+// specified along with CertificateError.
+const (
+	// SelfSigned indicates that a certificate is self-signed and
+	// cannot be used in the manner being attempted.
+	SelfSigned Reason = 100 * (iota + 1) // Code 11XX
+
+	// VerifyFailed is an X.509 verification failure. The two least
+	// significant digits of 12XX are determined by examining the
+	// actual x509 error.
+	VerifyFailed // Code 12XX
+
+	// BadRequest indicates that the certificate request is invalid.
+	BadRequest // Code 13XX
+
+	// MissingSerial indicates that the profile specified
+	// 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
+	// number.
+	MissingSerial // Code 14XX
+)
+
+const (
+	certificateInvalid = 10 * (iota + 1) // 121X
+	unknownAuthority                     // 122X
+)
+
+// The following represent private-key non-parsing errors, and must be
+// specified with PrivateKeyError.
+const (
+	// Encrypted indicates that the private key is a PKCS #8 encrypted
+	// private key. At this time, CFSSL does not support decrypting
+	// these keys.
+	Encrypted Reason = 100 * (iota + 1) // 21XX
+
+	// NotRSAOrECC indicates that the key is not an RSA or ECC
+	// private key; these are the only two private key types supported
+	// at this time by CFSSL.
+	NotRSAOrECC // 22XX
+
+	// KeyMismatch indicates that the private key does not match
+	// the public key or certificate being presented with the key.
+	KeyMismatch // 23XX
+
+	// GenerationFailed indicates that a private key could not
+	// be generated.
+	GenerationFailed // 24XX
+
+	// Unavailable indicates that a private key mechanism (such as
+	// PKCS #11) was requested but support for that mechanism is
+	// not available.
+	Unavailable
+)
+
+// The following are policy-related non-parsing errors, and must be
+// specified along with PolicyError.
+const (
+	// NoKeyUsages indicates that the profile does not permit any
+	// key usages for the certificate.
+	NoKeyUsages Reason = 100 * (iota + 1) // 51XX
+
+	// InvalidPolicy indicates that the policy being requested is not
+	// a valid policy or does not exist.
+	InvalidPolicy // 52XX
+
+	// InvalidRequest indicates a certificate request violated the
+	// constraints of the policy being applied to the request.
+	InvalidRequest // 53XX
+
+	// UnknownProfile indicates that the profile does not exist.
+	UnknownProfile // 54XX
+
+	UnmatchedWhitelist // 55XX
+)
+
+// The following are API client related errors, and should be
+// specified with APIClientError.
+const (
+	// AuthenticationFailure occurs when the client is unable
+	// to obtain an authentication token for the request.
+	AuthenticationFailure Reason = 100 * (iota + 1)
+
+	// JSONError wraps an encoding/json error.
+	JSONError
+
+	// IOError wraps an io/ioutil error.
+	IOError
+
+	// ClientHTTPError wraps a net/http error.
+	ClientHTTPError
+
+	// ServerRequestFailed covers any other failures from the API
+	// client.
+	ServerRequestFailed
+)
+
+// The following are OCSP related errors, and should be
+// specified with OCSPError.
+const (
+	// IssuerMismatch occurs when the certificate in the OCSP signing
+	// request was not issued by the CA that this responder responds for.
+	IssuerMismatch Reason = 100 * (iota + 1) // 81XX
+
+	// InvalidStatus occurs when the OCSP signing request includes an
+	// invalid value for the certificate status.
+	InvalidStatus
+)
+
+// Certificate transparency related errors specified with CTError.
+const (
+	// PrecertSubmissionFailed occurs when submitting a precertificate to
+	// a log server fails.
+	PrecertSubmissionFailed = 100 * (iota + 1)
+)
+
+// Certificate persistence related errors specified with CertStoreError.
+const (
+	// InsertionFailed occurs when a SQL insert query fails to complete.
+	InsertionFailed = 100 * (iota + 1)
+	// RecordNotFound occurs when a SQL query targeting one unique
+	// record fails to update the specified row in the table.
+	RecordNotFound
+)
+
+// The error interface implementation, which formats to a JSON object string.
+func (e *Error) Error() string {
+	marshaled, err := json.Marshal(e)
+	if err != nil {
+		panic(err)
+	}
+	return string(marshaled)
+
+}
+
+// New returns an error that contains an error code and message derived from
+// the given category, reason.
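+// For example (editor's note, derived from the switch below),
+// New(CertificateError, ParseFailed) yields code 1000+3 = 1003:
+//
+//	err := New(CertificateError, ParseFailed)
+//	fmt.Println(err) // {"code":1003,"message":"Failed to parse certificate"}
+//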
+// Currently, to avoid confusion, it is not allowed to create an error of
+// category Success.
+func New(category Category, reason Reason) *Error {
+	errorCode := int(category) + int(reason)
+	var msg string
+	switch category {
+	case OCSPError:
+		switch reason {
+		case ReadFailed:
+			msg = "No certificate provided"
+		case IssuerMismatch:
+			msg = "Certificate not issued by this issuer"
+		case InvalidStatus:
+			msg = "Invalid revocation status"
+		}
+	case CertificateError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown certificate error"
+		case ReadFailed:
+			msg = "Failed to read certificate"
+		case DecodeFailed:
+			msg = "Failed to decode certificate"
+		case ParseFailed:
+			msg = "Failed to parse certificate"
+		case SelfSigned:
+			msg = "Certificate is self signed"
+		case VerifyFailed:
+			msg = "Unable to verify certificate"
+		case BadRequest:
+			msg = "Invalid certificate request"
+		case MissingSerial:
+			msg = "Missing serial number in request"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
+				reason))
+		}
+	case PrivateKeyError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown private key error"
+		case ReadFailed:
+			msg = "Failed to read private key"
+		case DecodeFailed:
+			msg = "Failed to decode private key"
+		case ParseFailed:
+			msg = "Failed to parse private key"
+		case Encrypted:
+			msg = "Private key is encrypted."
+		case NotRSAOrECC:
+			msg = "Private key algorithm is not RSA or ECC"
+		case KeyMismatch:
+			msg = "Private key does not match public key"
+		case GenerationFailed:
+			msg = "Failed to generate new private key"
+		case Unavailable:
+			msg = "Private key is unavailable"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
+				reason))
+		}
+	case IntermediatesError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown intermediate certificate error"
+		case ReadFailed:
+			msg = "Failed to read intermediate certificate"
+		case DecodeFailed:
+			msg = "Failed to decode intermediate certificate"
+		case ParseFailed:
+			msg = "Failed to parse intermediate certificate"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
+				reason))
+		}
+	case RootError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown root certificate error"
+		case ReadFailed:
+			msg = "Failed to read root certificate"
+		case DecodeFailed:
+			msg = "Failed to decode root certificate"
+		case ParseFailed:
+			msg = "Failed to parse root certificate"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
+				reason))
+		}
+	case PolicyError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown policy error"
+		case NoKeyUsages:
+			msg = "Invalid policy: no key usage available"
+		case InvalidPolicy:
+			msg = "Invalid or unknown policy"
+		case InvalidRequest:
+			msg = "Request violates policy"
+		case UnknownProfile:
+			msg = "Unknown policy profile"
+		case UnmatchedWhitelist:
+			msg = "Request does not match policy whitelist"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
+				reason))
+		}
+	case DialError:
+		switch reason {
+		case Unknown:
+			msg = "Failed to dial remote server"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
+				reason))
+		}
+	case APIClientError:
+		switch reason {
+		case AuthenticationFailure:
+			msg = "API client authentication failure"
+		case JSONError:
+			msg = "API client JSON config error"
+		case ClientHTTPError:
+			msg = "API client HTTP error"
+		case IOError:
+ msg = "API client IO error" + case ServerRequestFailed: + msg = "API client error: Server request failed" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.", + reason)) + } + case CSRError: + switch reason { + case Unknown: + msg = "CSR parsing failed due to unknown error" + case ReadFailed: + msg = "CSR file read failed" + case ParseFailed: + msg = "CSR Parsing failed" + case DecodeFailed: + msg = "CSR Decode failed" + case BadRequest: + msg = "CSR Bad request" + default: + panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason)) + } + case CTError: + switch reason { + case Unknown: + msg = "Certificate transparency parsing failed due to unknown error" + case PrecertSubmissionFailed: + msg = "Certificate transparency precertificate submission failed" + default: + panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason)) + } + case CertStoreError: + switch reason { + case Unknown: + msg = "Certificate store action failed due to unknown error" + default: + panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason)) + } + + default: + panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", + category)) + } + return &Error{ErrorCode: errorCode, Message: msg} +} + +// Wrap returns an error that contains the given error and an error code derived from +// the given category, reason and the error. Currently, to avoid confusion, it is not +// allowed to create an error of category Success +func Wrap(category Category, reason Reason, err error) *Error { + errorCode := int(category) + int(reason) + if err == nil { + panic("Wrap needs a supplied error to initialize.") + } + + // do not double wrap a error + switch err.(type) { + case *Error: + panic("Unable to wrap a wrapped error.") + } + + switch category { + case CertificateError: + // given VerifyFailed , report the status with more detailed status code + // for some certificate errors we care. + if reason == VerifyFailed { + switch errorType := err.(type) { + case x509.CertificateInvalidError: + errorCode += certificateInvalid + int(errorType.Reason) + case x509.UnknownAuthorityError: + errorCode += unknownAuthority + } + } + case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError, + APIClientError, CSRError, CTError, CertStoreError: + // no-op, just use the error + default: + panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", + category)) + } + + return &Error{ErrorCode: errorCode, Message: err.Error()} + +} diff --git a/vendor/github.com/cloudflare/cfssl/errors/http.go b/vendor/github.com/cloudflare/cfssl/errors/http.go new file mode 100644 index 0000000..c9c0a39 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/errors/http.go @@ -0,0 +1,47 @@ +package errors + +import ( + "errors" + "net/http" +) + +// HTTPError is an augmented error with a HTTP status code. +type HTTPError struct { + StatusCode int + error +} + +// Error implements the error interface. +func (e *HTTPError) Error() string { + return e.error.Error() +} + +// NewMethodNotAllowed returns an appropriate error in the case that +// an HTTP client uses an invalid method (i.e. a GET in place of a POST) +// on an API endpoint. +func NewMethodNotAllowed(method string) *HTTPError { + return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)} +} + +// NewBadRequest creates a HttpError with the given error and error code 400. 
+func NewBadRequest(err error) *HTTPError {
+	return &HTTPError{http.StatusBadRequest, err}
+}
+
+// NewBadRequestString returns an HTTPError with the supplied message
+// and error code 400.
+func NewBadRequestString(s string) *HTTPError {
+	return NewBadRequest(errors.New(s))
+}
+
+// NewBadRequestMissingParameter returns a 400 HTTPError as a required
+// parameter is missing in the HTTP request.
+func NewBadRequestMissingParameter(s string) *HTTPError {
+	return NewBadRequestString(`Missing parameter "` + s + `"`)
+}
+
+// NewBadRequestUnwantedParameter returns a 400 HTTPError as an unnecessary
+// parameter is present in the HTTP request.
+func NewBadRequestUnwantedParameter(s string) *HTTPError {
+	return NewBadRequestString(`Unwanted parameter "` + s + `"`)
+}
diff --git a/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go b/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
new file mode 100644
index 0000000..bcc7418
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
@@ -0,0 +1,42 @@
+// Package derhelpers implements common functionality
+// on DER encoded data.
+package derhelpers
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+
+	cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve
+// DER-encoded private key. The key must not be in PEM format.
+func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
+	generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
+	if err != nil {
+		generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
+		if err != nil {
+			generalKey, err = x509.ParseECPrivateKey(keyDER)
+			if err != nil {
+				// We don't include the actual error in the
+				// final error, to avoid leaking any info
+				// about the private key.
+				return nil, cferr.New(cferr.PrivateKeyError,
+					cferr.ParseFailed)
+			}
+		}
+	}
+
+	switch generalKey.(type) {
+	case *rsa.PrivateKey:
+		return generalKey.(*rsa.PrivateKey), nil
+	case *ecdsa.PrivateKey:
+		return generalKey.(*ecdsa.PrivateKey), nil
+	}
+
+	// should never reach here
+	return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
+}
diff --git a/vendor/github.com/cloudflare/cfssl/helpers/helpers.go b/vendor/github.com/cloudflare/cfssl/helpers/helpers.go
new file mode 100644
index 0000000..48d096a
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/helpers/helpers.go
@@ -0,0 +1,518 @@
+// Package helpers implements utility functionality common to many
+// CFSSL packages.
+package helpers
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/pem"
+	"errors"
+	"io/ioutil"
+	"math/big"
+
+	"strings"
+	"time"
+
+	"github.com/cloudflare/cfssl/crypto/pkcs7"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers/derhelpers"
+	"github.com/cloudflare/cfssl/log"
+	"golang.org/x/crypto/pkcs12"
+)
+
+// OneYear is a time.Duration representing a year's worth of seconds.
+const OneYear = 8760 * time.Hour
+
+// OneDay is a time.Duration representing a day's worth of seconds.
+const OneDay = 24 * time.Hour
+
+// InclusiveDate returns the time.Time representation of a date - 1
+// nanosecond. This allows time.After to be used inclusively.
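+// For example (editor's note), InclusiveDate(2012, time.July, 1) is one
+// nanosecond before 2012-07-01 00:00:00 UTC, so t.After(Jul2012) holds
+// for any t on or after July 1, 2012.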
+func InclusiveDate(year int, month time.Month, day int) time.Time {
+	return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
+}
+
+// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 5 years.
+var Jul2012 = InclusiveDate(2012, time.July, 01)
+
+// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 39 months.
+var Apr2015 = InclusiveDate(2015, time.April, 01)
+
+// KeyLength returns the bit size of an ECDSA or RSA PublicKey.
+func KeyLength(key interface{}) int {
+	if key == nil {
+		return 0
+	}
+	if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
+		return ecdsaKey.Curve.Params().BitSize
+	} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
+		return rsaKey.N.BitLen()
+	}
+
+	return 0
+}
+
+// ExpiryTime returns the time when the certificate chain is expired.
+func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
+	if len(chain) == 0 {
+		return
+	}
+
+	notAfter = chain[0].NotAfter
+	for _, cert := range chain {
+		if notAfter.After(cert.NotAfter) {
+			notAfter = cert.NotAfter
+		}
+	}
+	return
+}
+
+// MonthsValid returns the number of months for which a certificate is valid.
+func MonthsValid(c *x509.Certificate) int {
+	issued := c.NotBefore
+	expiry := c.NotAfter
+	years := (expiry.Year() - issued.Year())
+	months := years*12 + int(expiry.Month()) - int(issued.Month())
+
+	// Round up if valid for less than a full month
+	if expiry.Day() > issued.Day() {
+		months++
+	}
+	return months
+}
+
+// ValidExpiry determines if a certificate is valid for an acceptable
+// length of time per the CA/Browser Forum baseline requirements.
+// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
+func ValidExpiry(c *x509.Certificate) bool {
+	issued := c.NotBefore
+
+	var maxMonths int
+	switch {
+	case issued.After(Apr2015):
+		maxMonths = 39
+	case issued.After(Jul2012):
+		maxMonths = 60
+	case issued.Before(Jul2012):
+		maxMonths = 120
+	}
+
+	if MonthsValid(c) > maxMonths {
+		return false
+	}
+	return true
+}
+
+// SignatureString returns the TLS signature string corresponding to
+// an X509 signature algorithm.
+func SignatureString(alg x509.SignatureAlgorithm) string {
+	switch alg {
+	case x509.MD2WithRSA:
+		return "MD2WithRSA"
+	case x509.MD5WithRSA:
+		return "MD5WithRSA"
+	case x509.SHA1WithRSA:
+		return "SHA1WithRSA"
+	case x509.SHA256WithRSA:
+		return "SHA256WithRSA"
+	case x509.SHA384WithRSA:
+		return "SHA384WithRSA"
+	case x509.SHA512WithRSA:
+		return "SHA512WithRSA"
+	case x509.DSAWithSHA1:
+		return "DSAWithSHA1"
+	case x509.DSAWithSHA256:
+		return "DSAWithSHA256"
+	case x509.ECDSAWithSHA1:
+		return "ECDSAWithSHA1"
+	case x509.ECDSAWithSHA256:
+		return "ECDSAWithSHA256"
+	case x509.ECDSAWithSHA384:
+		return "ECDSAWithSHA384"
+	case x509.ECDSAWithSHA512:
+		return "ECDSAWithSHA512"
+	default:
+		return "Unknown Signature"
+	}
+}
+
+// HashAlgoString returns the hash algorithm name contained in the
+// signature method.
+func HashAlgoString(alg x509.SignatureAlgorithm) string {
+	switch alg {
+	case x509.MD2WithRSA:
+		return "MD2"
+	case x509.MD5WithRSA:
+		return "MD5"
+	case x509.SHA1WithRSA:
+		return "SHA1"
+	case x509.SHA256WithRSA:
+		return "SHA256"
+	case x509.SHA384WithRSA:
+		return "SHA384"
+	case x509.SHA512WithRSA:
+		return "SHA512"
+	case x509.DSAWithSHA1:
+		return "SHA1"
+	case x509.DSAWithSHA256:
+		return "SHA256"
+	case x509.ECDSAWithSHA1:
+		return "SHA1"
+	case x509.ECDSAWithSHA256:
+		return "SHA256"
+	case x509.ECDSAWithSHA384:
+		return "SHA384"
+	case x509.ECDSAWithSHA512:
+		return "SHA512"
+	default:
+		return "Unknown Hash Algorithm"
+	}
+}
+
+// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
+func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
+	var buffer bytes.Buffer
+	for _, cert := range certs {
+		pem.Encode(&buffer, &pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: cert.Raw,
+		})
+	}
+
+	return buffer.Bytes()
+}
+
+// EncodeCertificatePEM encodes a single x509 certificate to PEM.
+func EncodeCertificatePEM(cert *x509.Certificate) []byte {
+	return EncodeCertificatesPEM([]*x509.Certificate{cert})
+}
+
+// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and
+// returns them; it can handle PEM-encoded PKCS #7 structures.
+func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
+	var certs []*x509.Certificate
+	var err error
+	certsPEM = bytes.TrimSpace(certsPEM)
+	for len(certsPEM) > 0 {
+		var cert []*x509.Certificate
+		cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
+		if err != nil {
+			return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+		} else if cert == nil {
+			break
+		}
+
+		certs = append(certs, cert...)
+	}
+	if len(certsPEM) > 0 {
+		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	}
+	return certs, nil
+}
+
+// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
+// either PKCS #7, PKCS #12, or raw x509.
+func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
+	certsDER = bytes.TrimSpace(certsDER)
+	pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
+	if err != nil {
+		var pkcs12data interface{}
+		certs = make([]*x509.Certificate, 1)
+		pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
+		if err != nil {
+			certs, err = x509.ParseCertificates(certsDER)
+			if err != nil {
+				return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+			}
+		} else {
+			key = pkcs12data.(crypto.Signer)
+		}
+	} else {
+		if pkcs7data.ContentInfo != "SignedData" {
+			return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
+		}
+		certs = pkcs7data.Content.SignedData.Certificates
+	}
+	if certs == nil {
+		return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	}
+	return certs, key, nil
+}
+
+// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks if it is self-signed.
+func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+	cert, err := ParseCertificatePEM(certPEM)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
+	}
+	return cert, nil
+}
+
+// ParseCertificatePEM parses and returns a PEM-encoded certificate;
+// it can handle PEM-encoded PKCS #7 structures.
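+//
+// A typical call site (editor's sketch; the file name is illustrative):
+//
+//	pemBytes, _ := ioutil.ReadFile("server.pem")
+//	cert, err := ParseCertificatePEM(pemBytes)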
+func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+	certPEM = bytes.TrimSpace(certPEM)
+	cert, rest, err := ParseOneCertificateFromPEM(certPEM)
+	if err != nil {
+		// Log the actual parsing error but throw a default parse error message.
+		log.Debugf("Certificate parsing error: %v", err)
+		return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+	} else if cert == nil {
+		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	} else if len(rest) > 0 {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
+	} else if len(cert) > 1 {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
+	}
+	return cert[0], nil
+}
+
+// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
+// either a raw x509 certificate or a PKCS #7 structure possibly containing
+// multiple certificates, from the top of certsPEM, which itself may
+// contain multiple PEM encoded certificate objects.
+func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
+
+	block, rest := pem.Decode(certsPEM)
+	if block == nil {
+		return nil, rest, nil
+	}
+
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
+		if err != nil {
+			return nil, rest, err
+		}
+		if pkcs7data.ContentInfo != "SignedData" {
+			return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
+		}
+		certs := pkcs7data.Content.SignedData.Certificates
+		if certs == nil {
+			return nil, rest, errors.New("PKCS #7 structure contains no certificates")
+		}
+		return certs, rest, nil
+	}
+	var certs = []*x509.Certificate{cert}
+	return certs, rest, nil
+}
+
+// LoadPEMCertPool loads a pool of PEM certificates from file.
+func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
+	if certsFile == "" {
+		return nil, nil
+	}
+	pemCerts, err := ioutil.ReadFile(certsFile)
+	if err != nil {
+		return nil, err
+	}
+
+	return PEMToCertPool(pemCerts)
+}
+
+// PEMToCertPool converts PEM certificates to a CertPool.
+func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
+	if len(pemCerts) == 0 {
+		return nil, nil
+	}
+
+	certPool := x509.NewCertPool()
+	if !certPool.AppendCertsFromPEM(pemCerts) {
+		return nil, errors.New("failed to load cert pool")
+	}
+
+	return certPool, nil
+}
+
+// ParsePrivateKeyPEM parses and returns a PEM-encoded private
+// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
+// or elliptic private key.
+func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
+	return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
+}
+
+// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
+// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
+// or elliptic private key.
+func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
+	keyDER, err := GetKeyDERFromPEM(keyPEM, password)
+	if err != nil {
+		return nil, err
+	}
+
+	return derhelpers.ParsePrivateKeyDER(keyDER)
+}
+
+// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
+func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
+	keyDER, _ := pem.Decode(in)
+	if keyDER != nil {
+		if procType, ok := keyDER.Headers["Proc-Type"]; ok {
+			if strings.Contains(procType, "ENCRYPTED") {
+				if password != nil {
+					return x509.DecryptPEMBlock(keyDER, password)
+				}
+				return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
+			}
+		}
+		return keyDER.Bytes, nil
+	}
+
+	return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
+}
+
+// CheckSignature verifies a signature made by the key on a CSR, such
+// as on the CSR itself.
+func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error {
+	var hashType crypto.Hash
+
+	switch algo {
+	case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
+		hashType = crypto.SHA1
+	case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
+		hashType = crypto.SHA256
+	case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
+		hashType = crypto.SHA384
+	case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
+		hashType = crypto.SHA512
+	default:
+		return x509.ErrUnsupportedAlgorithm
+	}
+
+	if !hashType.Available() {
+		return x509.ErrUnsupportedAlgorithm
+	}
+	h := hashType.New()
+
+	h.Write(signed)
+	digest := h.Sum(nil)
+
+	switch pub := csr.PublicKey.(type) {
+	case *rsa.PublicKey:
+		return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+	case *ecdsa.PublicKey:
+		ecdsaSig := new(struct{ R, S *big.Int })
+		if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+			return err
+		}
+		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+			return errors.New("x509: ECDSA signature contained zero or negative values")
+		}
+		if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
+			return errors.New("x509: ECDSA verification failure")
+		}
+		return nil
+	}
+	return x509.ErrUnsupportedAlgorithm
+}
+
+// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
+func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
+	in = bytes.TrimSpace(in)
+	p, rest := pem.Decode(in)
+	if p != nil {
+		if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
+			return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
+		}
+
+		csr, err = x509.ParseCertificateRequest(p.Bytes)
+	} else {
+		csr, err = x509.ParseCertificateRequest(in)
+	}
+
+	if err != nil {
+		return nil, rest, err
+	}
+
+	err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
+	if err != nil {
+		return nil, rest, err
+	}
+
+	return csr, rest, nil
+}
+
+// ParseCSRPEM parses a PEM-encoded certificate signing request.
+// It does not check the signature. This is useful for dumping data from a CSR
+// locally.
+func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
+	block, _ := pem.Decode([]byte(csrPEM))
+	// pem.Decode returns a nil block when the input contains no PEM data.
+	if block == nil {
+		return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+	}
+	der := block.Bytes
+	csrObject, err := x509.ParseCertificateRequest(der)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return csrObject, nil
+}
+
+// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
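+// For example (editor's note, derived from the switch below), a
+// 2048-bit RSA key maps to x509.SHA256WithRSA and a P-256 ECDSA key to
+// x509.ECDSAWithSHA256.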
+func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+	switch pub := priv.Public().(type) {
+	case *rsa.PublicKey:
+		bitLength := pub.N.BitLen()
+		switch {
+		case bitLength >= 4096:
+			return x509.SHA512WithRSA
+		case bitLength >= 3072:
+			return x509.SHA384WithRSA
+		case bitLength >= 2048:
+			return x509.SHA256WithRSA
+		default:
+			return x509.SHA1WithRSA
+		}
+	case *ecdsa.PublicKey:
+		switch pub.Curve {
+		case elliptic.P521():
+			return x509.ECDSAWithSHA512
+		case elliptic.P384():
+			return x509.ECDSAWithSHA384
+		case elliptic.P256():
+			return x509.ECDSAWithSHA256
+		default:
+			return x509.ECDSAWithSHA1
+		}
+	default:
+		return x509.UnknownSignatureAlgorithm
+	}
+}
+
+// LoadClientCertificate loads a key/certificate pair from PEM files.
+func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
+	if certFile != "" && keyFile != "" {
+		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+		if err != nil {
+			log.Criticalf("Unable to read client certificate from file: %s or key from file: %s", certFile, keyFile)
+			return nil, err
+		}
+		log.Debug("Client certificate loaded ")
+		return &cert, nil
+	}
+	return nil, nil
+}
+
+// CreateTLSConfig creates a tls.Config object from certs and roots.
+func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
+	var certs []tls.Certificate
+	if cert != nil {
+		certs = []tls.Certificate{*cert}
+	}
+	return &tls.Config{
+		Certificates: certs,
+		RootCAs:      remoteCAs,
+	}
+}
diff --git a/vendor/github.com/cloudflare/cfssl/info/info.go b/vendor/github.com/cloudflare/cfssl/info/info.go
new file mode 100644
index 0000000..926a411
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/info/info.go
@@ -0,0 +1,15 @@
+// Package info contains the definitions for the info endpoint.
+package info
+
+// Req is the request struct for an info API request.
+type Req struct {
+	Label   string `json:"label"`
+	Profile string `json:"profile"`
+}
+
+// Resp is the response for an Info API request.
+type Resp struct {
+	Certificate  string   `json:"certificate"`
+	Usage        []string `json:"usages"`
+	ExpiryString string   `json:"expiry"`
+}
diff --git a/vendor/github.com/cloudflare/cfssl/initca/initca.go b/vendor/github.com/cloudflare/cfssl/initca/initca.go
new file mode 100644
index 0000000..c9ab3bb
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/initca/initca.go
@@ -0,0 +1,224 @@
+// Package initca contains code to initialise a certificate authority,
+// generating a new root key and certificate.
+package initca
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"errors"
+	"io/ioutil"
+	"time"
+
+	"github.com/cloudflare/cfssl/config"
+	"github.com/cloudflare/cfssl/csr"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/log"
+	"github.com/cloudflare/cfssl/signer"
+	"github.com/cloudflare/cfssl/signer/local"
+)
+
+// validator contains the default validation logic for certificate
+// authority certificates. The only requirement here is that the
+// certificate have a non-empty subject field.
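+// For example (editor's note), a request with CN "My Root CA" passes,
+// while a request with an empty CN and only empty Names is rejected
+// with a PolicyError/InvalidRequest.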
+func validator(req *csr.CertificateRequest) error {
+	if req.CN != "" {
+		return nil
+	}
+
+	if len(req.Names) == 0 {
+		return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+	}
+
+	for i := range req.Names {
+		if csr.IsNameEmpty(req.Names[i]) {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+		}
+	}
+
+	return nil
+}
+
+// New creates a new root certificate from the certificate request.
+func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
+	policy := CAPolicy()
+	if req.CA != nil {
+		if req.CA.Expiry != "" {
+			policy.Default.ExpiryString = req.CA.Expiry
+			policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+			if err != nil {
+				return
+			}
+		}
+
+		policy.Default.CAConstraint.MaxPathLen = req.CA.PathLength
+		if req.CA.PathLength != 0 && req.CA.PathLenZero == true {
+			log.Infof("ignore invalid 'pathlenzero' value")
+		} else {
+			policy.Default.CAConstraint.MaxPathLenZero = req.CA.PathLenZero
+		}
+	}
+
+	g := &csr.Generator{Validator: validator}
+	csrPEM, key, err = g.ProcessRequest(req)
+	if err != nil {
+		log.Errorf("failed to process request: %v", err)
+		key = nil
+		return
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEM(key)
+	if err != nil {
+		log.Errorf("failed to parse private key: %v", err)
+		return
+	}
+
+	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), policy)
+	if err != nil {
+		log.Errorf("failed to create signer: %v", err)
+		return
+	}
+
+	signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
+	cert, err = s.Sign(signReq)
+
+	return
+}
+
+// NewFromPEM creates a new root certificate from the key file passed in.
+func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {
+	privData, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEM(privData)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return NewFromSigner(req, priv)
+}
+
+// RenewFromPEM re-creates a root certificate from the CA cert and key
+// files. The resulting root certificate will have the input CA certificate
+// as the template and have the same expiry length. E.g. if the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and expire in one year as well.
+func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
+	caBytes, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, err
+	}
+
+	ca, err := helpers.ParseCertificatePEM(caBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	keyBytes, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	key, err := helpers.ParsePrivateKeyPEM(keyBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return RenewFromSigner(ca, key)
+}
+
+// NewFromSigner creates a new root certificate from a crypto.Signer.
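+//
+// A usage sketch (editor's addition; priv is assumed to be an existing
+// crypto.Signer):
+//
+//	cert, csrPEM, err := NewFromSigner(req, priv)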
+func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+	policy := CAPolicy()
+	if req.CA != nil {
+		if req.CA.Expiry != "" {
+			policy.Default.ExpiryString = req.CA.Expiry
+			policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+
+		policy.Default.CAConstraint.MaxPathLen = req.CA.PathLength
+		if req.CA.PathLength != 0 && req.CA.PathLenZero == true {
+			log.Infof("ignore invalid 'pathlenzero' value")
+		} else {
+			policy.Default.CAConstraint.MaxPathLenZero = req.CA.PathLenZero
+		}
+	}
+
+	csrPEM, err = csr.Generate(priv, req)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), policy)
+	if err != nil {
+		log.Errorf("failed to create signer: %v", err)
+		return
+	}
+
+	signReq := signer.SignRequest{Request: string(csrPEM)}
+	cert, err = s.Sign(signReq)
+	return
+}
+
+// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer.
+// The resulting root certificate will have the CA certificate
+// as the template and have the same expiry length. E.g. if the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and expire in one year as well.
+func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
+	if !ca.IsCA {
+		return nil, errors.New("input certificate is not a CA cert")
+	}
+
+	// matching certificate public key vs private key
+	switch {
+	case ca.PublicKeyAlgorithm == x509.RSA:
+		var rsaPublicKey *rsa.PublicKey
+		var ok bool
+		if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+		if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+	case ca.PublicKeyAlgorithm == x509.ECDSA:
+		var ecdsaPublicKey *ecdsa.PublicKey
+		var ok bool
+		if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+		if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+	default:
+		return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC)
+	}
+
+	req := csr.ExtractCertificateRequest(ca)
+
+	cert, _, err := NewFromSigner(req, priv)
+	return cert, err
+}
+
+// CAPolicy contains the CA issuing policy as default policy.
+var CAPolicy = func() *config.Signing {
+	return &config.Signing{
+		Default: &config.SigningProfile{
+			Usage:        []string{"cert sign", "crl sign"},
+			ExpiryString: "43800h",
+			Expiry:       5 * helpers.OneYear,
+			CAConstraint: config.CAConstraint{IsCA: true},
+		},
+	}
+}
diff --git a/vendor/github.com/cloudflare/cfssl/log/log.go b/vendor/github.com/cloudflare/cfssl/log/log.go
new file mode 100644
index 0000000..956c9d4
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/log/log.go
@@ -0,0 +1,162 @@
+// Package log implements a wrapper around the Go standard library's
+// logging package. Clients should set the current log level; only
+// messages at or above that level will actually be logged. For example,
+// if Level is set to LevelWarning, only log messages at the Warning,
+// Error, and Critical levels will be logged.
+package log
+
+import (
+	"fmt"
+	"log"
+	"os"
+)
+
+// The following constants represent logging levels in increasing levels of seriousness.
+const (
+	// LevelDebug is the log level for Debug statements.
+ LevelDebug = iota + // LevelInfo is the log level for Info statements. + LevelInfo + // LevelWarning is the log level for Warning statements. + LevelWarning + // LevelError is the log level for Error statements. + LevelError + // LevelCritical is the log level for Critical statements. + LevelCritical + // LevelFatal is the log level for Fatal statements. + LevelFatal +) + +var levelPrefix = [...]string{ + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarning: "WARNING", + LevelError: "ERROR", + LevelCritical: "CRITICAL", + LevelFatal: "FATAL", +} + +// Level stores the current logging level. +var Level = LevelInfo + +// SyslogWriter specifies the necessary methods for an alternate output +// destination passed in via SetLogger. +// +// SyslogWriter is satisfied by *syslog.Writer. +type SyslogWriter interface { + Debug(string) + Info(string) + Warning(string) + Err(string) + Crit(string) + Emerg(string) +} + +// syslogWriter stores the SetLogger() parameter. +var syslogWriter SyslogWriter + +// SetLogger sets the output used for output by this package. +// A *syslog.Writer is a good choice for the logger parameter. +// Call with a nil parameter to revert to default behavior. +func SetLogger(logger SyslogWriter) { + syslogWriter = logger +} + +func print(l int, msg string) { + if l >= Level { + if syslogWriter != nil { + switch l { + case LevelDebug: + syslogWriter.Debug(msg) + case LevelInfo: + syslogWriter.Info(msg) + case LevelWarning: + syslogWriter.Warning(msg) + case LevelError: + syslogWriter.Err(msg) + case LevelCritical: + syslogWriter.Crit(msg) + case LevelFatal: + syslogWriter.Emerg(msg) + } + } else { + log.Printf("[%s] %s", levelPrefix[l], msg) + } + } +} + +func outputf(l int, format string, v []interface{}) { + print(l, fmt.Sprintf(format, v...)) +} + +func output(l int, v []interface{}) { + print(l, fmt.Sprint(v...)) +} + +// Fatalf logs a formatted message at the "fatal" level and then exits. The +// arguments are handled in the same manner as fmt.Printf. +func Fatalf(format string, v ...interface{}) { + outputf(LevelFatal, format, v) + os.Exit(1) +} + +// Fatal logs its arguments at the "fatal" level and then exits. +func Fatal(v ...interface{}) { + output(LevelFatal, v) + os.Exit(1) +} + +// Criticalf logs a formatted message at the "critical" level. The +// arguments are handled in the same manner as fmt.Printf. +func Criticalf(format string, v ...interface{}) { + outputf(LevelCritical, format, v) +} + +// Critical logs its arguments at the "critical" level. +func Critical(v ...interface{}) { + output(LevelCritical, v) +} + +// Errorf logs a formatted message at the "error" level. The arguments +// are handled in the same manner as fmt.Printf. +func Errorf(format string, v ...interface{}) { + outputf(LevelError, format, v) +} + +// Error logs its arguments at the "error" level. +func Error(v ...interface{}) { + output(LevelError, v) +} + +// Warningf logs a formatted message at the "warning" level. The +// arguments are handled in the same manner as fmt.Printf. +func Warningf(format string, v ...interface{}) { + outputf(LevelWarning, format, v) +} + +// Warning logs its arguments at the "warning" level. +func Warning(v ...interface{}) { + output(LevelWarning, v) +} + +// Infof logs a formatted message at the "info" level. The arguments +// are handled in the same manner as fmt.Printf. +func Infof(format string, v ...interface{}) { + outputf(LevelInfo, format, v) +} + +// Info logs its arguments at the "info" level. 
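+//
+// For example (editor's note, as used elsewhere in this patch):
+//
+//	log.Info("received CSR")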
+func Info(v ...interface{}) { + output(LevelInfo, v) +} + +// Debugf logs a formatted message at the "debug" level. The arguments +// are handled in the same manner as fmt.Printf. +func Debugf(format string, v ...interface{}) { + outputf(LevelDebug, format, v) +} + +// Debug logs its arguments at the "debug" level. +func Debug(v ...interface{}) { + output(LevelDebug, v) +} diff --git a/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go b/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go new file mode 100644 index 0000000..a19b113 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go @@ -0,0 +1,13 @@ +// Package config in the ocsp directory provides configuration data for an OCSP +// signer. +package config + +import "time" + +// Config contains configuration information required to set up an OCSP signer. +type Config struct { + CACertFile string + ResponderCertFile string + KeyFile string + Interval time.Duration +} diff --git a/vendor/github.com/cloudflare/cfssl/signer/local/local.go b/vendor/github.com/cloudflare/cfssl/signer/local/local.go new file mode 100644 index 0000000..6f2b5e5 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/signer/local/local.go @@ -0,0 +1,469 @@ +// Package local implements certificate signature functionality for CFSSL. +package local + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "io" + "io/ioutil" + "math/big" + "net" + "net/mail" + "os" + + "github.com/cloudflare/cfssl/certdb" + "github.com/cloudflare/cfssl/config" + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/info" + "github.com/cloudflare/cfssl/log" + "github.com/cloudflare/cfssl/signer" + "github.com/google/certificate-transparency/go" + "github.com/google/certificate-transparency/go/client" +) + +// Signer contains a signer that uses the standard library to +// support both ECDSA and RSA CA keys. +type Signer struct { + ca *x509.Certificate + priv crypto.Signer + policy *config.Signing + sigAlgo x509.SignatureAlgorithm + dbAccessor certdb.Accessor +} + +// NewSigner creates a new Signer directly from a +// private key and certificate, with optional policy. +func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) { + if policy == nil { + policy = &config.Signing{ + Profiles: map[string]*config.SigningProfile{}, + Default: config.DefaultConfig()} + } + + if !policy.Valid() { + return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) + } + + return &Signer{ + ca: cert, + priv: priv, + sigAlgo: sigAlgo, + policy: policy, + }, nil +} + +// NewSignerFromFile generates a new local signer from a caFile +// and a caKey file, both PEM encoded. 
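+//
+// A usage sketch (editor's addition; file names are illustrative, and a
+// nil policy falls back to the default signing policy):
+//
+//	s, err := NewSignerFromFile("ca.pem", "ca-key.pem", nil)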
+func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
+	log.Debug("Loading CA: ", caFile)
+	ca, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, err
+	}
+	log.Debug("Loading CA key: ", caKeyFile)
+	cakey, err := ioutil.ReadFile(caKeyFile)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
+	}
+
+	parsedCa, err := helpers.ParseCertificatePEM(ca)
+	if err != nil {
+		return nil, err
+	}
+
+	strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
+	password := []byte(strPassword)
+	if strPassword == "" {
+		password = nil
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
+	if err != nil {
+		log.Debugf("Malformed private key %v", err)
+		return nil, err
+	}
+
+	return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
+}
+
+func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
+	var distPoints = template.CRLDistributionPoints
+	err = signer.FillTemplate(template, s.policy.Default, profile)
+	if distPoints != nil && len(distPoints) > 0 {
+		template.CRLDistributionPoints = distPoints
+	}
+	if err != nil {
+		return
+	}
+
+	var initRoot bool
+	if s.ca == nil {
+		if !template.IsCA {
+			err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+			return
+		}
+		template.DNSNames = nil
+		template.EmailAddresses = nil
+		s.ca = template
+		initRoot = true
+	}
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+	}
+	if initRoot {
+		s.ca, err = x509.ParseCertificate(derBytes)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+		}
+	}
+
+	cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+	log.Infof("signed certificate with serial number %d", template.SerialNumber)
+	return
+}
+
+// replaceSliceIfEmpty replaces the contents of replaced with newContents if
+// the slice referenced by replaced is empty.
+func replaceSliceIfEmpty(replaced, newContents *[]string) {
+	if len(*replaced) == 0 {
+		*replaced = *newContents
+	}
+}
+
+// PopulateSubjectFromCSR has functionality similar to Name, except
+// it fills the fields of the resulting pkix.Name with req's if the
+// subject's corresponding fields are empty.
+func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
+	// if no subject, use req
+	if s == nil {
+		return req
+	}
+
+	name := s.Name()
+
+	if name.CommonName == "" {
+		name.CommonName = req.CommonName
+	}
+
+	replaceSliceIfEmpty(&name.Country, &req.Country)
+	replaceSliceIfEmpty(&name.Province, &req.Province)
+	replaceSliceIfEmpty(&name.Locality, &req.Locality)
+	replaceSliceIfEmpty(&name.Organization, &req.Organization)
+	replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
+	if name.SerialNumber == "" {
+		name.SerialNumber = req.SerialNumber
+	}
+	return name
+}
+
+// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
+// content of hosts, if it is not nil.
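+// For example (editor's note, following the parsing order below), hosts of
+// []string{"example.com", "192.0.2.10", "ops@example.com"} populate one
+// DNS name, one IP address, and one email address, respectively.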
+func OverrideHosts(template *x509.Certificate, hosts []string) { + if hosts != nil { + template.IPAddresses = []net.IP{} + template.EmailAddresses = []string{} + template.DNSNames = []string{} + } + + for i := range hosts { + if ip := net.ParseIP(hosts[i]); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil { + template.EmailAddresses = append(template.EmailAddresses, email.Address) + } else { + template.DNSNames = append(template.DNSNames, hosts[i]) + } + } + +} + +// Sign signs a new certificate based on the PEM-encoded client +// certificate or certificate request with the signing profile, +// specified by profileName. +func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) { + profile, err := signer.Profile(s, req.Profile) + if err != nil { + return + } + + block, _ := pem.Decode([]byte(req.Request)) + if block == nil { + return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed) + } + + if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" { + return nil, cferr.Wrap(cferr.CSRError, + cferr.BadRequest, errors.New("not a certificate or csr")) + } + + csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes) + if err != nil { + return nil, err + } + + // Copy out only the fields from the CSR authorized by policy. + safeTemplate := x509.Certificate{} + // If the profile contains no explicit whitelist, assume that all fields + // should be copied from the CSR. + if profile.CSRWhitelist == nil { + safeTemplate = *csrTemplate + } else { + if profile.CSRWhitelist.Subject { + safeTemplate.Subject = csrTemplate.Subject + } + if profile.CSRWhitelist.PublicKeyAlgorithm { + safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm + } + if profile.CSRWhitelist.PublicKey { + safeTemplate.PublicKey = csrTemplate.PublicKey + } + if profile.CSRWhitelist.SignatureAlgorithm { + safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm + } + if profile.CSRWhitelist.DNSNames { + safeTemplate.DNSNames = csrTemplate.DNSNames + } + if profile.CSRWhitelist.IPAddresses { + safeTemplate.IPAddresses = csrTemplate.IPAddresses + } + if profile.CSRWhitelist.EmailAddresses { + safeTemplate.EmailAddresses = csrTemplate.EmailAddresses + } + } + + if req.CRLOverride != "" { + safeTemplate.CRLDistributionPoints = []string{req.CRLOverride} + } + + if safeTemplate.IsCA { + if !profile.CAConstraint.IsCA { + log.Error("local signer policy disallows issuing CA certificate") + return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest) + } + + if s.ca != nil && s.ca.MaxPathLen > 0 { + if safeTemplate.MaxPathLen >= s.ca.MaxPathLen { + log.Error("local signer certificate disallows CA MaxPathLen extending") + // do not sign a cert with pathlen > current + return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest) + } + } else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero { + log.Error("local signer certificate disallows issuing CA certificate") + // signer has pathlen of 0, do not sign more intermediate CAs + return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest) + } + } + + OverrideHosts(&safeTemplate, req.Hosts) + safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject) + + // If there is a whitelist, ensure that both the Common Name and SAN DNSNames match + if profile.NameWhitelist != nil { + if safeTemplate.Subject.CommonName != "" { + if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) 
== nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + for _, name := range safeTemplate.DNSNames { + if profile.NameWhitelist.Find([]byte(name)) == nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + for _, name := range safeTemplate.EmailAddresses { + if profile.NameWhitelist.Find([]byte(name)) == nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + } + + if profile.ClientProvidesSerialNumbers { + if req.Serial == nil { + return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial) + } + safeTemplate.SerialNumber = req.Serial + } else { + // RFC 5280 4.1.2.2: + // Certificate users MUST be able to handle serialNumber + // values up to 20 octets. Conforming CAs MUST NOT use + // serialNumber values longer than 20 octets. + // + // If CFSSL is providing the serial numbers, it makes + // sense to use the max supported size. + serialNumber := make([]byte, 20) + _, err = io.ReadFull(rand.Reader, serialNumber) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) + } + + // SetBytes interprets buf as the bytes of a big-endian + // unsigned integer. The leading byte should be masked + // off to ensure it isn't negative. + serialNumber[0] &= 0x7F + + safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber) + } + + if len(req.Extensions) > 0 { + for _, ext := range req.Extensions { + oid := asn1.ObjectIdentifier(ext.ID) + if !profile.ExtensionWhitelist[oid.String()] { + return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) + } + + rawValue, err := hex.DecodeString(ext.Value) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err) + } + + safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{ + Id: oid, + Critical: ext.Critical, + Value: rawValue, + }) + } + } + + var certTBS = safeTemplate + + if len(profile.CTLogServers) > 0 { + // Add a poison extension which prevents validation + var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}} + var poisonedPreCert = certTBS + poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension) + cert, err = s.sign(&poisonedPreCert, profile) + if err != nil { + return + } + + derCert, _ := pem.Decode(cert) + prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw} + var sctList []ct.SignedCertificateTimestamp + + for _, server := range profile.CTLogServers { + log.Infof("submitting poisoned precertificate to %s", server) + var ctclient = client.New(server, nil) + var resp *ct.SignedCertificateTimestamp + resp, err = ctclient.AddPreChain(prechain) + if err != nil { + return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err) + } + sctList = append(sctList, *resp) + } + + var serializedSCTList []byte + serializedSCTList, err = serializeSCTList(sctList) + if err != nil { + return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) + } + + // Serialize again as an octet string before embedding + serializedSCTList, err = asn1.Marshal(serializedSCTList) + if err != nil { + return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) + } + + var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList} + certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension) + } + var signedCert []byte + signedCert, err = s.sign(&certTBS, profile) + if err != nil { + return nil, err + } + + if s.dbAccessor != nil { + var 
certRecord = certdb.CertificateRecord{ + Serial: certTBS.SerialNumber.String(), + // this relies on the specific behavior of x509.CreateCertificate + // which updates certTBS AuthorityKeyId from the signer's SubjectKeyId + AKI: hex.EncodeToString(certTBS.AuthorityKeyId), + CALabel: req.Label, + Status: "good", + Expiry: certTBS.NotAfter, + PEM: string(signedCert), + } + + err = s.dbAccessor.InsertCertificate(certRecord) + if err != nil { + return nil, err + } + log.Debug("saved certificate with serial number ", certTBS.SerialNumber) + } + + return signedCert, nil +} + +func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) { + var buf bytes.Buffer + for _, sct := range sctList { + sct, err := ct.SerializeSCT(sct) + if err != nil { + return nil, err + } + binary.Write(&buf, binary.BigEndian, uint16(len(sct))) + buf.Write(sct) + } + + var sctListLengthField = make([]byte, 2) + binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len())) + return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil +} + +// Info returns a populated info.Resp struct or an error. +func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) { + cert, err := s.Certificate(req.Label, req.Profile) + if err != nil { + return + } + + profile, err := signer.Profile(s, req.Profile) + if err != nil { + return + } + + resp = new(info.Resp) + if cert.Raw != nil { + resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))) + } + resp.Usage = profile.Usage + resp.ExpiryString = profile.ExpiryString + + return +} + +// SigAlgo returns the signer's signature algorithm. +func (s *Signer) SigAlgo() x509.SignatureAlgorithm { + return s.sigAlgo +} + +// Certificate returns the signer's certificate. +func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) { + cert := *s.ca + return &cert, nil +} + +// SetPolicy sets the signer's signature policy. +func (s *Signer) SetPolicy(policy *config.Signing) { + s.policy = policy +} + +// SetDBAccessor sets the signer's cert db accessor. +func (s *Signer) SetDBAccessor(dba certdb.Accessor) { + s.dbAccessor = dba +} + +// Policy returns the signer's policy. +func (s *Signer) Policy() *config.Signing { + return s.policy +} diff --git a/vendor/github.com/cloudflare/cfssl/signer/signer.go b/vendor/github.com/cloudflare/cfssl/signer/signer.go new file mode 100644 index 0000000..a34fa45 --- /dev/null +++ b/vendor/github.com/cloudflare/cfssl/signer/signer.go @@ -0,0 +1,412 @@ +// Package signer implements certificate signature functionality for CFSSL. +package signer + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "math/big" + "strings" + "time" + + "github.com/cloudflare/cfssl/certdb" + "github.com/cloudflare/cfssl/config" + "github.com/cloudflare/cfssl/csr" + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/info" +) + +// Subject contains the information that should be used to override the +// subject information when signing a certificate. +type Subject struct { + CN string + Names []csr.Name `json:"names"` + SerialNumber string +} + +// Extension represents a raw extension to be included in the certificate. The +// "value" field must be hex encoded.
+type Extension struct { + ID config.OID `json:"id"` + Critical bool `json:"critical"` + Value string `json:"value"` +} + +// SignRequest stores a signature request, which contains the hostname, +// the CSR, optional subject information, and the signature profile. +// +// Extensions provided in the SignRequest are copied into the certificate, as +// long as they are in the ExtensionWhitelist for the signer's policy. +// Extensions requested in the CSR are ignored, except for those processed by +// ParseCertificateRequest (mainly subjectAltName). +type SignRequest struct { + Hosts []string `json:"hosts"` + Request string `json:"certificate_request"` + Subject *Subject `json:"subject,omitempty"` + Profile string `json:"profile"` + CRLOverride string `json:"crl_override"` + Label string `json:"label"` + Serial *big.Int `json:"serial,omitempty"` + Extensions []Extension `json:"extensions,omitempty"` +} + +// appendIf appends s to a if s is not an empty string. +func appendIf(s string, a *[]string) { + if s != "" { + *a = append(*a, s) + } +} + +// Name returns the PKIX name for the subject. +func (s *Subject) Name() pkix.Name { + var name pkix.Name + name.CommonName = s.CN + + for _, n := range s.Names { + appendIf(n.C, &name.Country) + appendIf(n.ST, &name.Province) + appendIf(n.L, &name.Locality) + appendIf(n.O, &name.Organization) + appendIf(n.OU, &name.OrganizationalUnit) + } + name.SerialNumber = s.SerialNumber + return name +} + +// SplitHosts takes a comma-separated list of hosts and returns a slice +// with the hosts split. +func SplitHosts(hostList string) []string { + if hostList == "" { + return nil + } + + return strings.Split(hostList, ",") +} + +// A Signer contains a CA's certificate and private key for signing +// certificates, a Signing policy to refer to and a SignatureAlgorithm. +type Signer interface { + Info(info.Req) (*info.Resp, error) + Policy() *config.Signing + SetDBAccessor(certdb.Accessor) + SetPolicy(*config.Signing) + SigAlgo() x509.SignatureAlgorithm + Sign(req SignRequest) (cert []byte, err error) +} + +// Profile gets the specific profile from the signer. +func Profile(s Signer, profile string) (*config.SigningProfile, error) { + var p *config.SigningProfile + policy := s.Policy() + if policy != nil && policy.Profiles != nil && profile != "" { + p = policy.Profiles[profile] + } + + if p == nil && policy != nil { + p = policy.Default + } + + if p == nil { + return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil")) + } + return p, nil +} + +// DefaultSigAlgo returns an appropriate X.509 signature algorithm given +// the CA's private key. +func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm { + pub := priv.Public() + switch pub := pub.(type) { + case *rsa.PublicKey: + keySize := pub.N.BitLen() + switch { + case keySize >= 4096: + return x509.SHA512WithRSA + case keySize >= 3072: + return x509.SHA384WithRSA + case keySize >= 2048: + return x509.SHA256WithRSA + default: + return x509.SHA1WithRSA + } + case *ecdsa.PublicKey: + switch pub.Curve { + case elliptic.P256(): + return x509.ECDSAWithSHA256 + case elliptic.P384(): + return x509.ECDSAWithSHA384 + case elliptic.P521(): + return x509.ECDSAWithSHA512 + default: + return x509.ECDSAWithSHA1 + } + default: + return x509.UnknownSignatureAlgorithm + } +} + +// ParseCertificateRequest takes an incoming certificate request and +// builds a certificate template from it.
+func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) { + csrv, err := x509.ParseCertificateRequest(csrBytes) + if err != nil { + err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) + return + } + + err = helpers.CheckSignature(csrv, csrv.SignatureAlgorithm, csrv.RawTBSCertificateRequest, csrv.Signature) + if err != nil { + err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err) + return + } + + template = &x509.Certificate{ + Subject: csrv.Subject, + PublicKeyAlgorithm: csrv.PublicKeyAlgorithm, + PublicKey: csrv.PublicKey, + SignatureAlgorithm: s.SigAlgo(), + DNSNames: csrv.DNSNames, + IPAddresses: csrv.IPAddresses, + EmailAddresses: csrv.EmailAddresses, + } + + for _, val := range csrv.Extensions { + // Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9) + // extension and append to template if necessary + if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) { + var constraints csr.BasicConstraints + var rest []byte + + if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil { + return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) + } else if len(rest) != 0 { + return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints")) + } + + template.BasicConstraintsValid = true + template.IsCA = constraints.IsCA + template.MaxPathLen = constraints.MaxPathLen + template.MaxPathLenZero = template.MaxPathLen == 0 + } + } + + return +} + +type subjectPublicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + SubjectPublicKey asn1.BitString +} + +// ComputeSKI derives an SKI from the certificate's public key in a +// standard manner. This is done by computing the SHA-1 digest of the +// SubjectPublicKeyInfo component of the certificate. +func ComputeSKI(template *x509.Certificate) ([]byte, error) { + pub := template.PublicKey + encodedPub, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + return nil, err + } + + var subPKI subjectPublicKeyInfo + _, err = asn1.Unmarshal(encodedPub, &subPKI) + if err != nil { + return nil, err + } + + pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) + return pubHash[:], nil +} + +// FillTemplate is a utility function that tries to load as much of +// the certificate template as possible from the profiles and current +// template. It fills in the key uses, expiration, revocation URLs +// and SKI. +func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error { + ski, err := ComputeSKI(template) + + var ( + eku []x509.ExtKeyUsage + ku x509.KeyUsage + backdate time.Duration + expiry time.Duration + notBefore time.Time + notAfter time.Time + crlURL, ocspURL string + issuerURL = profile.IssuerURL + ) + + // The third value returned from Usages is a list of unknown key usages. + // This should be used when validating the profile at load, and isn't used + // here. 
+ ku, eku, _ = profile.Usages() + if profile.IssuerURL == nil { + issuerURL = defaultProfile.IssuerURL + } + + if ku == 0 && len(eku) == 0 { + return cferr.New(cferr.PolicyError, cferr.NoKeyUsages) + } + + if expiry = profile.Expiry; expiry == 0 { + expiry = defaultProfile.Expiry + } + + if crlURL = profile.CRL; crlURL == "" { + crlURL = defaultProfile.CRL + } + if ocspURL = profile.OCSP; ocspURL == "" { + ocspURL = defaultProfile.OCSP + } + if backdate = profile.Backdate; backdate == 0 { + backdate = -5 * time.Minute + } else { + backdate = -1 * profile.Backdate + } + + if !profile.NotBefore.IsZero() { + notBefore = profile.NotBefore.UTC() + } else { + notBefore = time.Now().Round(time.Minute).Add(backdate).UTC() + } + + if !profile.NotAfter.IsZero() { + notAfter = profile.NotAfter.UTC() + } else { + notAfter = notBefore.Add(expiry).UTC() + } + + template.NotBefore = notBefore + template.NotAfter = notAfter + template.KeyUsage = ku + template.ExtKeyUsage = eku + template.BasicConstraintsValid = true + template.IsCA = profile.CAConstraint.IsCA + if template.IsCA { + template.MaxPathLen = profile.CAConstraint.MaxPathLen + if template.MaxPathLen == 0 { + template.MaxPathLenZero = profile.CAConstraint.MaxPathLenZero + } + template.DNSNames = nil + template.EmailAddresses = nil + } + template.SubjectKeyId = ski + + if ocspURL != "" { + template.OCSPServer = []string{ocspURL} + } + if crlURL != "" { + template.CRLDistributionPoints = []string{crlURL} + } + + if len(issuerURL) != 0 { + template.IssuingCertificateURL = issuerURL + } + if len(profile.Policies) != 0 { + err = addPolicies(template, profile.Policies) + if err != nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) + } + } + if profile.OCSPNoCheck { + ocspNoCheckExtension := pkix.Extension{ + Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5}, + Critical: false, + Value: []byte{0x05, 0x00}, + } + template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension) + } + + return nil +} + +type policyInformation struct { + PolicyIdentifier asn1.ObjectIdentifier + Qualifiers []interface{} `asn1:"tag:optional,omitempty"` +} + +type cpsPolicyQualifier struct { + PolicyQualifierID asn1.ObjectIdentifier + Qualifier string `asn1:"tag:optional,ia5"` +} + +type userNotice struct { + ExplicitText string `asn1:"tag:optional,utf8"` +} +type userNoticePolicyQualifier struct { + PolicyQualifierID asn1.ObjectIdentifier + Qualifier userNotice +} + +var ( + // Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents: + // iso(1) identified-organization(3) dod(6) internet(1) security(5) + // mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1) + iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} + // iso(1) identified-organization(3) dod(6) internet(1) security(5) + // mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2) + iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2} + + // CTPoisonOID is the object ID of the critical poison extension for precertificates + // https://tools.ietf.org/html/rfc6962#page-9 + CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} + + // SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension + // https://tools.ietf.org/html/rfc6962#page-14 + SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} +) + +// addPolicies adds Certificate Policies and optional Policy Qualifiers to a +// certificate, based on the input config. 
Go's x509 library allows setting +// Certificate Policies easily, but does not support nested Policy Qualifiers +// under those policies. So we need to construct the ASN.1 structure ourselves. +func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error { + asn1PolicyList := []policyInformation{} + + for _, policy := range policies { + pi := policyInformation{ + // The PolicyIdentifier is an OID assigned to a given issuer. + PolicyIdentifier: asn1.ObjectIdentifier(policy.ID), + } + for _, qualifier := range policy.Qualifiers { + switch qualifier.Type { + case "id-qt-unotice": + pi.Qualifiers = append(pi.Qualifiers, + userNoticePolicyQualifier{ + PolicyQualifierID: iDQTUserNotice, + Qualifier: userNotice{ + ExplicitText: qualifier.Value, + }, + }) + case "id-qt-cps": + pi.Qualifiers = append(pi.Qualifiers, + cpsPolicyQualifier{ + PolicyQualifierID: iDQTCertificationPracticeStatement, + Qualifier: qualifier.Value, + }) + default: + return errors.New("Invalid qualifier type in Policies " + qualifier.Type) + } + } + asn1PolicyList = append(asn1PolicyList, pi) + } + + asn1Bytes, err := asn1.Marshal(asn1PolicyList) + if err != nil { + return err + } + + template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ + Id: asn1.ObjectIdentifier{2, 5, 29, 32}, + Critical: false, + Value: asn1Bytes, + }) + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/LICENSE b/vendor/github.com/cloudfoundry/gosigar/LICENSE new file mode 100644 index 0000000..11069ed --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/cloudfoundry/gosigar/NOTICE b/vendor/github.com/cloudfoundry/gosigar/NOTICE new file mode 100644 index 0000000..fda553b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/NOTICE @@ -0,0 +1,9 @@ +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/vendor/github.com/cloudfoundry/gosigar/README.md b/vendor/github.com/cloudfoundry/gosigar/README.md new file mode 100644 index 0000000..90d51f9 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/README.md @@ -0,0 +1,22 @@ +# Go sigar + +## Overview + +Go sigar is a golang implementation of the +[sigar API](https://github.com/hyperic/sigar). The Go version of +sigar has a very similar interface, but is being written from scratch +in pure go/cgo, rather than cgo bindings for libsigar. + +## Test drive + + $ go get github.com/cloudfoundry/gosigar + $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples + $ go run uptime.go + +## Supported platforms + +Currently targeting modern flavors of darwin and linux. + +## License + +Apache 2.0 diff --git a/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go new file mode 100644 index 0000000..0e80aa4 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go @@ -0,0 +1,69 @@ +package sigar + +import ( + "time" +) + +type ConcreteSigar struct{} + +func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { + // samplesCh is buffered to 1 value to immediately return first CPU sample + samplesCh := make(chan Cpu, 1) + + stopCh := make(chan struct{}) + + go func() { + var cpuUsage Cpu + + // Immediately provide non-delta value. + // samplesCh is buffered to 1 value, so it will not block. + cpuUsage.Get() + samplesCh <- cpuUsage + + ticker := time.NewTicker(collectionInterval) + + for { + select { + case <-ticker.C: + previousCpuUsage := cpuUsage + + cpuUsage.Get() + + select { + case samplesCh <- cpuUsage.Delta(previousCpuUsage): + default: + // Include default to avoid channel blocking + } + + case <-stopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) { + l := LoadAverage{} + err := l.Get() + return l, err +} + +func (c *ConcreteSigar) GetMem() (Mem, error) { + m := Mem{} + err := m.Get() + return m, err +} + +func (c *ConcreteSigar) GetSwap() (Swap, error) { + s := Swap{} + err := s.Get() + return s, err +} + +func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { + f := FileSystemUsage{} + err := f.Get(path) + return f, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go new file mode 100644 index 0000000..e3a8c4b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go @@ -0,0 +1,467 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +/* +#include <stdlib.h> +#include <sys/sysctl.h> +#include <sys/mount.h> +#include <mach/mach_init.h> +#include <mach/mach_host.h> +#include <mach/host_info.h> +#include <libproc.h> +#include <mach/processor_info.h> +#include <mach/vm_map.h> +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "syscall" + "time" + "unsafe" +) + +func (self *LoadAverage) Get() error { + avg := []C.double{0, 0, 0} + + C.getloadavg(&avg[0], C.int(len(avg))) + + self.One = float64(avg[0]) + self.Five = float64(avg[1]) + self.Fifteen = float64(avg[2]) + + return nil +} + +func (self *Uptime) Get() error { + tv := syscall.Timeval32{} + + if err := sysctlbyname("kern.boottime", &tv); err != nil { + return err + } + + self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() + + return nil +} + +func (self *Mem) Get() error { + var vmstat C.vm_statistics_data_t + + if err := sysctlbyname("hw.memsize", &self.Total); err != nil { + return err + } + + if err := vm_info(&vmstat); err != nil { + return err + } + + kern := uint64(vmstat.inactive_count) << 12 + self.Free = uint64(vmstat.free_count) << 12 + + self.Used = self.Total - self.Free + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +type xsw_usage struct { + Total, Avail, Used uint64 +} + +func (self *Swap) Get() error { + sw_usage := xsw_usage{} + + if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil { + return err + } + + self.Total = sw_usage.Total + self.Used = sw_usage.Used + self.Free = sw_usage.Avail + + return nil +} + +func (self *Cpu) Get() error { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpuload C.host_cpu_load_info_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics error=%d", status) + } + + self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) + self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) + self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) + self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) + + return nil +} + +func (self *CpuList) Get() error { + var count C.mach_msg_type_number_t + var cpuload *C.processor_cpu_load_info_data_t + var ncpu C.natural_t + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) + + bbuf := bytes.NewBuffer(buf) + + self.List = make([]Cpu, 0, ncpu) + + for i := 0; i < int(ncpu); i++ { + cpu := Cpu{} + + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return err + } + + cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) + cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) + cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) + cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) + + self.List = append(self.List, cpu) + } + +
return nil +} + +func (self *FileSystemList) Get() error { + num, err := getfsstat(nil, C.MNT_NOWAIT) + if num < 0 { + return err + } + + buf := make([]syscall.Statfs_t, num) + + num, err = getfsstat(buf, C.MNT_NOWAIT) + if err != nil { + return err + } + + fslist := make([]FileSystem, 0, num) + + for i := 0; i < num; i++ { + fs := FileSystem{} + + fs.DirName = bytePtrToString(&buf[i].Mntonname[0]) + fs.DevName = bytePtrToString(&buf[i].Mntfromname[0]) + fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0]) + + fslist = append(fslist, fs) + } + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0) + if n <= 0 { + return syscall.EINVAL + } + buf := make([]byte, n) + n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n) + if n <= 0 { + return syscall.ENOMEM + } + + var pid int32 + num := int(n) / binary.Size(pid) + list := make([]int, 0, num) + bbuf := bytes.NewBuffer(buf) + + for i := 0; i < num; i++ { + if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil { + return err + } + if pid == 0 { + continue + } + + list = append(list, int(pid)) + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Name = C.GoString(&info.pbsd.pbi_comm[0]) + + switch info.pbsd.pbi_status { + case C.SIDL: + self.State = RunStateIdle + case C.SRUN: + self.State = RunStateRun + case C.SSLEEP: + self.State = RunStateSleep + case C.SSTOP: + self.State = RunStateStop + case C.SZOMB: + self.State = RunStateZombie + default: + self.State = RunStateUnknown + } + + self.Ppid = int(info.pbsd.pbi_ppid) + + self.Tty = int(info.pbsd.e_tdev) + + self.Priority = int(info.ptinfo.pti_priority) + + self.Nice = int(info.pbsd.pbi_nice) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Size = uint64(info.ptinfo.pti_virtual_size) + self.Resident = uint64(info.ptinfo.pti_resident_size) + self.PageFaults = uint64(info.ptinfo.pti_faults) + + return nil +} + +func (self *ProcTime) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.User = + uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond) + + self.Sys = + uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond) + + self.Total = self.User + self.Sys + + self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) + + (uint64(info.pbsd.pbi_start_tvusec) / 1000) + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + var args []string + + argv := func(arg string) { + args = append(args, arg) + } + + err := kern_procargs(pid, nil, argv, nil) + + self.List = args + + return err +} + +func (self *ProcExe) Get(pid int) error { + exe := func(arg string) { + self.Name = arg + } + + return kern_procargs(pid, exe, nil, nil) +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, + exe func(string), + argv func(string), + env func(string, string)) error { + + mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + argmax := uintptr(C.ARG_MAX) + buf := make([]byte, argmax) + err := sysctl(mib, &buf[0], &argmax, nil, 0) + if err != nil { + return nil + } + + bbuf := bytes.NewBuffer(buf) + 
bbuf.Truncate(int(argmax)) + + var argc int32 + binary.Read(bbuf, binary.LittleEndian, &argc) + + path, err := bbuf.ReadBytes(0) + if exe != nil { + exe(string(chop(path))) + } + + // skip trailing \0's + for { + c, _ := bbuf.ReadByte() + if c != 0 { + bbuf.UnreadByte() + break // start of argv[0] + } + } + + for i := 0; i < int(argc); i++ { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + if argv != nil { + argv(string(chop(arg))) + } + } + + if env == nil { + return nil + } + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + pair := bytes.SplitN(chop(line), delim, 2) + env(string(pair[0]), string(pair[1])) + } + + return nil +} + +// XXX copied from zsyscall_darwin_amd64.go +func sysctl(mib []C.int, old *byte, oldlen *uintptr, + new *byte, newlen uintptr) (err error) { + var p0 unsafe.Pointer + p0 = unsafe.Pointer(&mib[0]) + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = e1 + } + return +} + +func vm_info(vmstat *C.vm_statistics_data_t) error { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT + + status := C.host_statistics( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics=%d", status) + } + + return nil +} + +// generic Sysctl buffer unmarshalling +func sysctlbyname(name string, data interface{}) (err error) { + val, err := syscall.Sysctl(name) + if err != nil { + return err + } + + buf := []byte(val) + + switch v := data.(type) { + case *uint64: + *v = *(*uint64)(unsafe.Pointer(&buf[0])) + return + } + + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) +} + +// syscall.Getfsstat() wrapper is broken, roll our own to workaround. +func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) { + var ptr uintptr + var size uintptr + + if len(buf) > 0 { + ptr = uintptr(unsafe.Pointer(&buf[0])) + size = unsafe.Sizeof(buf[0]) * uintptr(len(buf)) + } else { + ptr = uintptr(0) + size = uintptr(0) + } + + trap := uintptr(syscall.SYS_GETFSSTAT64) + ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags)) + + n = int(ret) + if errno != 0 { + err = errno + } + + return +} + +func task_info(pid int, info *C.struct_proc_taskallinfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return syscall.ENOMEM + } + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_format.go b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go new file mode 100644 index 0000000..d80a64e --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go @@ -0,0 +1,126 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "time" +) + +// Go version of apr_strfsize +func FormatSize(size uint64) string { + ord := []string{"K", "M", "G", "T", "P", "E"} + o := 0 + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + + if size < 973 { + fmt.Fprintf(w, "%3d ", size) + w.Flush() + return buf.String() + } + + for { + remain := size & 1023 + size >>= 10 + + if size >= 973 { + o++ + continue + } + + if size < 9 || (size == 9 && remain < 973) { + remain = ((remain * 5) + 256) / 512 + if remain >= 10 { + size++ + remain = 0 + } + + fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) + break + } + + if remain >= 512 { + size++ + } + + fmt.Fprintf(w, "%3d%s", size, ord[o]) + break + } + + w.Flush() + return buf.String() +} + +func FormatPercent(percent float64) string { + return strconv.FormatFloat(percent, 'f', -1, 64) + "%" +} + +func (self *FileSystemUsage) UsePercent() float64 { + b_used := (self.Total - self.Free) / 1024 + b_avail := self.Avail / 1024 + utotal := b_used + b_avail + used := b_used + + if utotal != 0 { + u100 := used * 100 + pct := u100 / utotal + if u100%utotal != 0 { + pct += 1 + } + return (float64(pct) / float64(100)) * 100.0 + } + + return 0.0 +} + +func (self *Uptime) Format() string { + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + uptime := uint64(self.Length) + + days := uptime / (60 * 60 * 24) + + if days != 0 { + s := "" + if days > 1 { + s = "s" + } + fmt.Fprintf(w, "%d day%s, ", days, s) + } + + minutes := uptime / 60 + hours := minutes / 60 + hours %= 24 + minutes %= 60 + + fmt.Fprintf(w, "%2d:%02d", hours, minutes) + + w.Flush() + return buf.String() +} + +func (self *ProcTime) FormatStartTime() string { + if self.StartTime == 0 { + return "00:00" + } + start := time.Unix(int64(self.StartTime)/1000, 0) + format := "Jan02" + if time.Since(start).Seconds() < (60 * 60 * 24) { + format = "15:04" + } + return start.Format(format) +} + +func (self *ProcTime) FormatTotal() string { + t := self.Total / 1000 + ss := t % 60 + t /= 60 + mm := t % 60 + t /= 60 + hh := t % 24 + return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go new file mode 100644 index 0000000..dd72a76 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go @@ -0,0 +1,141 @@ +package sigar + +import ( + "time" +) + +type Sigar interface { + CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) + GetLoadAverage() (LoadAverage, error) + GetMem() (Mem, error) + GetSwap() (Swap, error) + GetFileSystemUsage(string) (FileSystemUsage, error) +} + +type Cpu struct { + User uint64 + Nice uint64 + Sys uint64 + Idle uint64 + Wait uint64 + Irq uint64 + SoftIrq uint64 + Stolen uint64 +} + +func (cpu *Cpu) Total() uint64 { + return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + + cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen +} + +func (cpu Cpu) Delta(other Cpu) Cpu { + return Cpu{ + User: cpu.User - other.User, + Nice: cpu.Nice - other.Nice, + Sys: cpu.Sys - other.Sys, + Idle: cpu.Idle - other.Idle, + Wait: cpu.Wait - other.Wait, + Irq: cpu.Irq - other.Irq, + SoftIrq: cpu.SoftIrq - other.SoftIrq, + Stolen: cpu.Stolen - other.Stolen, + } +} + +type LoadAverage struct { + One, Five, Fifteen float64 +} + +type Uptime struct { + Length float64 +} + +type Mem struct { + Total uint64 + Used uint64 + Free uint64 + ActualFree uint64 + ActualUsed uint64 +} + +type Swap struct { + Total uint64 + Used 
uint64 + Free uint64 +} + +type CpuList struct { + List []Cpu +} + +type FileSystem struct { + DirName string + DevName string + TypeName string + SysTypeName string + Options string + Flags uint32 +} + +type FileSystemList struct { + List []FileSystem +} + +type FileSystemUsage struct { + Total uint64 + Used uint64 + Free uint64 + Avail uint64 + Files uint64 + FreeFiles uint64 +} + +type ProcList struct { + List []int +} + +type RunState byte + +const ( + RunStateSleep = 'S' + RunStateRun = 'R' + RunStateStop = 'T' + RunStateZombie = 'Z' + RunStateIdle = 'D' + RunStateUnknown = '?' +) + +type ProcState struct { + Name string + State RunState + Ppid int + Tty int + Priority int + Nice int + Processor int +} + +type ProcMem struct { + Size uint64 + Resident uint64 + Share uint64 + MinorFaults uint64 + MajorFaults uint64 + PageFaults uint64 +} + +type ProcTime struct { + StartTime uint64 + User uint64 + Sys uint64 + Total uint64 +} + +type ProcArgs struct { + List []string +} + +type ProcExe struct { + Name string + Cwd string + Root string +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go new file mode 100644 index 0000000..68ffb0f --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "syscall" +) + +var system struct { + ticks uint64 + btime uint64 +} + +var Procd string + +func init() { + system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) + + Procd = "/proc" + + // grab system boot time + readFile(Procd+"/stat", func(line string) bool { + if strings.HasPrefix(line, "btime") { + system.btime, _ = strtoull(line[6:]) + return false // stop reading + } + return true + }) +} + +func (self *LoadAverage) Get() error { + line, err := ioutil.ReadFile(Procd + "/loadavg") + if err != nil { + return nil + } + + fields := strings.Fields(string(line)) + + self.One, _ = strconv.ParseFloat(fields[0], 64) + self.Five, _ = strconv.ParseFloat(fields[1], 64) + self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) + + return nil +} + +func (self *Uptime) Get() error { + sysinfo := syscall.Sysinfo_t{} + + if err := syscall.Sysinfo(&sysinfo); err != nil { + return err + } + + self.Length = float64(sysinfo.Uptime) + + return nil +} + +func (self *Mem) Get() error { + var buffers, cached uint64 + table := map[string]*uint64{ + "MemTotal": &self.Total, + "MemFree": &self.Free, + "Buffers": &buffers, + "Cached": &cached, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + kern := buffers + cached + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +func (self *Swap) Get() error { + table := map[string]*uint64{ + "SwapTotal": &self.Total, + "SwapFree": &self.Free, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + return nil +} + +func (self *Cpu) Get() error { + return readFile(Procd+"/stat", func(line string) bool { + if len(line) > 4 && line[0:4] == "cpu " { + parseCpuStat(self, line) + return false + } + return true + + }) +} + +func (self *CpuList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 4 + } + list := make([]Cpu, 0, capacity) + + err := readFile(Procd+"/stat", func(line string) bool { + if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { + cpu := Cpu{} + 
parseCpuStat(&cpu, line) + list = append(list, cpu) + } + return true + }) + + self.List = list + + return err +} + +func (self *FileSystemList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 10 + } + fslist := make([]FileSystem, 0, capacity) + + err := readFile("/etc/mtab", func(line string) bool { + fields := strings.Fields(line) + + fs := FileSystem{} + fs.DevName = fields[0] + fs.DirName = fields[1] + fs.SysTypeName = fields[2] + fs.Options = fields[3] + + fslist = append(fslist, fs) + + return true + }) + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + dir, err := os.Open(Procd) + if err != nil { + return err + } + defer dir.Close() + + const readAllDirnames = -1 // see os.File.Readdirnames doc + + names, err := dir.Readdirnames(readAllDirnames) + if err != nil { + return err + } + + capacity := len(names) + list := make([]int, 0, capacity) + + for _, name := range names { + if name[0] < '0' || name[0] > '9' { + continue + } + pid, err := strconv.Atoi(name) + if err == nil { + list = append(list, pid) + } + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s + + self.State = RunState(fields[2][0]) + + self.Ppid, _ = strconv.Atoi(fields[3]) + + self.Tty, _ = strconv.Atoi(fields[6]) + + self.Priority, _ = strconv.Atoi(fields[17]) + + self.Nice, _ = strconv.Atoi(fields[18]) + + self.Processor, _ = strconv.Atoi(fields[38]) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + contents, err := readProcFile(pid, "statm") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + size, _ := strtoull(fields[0]) + self.Size = size << 12 + + rss, _ := strtoull(fields[1]) + self.Resident = rss << 12 + + share, _ := strtoull(fields[2]) + self.Share = share << 12 + + contents, err = readProcFile(pid, "stat") + if err != nil { + return err + } + + fields = strings.Fields(string(contents)) + + self.MinorFaults, _ = strtoull(fields[10]) + self.MajorFaults, _ = strtoull(fields[12]) + self.PageFaults = self.MinorFaults + self.MajorFaults + + return nil +} + +func (self *ProcTime) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + user, _ := strtoull(fields[13]) + sys, _ := strtoull(fields[14]) + // convert to millis + self.User = user * (1000 / system.ticks) + self.Sys = sys * (1000 / system.ticks) + self.Total = self.User + self.Sys + + // convert to millis + self.StartTime, _ = strtoull(fields[21]) + self.StartTime /= system.ticks + self.StartTime += system.btime + self.StartTime *= 1000 + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + contents, err := readProcFile(pid, "cmdline") + if err != nil { + return err + } + + bbuf := bytes.NewBuffer(contents) + + var args []string + + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + args = append(args, string(chop(arg))) + } + + self.List = args + + return nil +} + +func (self *ProcExe) Get(pid int) error { + fields := map[string]*string{ + "exe": &self.Name, + "cwd": &self.Cwd, + "root": &self.Root, + } + + for name, field := range fields { + val, err := os.Readlink(procFileName(pid, name)) + + if err != nil { + return err + } + + *field = val + } + + return nil +} + +func parseMeminfo(table 
map[string]*uint64) error { + return readFile(Procd+"/meminfo", func(line string) bool { + fields := strings.Split(line, ":") + + if ptr := table[fields[0]]; ptr != nil { + num := strings.TrimLeft(fields[1], " ") + val, err := strtoull(strings.Fields(num)[0]) + if err == nil { + *ptr = val * 1024 + } + } + + return true + }) +} + +func parseCpuStat(self *Cpu, line string) error { + fields := strings.Fields(line) + + self.User, _ = strtoull(fields[1]) + self.Nice, _ = strtoull(fields[2]) + self.Sys, _ = strtoull(fields[3]) + self.Idle, _ = strtoull(fields[4]) + self.Wait, _ = strtoull(fields[5]) + self.Irq, _ = strtoull(fields[6]) + self.SoftIrq, _ = strtoull(fields[7]) + self.Stolen, _ = strtoull(fields[8]) + + return nil +} + +func readFile(file string, handler func(string) bool) error { + contents, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if !handler(string(line)) { + break + } + } + + return nil +} + +func strtoull(val string) (uint64, error) { + return strconv.ParseUint(val, 10, 64) +} + +func procFileName(pid int, name string) string { + return Procd + "/" + strconv.Itoa(pid) + "/" + name +} + +func readProcFile(pid int, name string) ([]byte, error) { + path := procFileName(pid, name) + contents, err := ioutil.ReadFile(path) + + if err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err == syscall.ENOENT { + return nil, syscall.ESRCH + } + } + } + + return contents, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go new file mode 100644 index 0000000..39f1878 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go @@ -0,0 +1,26 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd linux netbsd openbsd + +package sigar + +import "syscall" + +func (self *FileSystemUsage) Get(path string) error { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return err + } + + bsize := stat.Bsize / 512 + + self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1 + self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1 + self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1 + self.Used = self.Total - self.Free + self.Files = stat.Files + self.FreeFiles = stat.Ffree + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_util.go b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go new file mode 100644 index 0000000..a02df94 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go @@ -0,0 +1,22 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "unsafe" +) + +func bytePtrToString(ptr *int8) string { + bytes := (*[10000]byte)(unsafe.Pointer(ptr)) + + n := 0 + for bytes[n] != 0 { + n++ + } + + return string(bytes[0:n]) +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go new file mode 100644 index 0000000..0c779d7 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +// #include <stdlib.h> +// #include <windows.h> +import "C" + +import ( + "fmt" + "unsafe" +) + +func init() { +} + +func (self *LoadAverage) Get() error { + return nil +} + +func (self *Uptime) Get() error { + return nil +} + +func (self *Mem) Get() error { + var statex C.MEMORYSTATUSEX + statex.dwLength = C.DWORD(unsafe.Sizeof(statex)) + + succeeded := C.GlobalMemoryStatusEx(&statex) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError)) + } + + self.Total = uint64(statex.ullTotalPhys) + return nil +} + +func (self *Swap) Get() error { + return notImplemented() +} + +func (self *Cpu) Get() error { + return notImplemented() +} + +func (self *CpuList) Get() error { + return notImplemented() +} + +func (self *FileSystemList) Get() error { + return notImplemented() +} + +func (self *ProcList) Get() error { + return notImplemented() +} + +func (self *ProcState) Get(pid int) error { + return notImplemented() +} + +func (self *ProcMem) Get(pid int) error { + return notImplemented() +} + +func (self *ProcTime) Get(pid int) error { + return notImplemented() +} + +func (self *ProcArgs) Get(pid int) error { + return notImplemented() +} + +func (self *ProcExe) Get(pid int) error { + return notImplemented() +} + +func (self *FileSystemUsage) Get(path string) error { + var availableBytes C.ULARGE_INTEGER + var totalBytes C.ULARGE_INTEGER + var totalFreeBytes C.ULARGE_INTEGER + + pathChars := C.CString(path) + defer C.free(unsafe.Pointer(pathChars)) + + succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError)) + } + + self.Total = *(*uint64)(unsafe.Pointer(&totalBytes)) + return nil +} + +func notImplemented() error { + panic("Not Implemented") + return nil +} diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE new file mode 100644 index 0000000..109b6bd --- /dev/null +++ b/vendor/github.com/containerd/console/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2017-infinity Michael Crosby. crosbymichael@gmail.com + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH +THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
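A note on the gosigar files vendored above: the package offers one-shot `Get()` calls that fill a value struct, plus `CollectCpuStats`, whose background goroutine (see concrete_sigar.go) sends an absolute first sample and per-interval deltas after that. The following is a minimal usage sketch, separate from the patch itself; the `main` scaffold and printed labels are illustrative, while the types and methods come from the vendored sources:

```go
package main

import (
	"fmt"
	"time"

	sigar "github.com/cloudfoundry/gosigar"
)

func main() {
	cs := &sigar.ConcreteSigar{}

	// One-shot snapshot: GetMem fills and returns a Mem value.
	if mem, err := cs.GetMem(); err == nil {
		fmt.Printf("mem used/total: %d/%d\n", mem.Used, mem.Total)
	}

	// Streaming: the first Cpu on the channel is an absolute reading,
	// later ones are deltas over each one-second interval.
	samples, stop := cs.CollectCpuStats(time.Second)
	for i := 0; i < 3; i++ {
		cpu := <-samples
		fmt.Printf("cpu ticks user/sys/idle: %d/%d/%d\n", cpu.User, cpu.Sys, cpu.Idle)
	}
	close(stop) // terminates the collector goroutine
}
```

Returning the stop channel as `chan<- struct{}` keeps shutdown in the caller's hands without exposing the collector's internals.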
diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md new file mode 100644 index 0000000..3881cd4 --- /dev/null +++ b/vendor/github.com/containerd/console/README.md @@ -0,0 +1,44 @@ +# console + +[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) + +Golang package for dealing with consoles. Light on deps and a simple API. + +## Modifying the current process + +```go +current := console.Current() +defer current.Reset() + +if err := current.SetRaw(); err != nil { +} +ws, err := current.Size() +current.Resize(ws) +``` + +## License - MIT + +Copyright (c) 2017-infinity Michael Crosby. crosbymichael@gmail.com + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH +THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
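The README example above shows the API shape but drops every error. Below is a slightly fuller sketch against the same vendored interface, assuming a Unix terminal on stdin; the `main` wrapper and messages are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/console"
)

func main() {
	current := console.Current() // the calling process's console (stdin)

	if err := current.SetRaw(); err != nil {
		log.Fatalf("set raw mode: %v", err)
	}
	// Deferred only after SetRaw succeeds, so the saved termios state is
	// restored on every exit path that actually changed it.
	defer current.Reset()

	ws, err := current.Size()
	if err != nil {
		fmt.Printf("query window size: %v\n", err)
		return
	}
	fmt.Printf("console is %dx%d\n", ws.Width, ws.Height)
}
```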
diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go new file mode 100644 index 0000000..08d255d --- /dev/null +++ b/vendor/github.com/containerd/console/console.go @@ -0,0 +1,52 @@ +package console + +import ( + "errors" + "io" + "os" +) + +var ErrNotAConsole = errors.New("provided file is not a console") + +type Console interface { + io.Reader + io.Writer + io.Closer + + // Resize resizes the console to the provided window size + Resize(WinSize) error + // ResizeFrom resizes the calling console to the size of the + // provided console + ResizeFrom(Console) error + // SetRaw sets the console in raw mode + SetRaw() error + // DisableEcho disables echo on the console + DisableEcho() error + // Reset restores the console to its original state + Reset() error + // Size returns the window size of the console + Size() (WinSize, error) +} + +// WinSize specifies the window size of the console +type WinSize struct { + // Height of the console + Height uint16 + // Width of the console + Width uint16 + x uint16 + y uint16 +} + +// Current returns the current process's console +func Current() Console { + return newMaster(os.Stdin) +} + +// ConsoleFromFile returns a console using the provided file +func ConsoleFromFile(f *os.File) (Console, error) { + if err := checkConsole(f); err != nil { + return nil, err + } + return newMaster(f), nil +} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go new file mode 100644 index 0000000..fb27011 --- /dev/null +++ b/vendor/github.com/containerd/console/console_unix.go @@ -0,0 +1,133 @@ +// +build darwin freebsd linux + +package console + +// #include <termios.h> +import "C" + +import ( + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + if err := saneTerminal(f); err != nil { + return nil, "", err + } + slave, err := ptsname(f) + if err != nil { + return nil, "", err + } + if err := unlockpt(f); err != nil { + return nil, "", err + } + return &master{ + f: f, + }, slave, nil +} + +type master struct { + f *os.File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return ioctl( + m.f.Fd(), + uintptr(unix.TIOCSWINSZ), + uintptr(unsafe.Pointer(&ws)), + ) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + if m.original == nil { + m.original = &termios + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&rawState))) + rawState.Oflag = rawState.Oflag | C.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m
*master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + var ws WinSize + if err := ioctl( + m.f.Fd(), + uintptr(unix.TIOCGWINSZ), + uintptr(unsafe.Pointer(&ws)), + ); err != nil { + return ws, err + } + return ws, nil +} + +// checkConsole checks if the provided file is a console +func checkConsole(f *os.File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f *os.File) Console { + return &master{ + f: f, + } +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 0000000..8d133ea --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,199 @@ +package console + +import ( + "fmt" + "os" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/pkg/errors" +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + enableVirtualTerminalInput = 0x0200 + enableVirtualTerminalProcessing = 0x0004 + disableNewlineAutoReturn = 0x0008 +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(m.in); err == nil { + m.inMode = mode + // Validate that enableVirtualTerminalInput is supported, but do not set it. + if err = winterm.SetConsoleMode(m.in, mode|enableVirtualTerminalInput); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(m.in, mode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(m.out); err == nil { + m.outMode = mode + if err := winterm.SetConsoleMode(m.out, mode|enableVirtualTerminalProcessing); err == nil { + m.outMode |= enableVirtualTerminalProcessing + } else { + winterm.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(m.err); err == nil { + m.errMode = mode + if err := winterm.SetConsoleMode(m.err, mode|enableVirtualTerminalProcessing); err == nil { + m.errMode |= enableVirtualTerminalProcessing + } else { + winterm.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in uintptr + inMode uint32 + + out uintptr + outMode uint32 + + err uintptr + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // disableNewlineAutoReturn might not be supported on this version of + // Windows. 
+ + winterm.SetConsoleMode(m.out, m.outMode|disableNewlineAutoReturn) + + winterm.SetConsoleMode(m.err, m.errMode|disableNewlineAutoReturn) + + return nil +} + +func (m *master) Reset() error { + for _, s := range []struct { + fd uintptr + mode uint32 + }{ + {m.in, m.inMode}, + {m.out, m.outMode}, + {m.err, m.errMode}, + } { + if err := winterm.SetConsoleMode(s.fd, s.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") + } + } + + return nil +} + +func (m *master) Size() (WinSize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(m.out) + if err != nil { + return WinSize{}, errors.Wrap(err, "unable to get console info") + } + + winsize := WinSize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func (m *master) Resize(ws WinSize) error { + return ErrNotImplemented +} + +func (m *master) ResizeFrom(c Console) error { + return ErrNotImplemented +} + +func (m *master) DisableEcho() error { + mode := m.inMode &^ winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT + mode |= winterm.ENABLE_LINE_INPUT + + if err := winterm.SetConsoleMode(m.in, mode); err != nil { + return errors.Wrap(err, "unable to set console to disable echo") + } + + return nil +} + +func (m *master) Close() error { + return nil +} + +func (m *master) Read(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Write(b []byte) (int, error) { + panic("not implemented on windows") +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd uintptr, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= enableVirtualTerminalInput + } + + if err := winterm.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f *os.File) error { + if _, err := winterm.GetConsoleMode(f.Fd()); err != nil { + return err + } + return nil +} + +func newMaster(f *os.File) Console { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + panic("creating a console from a file is not supported on windows") + } + + m := &master{} + m.initStdios() + + return m +} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 0000000..d86c315 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,51 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + return ioctl(fd, unix.TIOCGETA, uintptr(unsafe.Pointer(p))) +} + +func tcset(fd uintptr, p *unix.Termios) error { + return ioctl(fd, unix.TIOCSETA, uintptr(unsafe.Pointer(p))) +} + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} 
+ +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + var n int32 + if err := ioctl(f.Fd(), unix.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +func saneTerminal(f *os.File) error { + // Go doesn't have a wrapper for any of the termios ioctls. + var termios unix.Termios + if err := tcget(f.Fd(), &termios); err != nil { + return err + } + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + return tcset(f.Fd(), &termios) +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 0000000..478f60a --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,51 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + return ioctl(fd, unix.TIOCGETA, uintptr(unsafe.Pointer(p))) +} + +func tcset(fd uintptr, p *unix.Termios) error { + return ioctl(fd, unix.TIOCSETA, uintptr(unsafe.Pointer(p))) +} + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This is a no-op on FreeBSD, which does not allocate controlling terminals on open. +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + var n int32 + if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +func saneTerminal(f *os.File) error { + // Go doesn't have a wrapper for any of the termios ioctls. + var termios unix.Termios + if err := tcget(f.Fd(), &termios); err != nil { + return err + } + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + return tcset(f.Fd(), &termios) +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 0000000..67c89f0 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,51 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + return ioctl(fd, unix.TCGETS, uintptr(unsafe.Pointer(p))) +} + +func tcset(fd uintptr, p *unix.Termios) error { + return ioctl(fd, unix.TCSETS, uintptr(unsafe.Pointer(p))) +} + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + var n int32 + if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +func saneTerminal(f *os.File) error { + // Go doesn't have a wrapper for any of the termios ioctls. + var termios unix.Termios + if err := tcget(f.Fd(), &termios); err != nil { + return err + } + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + return tcset(f.Fd(), &termios) +} diff --git a/vendor/github.com/containerd/containerd/LICENSE.code b/vendor/github.com/containerd/containerd/LICENSE.code new file mode 100644 index 0000000..8f3fee6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/LICENSE.docs b/vendor/github.com/containerd/containerd/LICENSE.docs new file mode 100644 index 0000000..e26cd4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. 
+ Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 0000000..8915f02 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md new file mode 100644 index 0000000..6741966 --- /dev/null +++ b/vendor/github.com/containerd/containerd/README.md @@ -0,0 +1,72 @@ +# containerd + +containerd is a daemon to control runC, built for performance and density. 
+containerd leverages runC's advanced features such as seccomp and user namespace support as well +as checkpoint and restore for cloning and live migration of containers. + +## Getting started + +The easiest way to start using containerd is to download binaries from the [releases page](https://github.com/containerd/containerd/releases). + +The included `ctr` command-line tool allows you to interact with the containerd daemon: + +``` +$ sudo ctr containers start redis /containers/redis +$ sudo ctr containers list +ID PATH STATUS PROCESSES +redis /containers/redis running 14063 +``` + +`/containers/redis` is the path to an OCI bundle. [See the docs for more information.](docs/bundle.md) + +## Docs + + * [Client CLI reference (`ctr`)](docs/cli.md) + * [Daemon CLI reference (`containerd`)](docs/daemon.md) + * [Creating OCI bundles](docs/bundle.md) + * [containerd changes to the bundle](docs/bundle-changes.md) + * [Attaching to STDIO or TTY](docs/attach.md) + * [Telemetry and metrics](docs/telemetry.md) + +All documentation is contained in the `/docs` directory in this repository. + +## Building + +You will need to make sure that you have Go installed on your system and the containerd repository is cloned +in your `$GOPATH`. You will also need to make sure that you have all the dependencies cloned as well. +Currently, contributing to containerd is not for first-time devs, as many dependencies are not vendored and +work is being completed at a high rate. + +After that, just run `make` and the binaries for the daemon and client will be located in the `bin/` directory. + +## Performance + +Starting 1000 containers concurrently runs at 126-140 containers per second. + +Overall start times: + +``` +[containerd] 2015/12/04 15:00:54 count: 1000 +[containerd] 2015/12/04 14:59:54 min: 23ms +[containerd] 2015/12/04 14:59:54 max: 355ms +[containerd] 2015/12/04 14:59:54 mean: 78ms +[containerd] 2015/12/04 14:59:54 stddev: 34ms +[containerd] 2015/12/04 14:59:54 median: 73ms +[containerd] 2015/12/04 14:59:54 75%: 91ms +[containerd] 2015/12/04 14:59:54 95%: 123ms +[containerd] 2015/12/04 14:59:54 99%: 287ms +[containerd] 2015/12/04 14:59:54 99.9%: 355ms +``` + +## Roadmap + +The current roadmap and milestones for alpha and beta completion are in the github issues on this repository. Please refer to these issues for what is being worked on and completed for the various stages of development. + +## Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
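The `server.go` file below implements the daemon side of this gRPC API. A hypothetical client sketch — the `types.NewAPIClient` constructor (standard protoc-generated naming) and the `/run/containerd/containerd.sock` socket path are assumptions, since neither appears in this patch — would dial the unix socket and query the server version exposed by `GetServerVersion`:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/containerd/containerd/api/grpc/types"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Dial containerd's unix socket (path is an assumption, not shown in this patch).
	conn, err := grpc.Dial("/run/containerd/containerd.sock",
		grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// types.NewAPIClient is the protoc-generated client constructor (assumed).
	client := types.NewAPIClient(conn)
	resp, err := client.GetServerVersion(context.Background(), &types.GetServerVersionRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("containerd %d.%d.%d (%s)\n", resp.Major, resp.Minor, resp.Patch, resp.Revision)
}
```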
diff --git a/vendor/github.com/containerd/containerd/api/grpc/server/server.go b/vendor/github.com/containerd/containerd/api/grpc/server/server.go new file mode 100644 index 0000000..89ba87c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/grpc/server/server.go @@ -0,0 +1,494 @@ +package server + +import ( + "bufio" + "errors" + "fmt" + "os" + "strconv" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/api/grpc/types" + "github.com/containerd/containerd/runtime" + "github.com/containerd/containerd/supervisor" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" +) + +type apiServer struct { + sv *supervisor.Supervisor +} + +// NewServer returns grpc server instance +func NewServer(sv *supervisor.Supervisor) types.APIServer { + return &apiServer{ + sv: sv, + } +} + +func (s *apiServer) GetServerVersion(ctx context.Context, c *types.GetServerVersionRequest) (*types.GetServerVersionResponse, error) { + return &types.GetServerVersionResponse{ + Major: containerd.VersionMajor, + Minor: containerd.VersionMinor, + Patch: containerd.VersionPatch, + Revision: containerd.GitCommit, + }, nil +} + +func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContainerRequest) (*types.CreateContainerResponse, error) { + if c.BundlePath == "" { + return nil, errors.New("empty bundle path") + } + e := &supervisor.StartTask{} + e.ID = c.Id + e.BundlePath = c.BundlePath + e.Stdin = c.Stdin + e.Stdout = c.Stdout + e.Stderr = c.Stderr + e.Labels = c.Labels + e.NoPivotRoot = c.NoPivotRoot + e.Runtime = c.Runtime + e.RuntimeArgs = c.RuntimeArgs + e.StartResponse = make(chan supervisor.StartResponse, 1) + e.Ctx = ctx + if c.Checkpoint != "" { + e.CheckpointDir = c.CheckpointDir + e.Checkpoint = &runtime.Checkpoint{ + Name: c.Checkpoint, + } + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + r := <-e.StartResponse + apiC, err := createAPIContainer(r.Container, false) + if err != nil { + return nil, err + } + return &types.CreateContainerResponse{ + Container: apiC, + }, nil +} + +func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) { + e := &supervisor.CreateCheckpointTask{} + e.ID = r.Id + e.CheckpointDir = r.CheckpointDir + e.Checkpoint = &runtime.Checkpoint{ + Name: r.Checkpoint.Name, + Exit: r.Checkpoint.Exit, + TCP: r.Checkpoint.Tcp, + UnixSockets: r.Checkpoint.UnixSockets, + Shell: r.Checkpoint.Shell, + EmptyNS: r.Checkpoint.EmptyNS, + } + + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.CreateCheckpointResponse{}, nil +} + +func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpointRequest) (*types.DeleteCheckpointResponse, error) { + if r.Name == "" { + return nil, errors.New("checkpoint name cannot be empty") + } + e := &supervisor.DeleteCheckpointTask{} + e.ID = r.Id + e.CheckpointDir = r.CheckpointDir + e.Checkpoint = &runtime.Checkpoint{ + Name: r.Name, + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.DeleteCheckpointResponse{}, nil +} + +func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) { + e := &supervisor.GetContainersTask{} + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + var container 
runtime.Container + for _, c := range e.Containers { + if c.ID() == r.Id { + container = c + break + } + } + if container == nil { + return nil, grpc.Errorf(codes.NotFound, "no such containers") + } + var out []*types.Checkpoint + checkpoints, err := container.Checkpoints(r.CheckpointDir) + if err != nil { + return nil, err + } + for _, c := range checkpoints { + out = append(out, &types.Checkpoint{ + Name: c.Name, + Tcp: c.TCP, + Shell: c.Shell, + UnixSockets: c.UnixSockets, + // TODO: figure out timestamp + //Timestamp: c.Timestamp, + }) + } + return &types.ListCheckpointResponse{Checkpoints: out}, nil +} + +func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) { + e := &supervisor.SignalTask{} + e.ID = r.Id + e.PID = r.Pid + e.Signal = syscall.Signal(int(r.Signal)) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.SignalResponse{}, nil +} + +func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) { + + getState := func(c runtime.Container) (interface{}, error) { + return createAPIContainer(c, true) + } + + e := &supervisor.GetContainersTask{} + e.ID = r.Id + e.GetState = getState + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + m := s.sv.Machine() + state := &types.StateResponse{ + Machine: &types.Machine{ + Cpus: uint32(m.Cpus), + Memory: uint64(m.Memory), + }, + } + for idx := range e.Containers { + state.Containers = append(state.Containers, e.States[idx].(*types.Container)) + } + return state, nil +} + +func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) { + processes, err := c.Processes() + if err != nil { + return nil, grpc.Errorf(codes.Internal, "get processes for container: "+err.Error()) + } + var procs []*types.Process + for _, p := range processes { + oldProc := p.Spec() + stdio := p.Stdio() + proc := &types.Process{ + Pid: p.ID(), + SystemPid: uint32(p.SystemPid()), + Terminal: oldProc.Terminal, + Args: oldProc.Args, + Env: oldProc.Env, + Cwd: oldProc.Cwd, + Stdin: stdio.Stdin, + Stdout: stdio.Stdout, + Stderr: stdio.Stderr, + } + proc.User = &types.User{ + Uid: oldProc.User.UID, + Gid: oldProc.User.GID, + AdditionalGids: oldProc.User.AdditionalGids, + } + // FIXME: trying to mimic api compat with only using one field + proc.Capabilities = oldProc.Capabilities.Effective + proc.ApparmorProfile = oldProc.ApparmorProfile + proc.SelinuxLabel = oldProc.SelinuxLabel + proc.NoNewPrivileges = oldProc.NoNewPrivileges + for _, rl := range oldProc.Rlimits { + proc.Rlimits = append(proc.Rlimits, &types.Rlimit{ + Type: rl.Type, + Soft: rl.Soft, + Hard: rl.Hard, + }) + } + procs = append(procs, proc) + } + var pids []int + state := c.State() + if getPids && (state == runtime.Running || state == runtime.Paused) { + if pids, err = c.Pids(); err != nil { + return nil, grpc.Errorf(codes.Internal, "get all pids for container: "+err.Error()) + } + } + return &types.Container{ + Id: c.ID(), + BundlePath: c.Path(), + Processes: procs, + Labels: c.Labels(), + Status: string(state), + Pids: toUint32(pids), + Runtime: c.Runtime(), + }, nil +} + +func toUint32(its []int) []uint32 { + o := []uint32{} + for _, i := range its { + o = append(o, uint32(i)) + } + return o +} + +func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) { + e := &supervisor.UpdateTask{} + e.ID = r.Id + e.State = runtime.State(r.Status) + if 
r.Resources != nil { + rs := r.Resources + e.Resources = &runtime.Resource{} + if rs.CpuShares != 0 { + e.Resources.CPUShares = int64(rs.CpuShares) + } + if rs.BlkioWeight != 0 { + e.Resources.BlkioWeight = uint16(rs.BlkioWeight) + } + if rs.CpuPeriod != 0 { + e.Resources.CPUPeriod = int64(rs.CpuPeriod) + } + if rs.CpuQuota != 0 { + e.Resources.CPUQuota = int64(rs.CpuQuota) + } + if rs.CpusetCpus != "" { + e.Resources.CpusetCpus = rs.CpusetCpus + } + if rs.CpusetMems != "" { + e.Resources.CpusetMems = rs.CpusetMems + } + if rs.KernelMemoryLimit != 0 { + e.Resources.KernelMemory = int64(rs.KernelMemoryLimit) + } + if rs.KernelTCPMemoryLimit != 0 { + e.Resources.KernelTCPMemory = int64(rs.KernelTCPMemoryLimit) + } + if rs.MemoryLimit != 0 { + e.Resources.Memory = int64(rs.MemoryLimit) + } + if rs.MemoryReservation != 0 { + e.Resources.MemoryReservation = int64(rs.MemoryReservation) + } + if rs.MemorySwap != 0 { + e.Resources.MemorySwap = int64(rs.MemorySwap) + } + if rs.PidsLimit != 0 { + e.Resources.PidsLimit = int64(rs.PidsLimit) + } + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.UpdateContainerResponse{}, nil +} + +func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) { + e := &supervisor.UpdateProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.Height = int(r.Height) + e.Width = int(r.Width) + e.CloseStdin = r.CloseStdin + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.UpdateProcessResponse{}, nil +} + +func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error { + t := time.Time{} + if r.Timestamp != nil { + from, err := ptypes.Timestamp(r.Timestamp) + if err != nil { + return err + } + t = from + } + if r.StoredOnly && t.IsZero() { + return fmt.Errorf("invalid parameter: StoredOnly cannot be specified without setting a valid Timestamp") + } + events := s.sv.Events(t, r.StoredOnly, r.Id) + defer s.sv.Unsubscribe(events) + for e := range events { + tsp, err := ptypes.TimestampProto(e.Timestamp) + if err != nil { + return err + } + if r.Id == "" || e.ID == r.Id { + if err := stream.Send(&types.Event{ + Id: e.ID, + Type: e.Type, + Timestamp: tsp, + Pid: e.PID, + Status: uint32(e.Status), + }); err != nil { + return err + } + } + } + return nil +} + +func convertToPb(st *runtime.Stat) *types.StatsResponse { + tsp, _ := ptypes.TimestampProto(st.Timestamp) + pbSt := &types.StatsResponse{ + Timestamp: tsp, + CgroupStats: &types.CgroupStats{}, + } + systemUsage, _ := getSystemCPUUsage() + pbSt.CgroupStats.CpuStats = &types.CpuStats{ + CpuUsage: &types.CpuUsage{ + TotalUsage: st.CPU.Usage.Total, + PercpuUsage: st.CPU.Usage.Percpu, + UsageInKernelmode: st.CPU.Usage.Kernel, + UsageInUsermode: st.CPU.Usage.User, + }, + ThrottlingData: &types.ThrottlingData{ + Periods: st.CPU.Throttling.Periods, + ThrottledPeriods: st.CPU.Throttling.ThrottledPeriods, + ThrottledTime: st.CPU.Throttling.ThrottledTime, + }, + SystemUsage: systemUsage, + } + pbSt.CgroupStats.MemoryStats = &types.MemoryStats{ + Cache: st.Memory.Cache, + Usage: &types.MemoryData{ + Usage: st.Memory.Usage.Usage, + MaxUsage: st.Memory.Usage.Max, + Failcnt: st.Memory.Usage.Failcnt, + Limit: st.Memory.Usage.Limit, + }, + SwapUsage: &types.MemoryData{ + Usage: st.Memory.Swap.Usage, + MaxUsage: st.Memory.Swap.Max, + Failcnt: st.Memory.Swap.Failcnt, + Limit: st.Memory.Swap.Limit, + }, + KernelUsage: &types.MemoryData{ + Usage: st.Memory.Kernel.Usage, + 
MaxUsage: st.Memory.Kernel.Max,
+			Failcnt:  st.Memory.Kernel.Failcnt,
+			Limit:    st.Memory.Kernel.Limit,
+		},
+		Stats: st.Memory.Raw,
+	}
+	pbSt.CgroupStats.BlkioStats = &types.BlkioStats{
+		IoServiceBytesRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceBytesRecursive),
+		IoServicedRecursive:     convertBlkioEntryToPb(st.Blkio.IoServicedRecursive),
+		IoQueuedRecursive:       convertBlkioEntryToPb(st.Blkio.IoQueuedRecursive),
+		IoServiceTimeRecursive:  convertBlkioEntryToPb(st.Blkio.IoServiceTimeRecursive),
+		IoWaitTimeRecursive:     convertBlkioEntryToPb(st.Blkio.IoWaitTimeRecursive),
+		IoMergedRecursive:       convertBlkioEntryToPb(st.Blkio.IoMergedRecursive),
+		IoTimeRecursive:         convertBlkioEntryToPb(st.Blkio.IoTimeRecursive),
+		SectorsRecursive:        convertBlkioEntryToPb(st.Blkio.SectorsRecursive),
+	}
+	pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats)
+	for k, st := range st.Hugetlb {
+		pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{
+			Usage:    st.Usage,
+			MaxUsage: st.Max,
+			Failcnt:  st.Failcnt,
+		}
+	}
+	pbSt.CgroupStats.PidsStats = &types.PidsStats{
+		Current: st.Pids.Current,
+		Limit:   st.Pids.Limit,
+	}
+	return pbSt
+}
+
+func convertBlkioEntryToPb(b []runtime.BlkioEntry) []*types.BlkioStatsEntry {
+	var pbEs []*types.BlkioStatsEntry
+	for _, e := range b {
+		pbEs = append(pbEs, &types.BlkioStatsEntry{
+			Major: e.Major,
+			Minor: e.Minor,
+			Op:    e.Op,
+			Value: e.Value,
+		})
+	}
+	return pbEs
+}
+
+const nanoSecondsPerSecond = 1e9
+
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. An error is returned if /proc/stat does not
+// have the expected format.
+//
+// Uses /proc/stat (Linux-specific, not defined by POSIX). Looks for
+// the cpu statistics line and then sums up the first seven fields
+// provided. See `man 5 proc` for details on specific field
+// information.
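+//
+// Illustrative example (editorial addition, with made-up numbers): given
+// a first line such as
+//
+//	cpu  4705 356 584 3699176 23 4 25
+//
+// the first seven value fields sum to 3704873 clock ticks; with the
+// common value of 100 ticks per second this becomes
+// 3704873 * 1e9 / 100 = 3.704873e13 nanoseconds of cumulative CPU time.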
+func getSystemCPUUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+	bufReader := bufio.NewReaderSize(f, 128)
+	for err == nil {
+		line, err = bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		if len(parts) == 0 {
+			continue
+		}
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("bad format of cpu stats")
+			}
+			var totalClockTicks uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("error parsing cpu stats: %v", err)
+				}
+				totalClockTicks += v
+			}
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				clockTicksPerSecond, nil
+		}
+	}
+	return 0, fmt.Errorf("bad stats format: no cpu line found in /proc/stat")
+}
+
+func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) {
+	// queue a stats task with the supervisor and wait for the result
+	e := &supervisor.StatsTask{}
+	e.ID = r.Id
+	e.Stat = make(chan *runtime.Stat, 1)
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	stats := <-e.Stat
+	t := convertToPb(stats)
+	return t, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/grpc/server/server_linux.go b/vendor/github.com/containerd/containerd/api/grpc/server/server_linux.go
new file mode 100644
index 0000000..92baf39
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/grpc/server/server_linux.go
@@ -0,0 +1,66 @@
+package server
+
+import (
+	"fmt"
+
+	"github.com/containerd/containerd/api/grpc/types"
+	"github.com/containerd/containerd/specs"
+	"github.com/containerd/containerd/supervisor"
+	"github.com/opencontainers/runc/libcontainer/system"
+	ocs "github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/net/context"
+)
+
+var clockTicksPerSecond = uint64(system.GetClockTicks())
+
+func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) {
+	process := &specs.ProcessSpec{
+		Terminal: r.Terminal,
+		Args:     r.Args,
+		Env:      r.Env,
+		Cwd:      r.Cwd,
+	}
+	process.User = ocs.User{
+		UID:            r.User.Uid,
+		GID:            r.User.Gid,
+		AdditionalGids: r.User.AdditionalGids,
+	}
+	// for backwards compat in the API, set eibp (effective, inheritable, bounding, permitted) to the same capability list
+	process.Capabilities = &ocs.LinuxCapabilities{
+		Bounding:    r.Capabilities,
+		Effective:   r.Capabilities,
+		Inheritable: r.Capabilities,
+		Permitted:   r.Capabilities,
+	}
+	process.ApparmorProfile = r.ApparmorProfile
+	process.SelinuxLabel = r.SelinuxLabel
+	process.NoNewPrivileges = r.NoNewPrivileges
+	for _, rl := range r.Rlimits {
+		process.Rlimits = append(process.Rlimits, ocs.LinuxRlimit{
+			Type: rl.Type,
+			Soft: rl.Soft,
+			Hard: rl.Hard,
+		})
+	}
+	if r.Id == "" {
+		return nil, fmt.Errorf("container id cannot be empty")
+	}
+	if r.Pid == "" {
+		return nil, fmt.Errorf("process id cannot be empty")
+	}
+	e := &supervisor.AddProcessTask{}
+	e.ID = r.Id
+	e.PID = r.Pid
+	e.ProcessSpec = process
+	e.Stdin = r.Stdin
+	e.Stdout = r.Stdout
+	e.Stderr = r.Stderr
+	e.StartResponse = make(chan supervisor.StartResponse, 1)
+	e.Ctx = ctx
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	sr := <-e.StartResponse
+	return &types.AddProcessResponse{SystemPid: uint32(sr.ExecPid)}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/grpc/server/server_solaris.go b/vendor/github.com/containerd/containerd/api/grpc/server/server_solaris.go
new file mode 100644
index 0000000..5ad2734
--- /dev/null
+++
b/vendor/github.com/containerd/containerd/api/grpc/server/server_solaris.go @@ -0,0 +1,41 @@ +package server + +import ( + "fmt" + + "github.com/containerd/containerd/api/grpc/types" + "github.com/containerd/containerd/specs" + "github.com/containerd/containerd/supervisor" + "golang.org/x/net/context" +) + +var clockTicksPerSecond uint64 + +func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) { + process := &specs.ProcessSpec{ + Terminal: r.Terminal, + Args: r.Args, + Env: r.Env, + Cwd: r.Cwd, + } + if r.Id == "" { + return nil, fmt.Errorf("container id cannot be empty") + } + if r.Pid == "" { + return nil, fmt.Errorf("process id cannot be empty") + } + e := &supervisor.AddProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.ProcessSpec = process + e.Stdin = r.Stdin + e.Stdout = r.Stdout + e.Stderr = r.Stderr + e.StartResponse = make(chan supervisor.StartResponse, 1) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + <-e.StartResponse + return &types.AddProcessResponse{}, nil +} diff --git a/vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go b/vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go new file mode 100644 index 0000000..e6ef556 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go @@ -0,0 +1,2584 @@ +// Code generated by protoc-gen-go. +// source: api.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + api.proto + +It has these top-level messages: + GetServerVersionRequest + GetServerVersionResponse + UpdateProcessRequest + UpdateProcessResponse + CreateContainerRequest + CreateContainerResponse + SignalRequest + SignalResponse + AddProcessRequest + Rlimit + User + AddProcessResponse + CreateCheckpointRequest + CreateCheckpointResponse + DeleteCheckpointRequest + DeleteCheckpointResponse + ListCheckpointRequest + Checkpoint + ListCheckpointResponse + StateRequest + ContainerState + Process + Container + Machine + StateResponse + UpdateContainerRequest + UpdateResource + BlockIODevice + WeightDevice + ThrottleDevice + UpdateContainerResponse + EventsRequest + Event + NetworkStats + CpuUsage + ThrottlingData + CpuStats + PidsStats + MemoryData + MemoryStats + BlkioStatsEntry + BlkioStats + HugetlbStats + CgroupStats + StatsResponse + StatsRequest +*/ +package types + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
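+// (Editorial note: the blank-identifier const below is that assertion; it
+// compiles only while the imported proto package still exports
+// ProtoPackageIsVersion2, so an out-of-date runtime fails at build time
+// rather than misbehaving at run time.)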
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type GetServerVersionRequest struct { +} + +func (m *GetServerVersionRequest) Reset() { *m = GetServerVersionRequest{} } +func (m *GetServerVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionRequest) ProtoMessage() {} +func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetServerVersionResponse struct { + Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"` +} + +func (m *GetServerVersionResponse) Reset() { *m = GetServerVersionResponse{} } +func (m *GetServerVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionResponse) ProtoMessage() {} +func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetServerVersionResponse) GetMajor() uint32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *GetServerVersionResponse) GetMinor() uint32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *GetServerVersionResponse) GetPatch() uint32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *GetServerVersionResponse) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + +type UpdateProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` + Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"` + Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"` +} + +func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} } +func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessRequest) ProtoMessage() {} +func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *UpdateProcessRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *UpdateProcessRequest) GetPid() string { + if m != nil { + return m.Pid + } + return "" +} + +func (m *UpdateProcessRequest) GetCloseStdin() bool { + if m != nil { + return m.CloseStdin + } + return false +} + +func (m *UpdateProcessRequest) GetWidth() uint32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *UpdateProcessRequest) GetHeight() uint32 { + if m != nil { + return m.Height + } + return 0 +} + +type UpdateProcessResponse struct { +} + +func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} } +func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessResponse) ProtoMessage() {} +func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type CreateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` + Stdout string 
`protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` + Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` + NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` + Runtime string `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"` + RuntimeArgs []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"` + CheckpointDir string `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *CreateContainerRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *CreateContainerRequest) GetBundlePath() string { + if m != nil { + return m.BundlePath + } + return "" +} + +func (m *CreateContainerRequest) GetCheckpoint() string { + if m != nil { + return m.Checkpoint + } + return "" +} + +func (m *CreateContainerRequest) GetStdin() string { + if m != nil { + return m.Stdin + } + return "" +} + +func (m *CreateContainerRequest) GetStdout() string { + if m != nil { + return m.Stdout + } + return "" +} + +func (m *CreateContainerRequest) GetStderr() string { + if m != nil { + return m.Stderr + } + return "" +} + +func (m *CreateContainerRequest) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CreateContainerRequest) GetNoPivotRoot() bool { + if m != nil { + return m.NoPivotRoot + } + return false +} + +func (m *CreateContainerRequest) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *CreateContainerRequest) GetRuntimeArgs() []string { + if m != nil { + return m.RuntimeArgs + } + return nil +} + +func (m *CreateContainerRequest) GetCheckpointDir() string { + if m != nil { + return m.CheckpointDir + } + return "" +} + +type CreateContainerResponse struct { + Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"` +} + +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateContainerResponse) GetContainer() *Container { + if m != nil { + return m.Container + } + return nil +} + +type SignalRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` +} + +func (m *SignalRequest) Reset() { *m = SignalRequest{} } +func (m *SignalRequest) String() string { return proto.CompactTextString(m) } +func (*SignalRequest) ProtoMessage() {} +func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *SignalRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SignalRequest) GetPid() string { + if m != nil { + return m.Pid + } + return "" +} + +func (m *SignalRequest) GetSignal() uint32 { + if m != nil { + return m.Signal + } + return 0 +} + +type 
SignalResponse struct { +} + +func (m *SignalResponse) Reset() { *m = SignalResponse{} } +func (m *SignalResponse) String() string { return proto.CompactTextString(m) } +func (*SignalResponse) ProtoMessage() {} +func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type AddProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} } +func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) } +func (*AddProcessRequest) ProtoMessage() {} +func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *AddProcessRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *AddProcessRequest) GetTerminal() bool { + if m != nil { + return m.Terminal + } + return false +} + +func (m *AddProcessRequest) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *AddProcessRequest) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *AddProcessRequest) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *AddProcessRequest) GetCwd() string { + if m != nil { + return m.Cwd + } + return "" +} + +func (m *AddProcessRequest) GetPid() string { + if m != nil { + return m.Pid + } + return "" +} + +func (m *AddProcessRequest) GetStdin() string { + if m != nil { + return m.Stdin + } + return "" +} + +func (m *AddProcessRequest) GetStdout() string { + if m != nil { + return m.Stdout + } + return "" +} + +func (m *AddProcessRequest) GetStderr() string { + if m != nil { + return m.Stderr + } + return "" +} + +func (m *AddProcessRequest) GetCapabilities() []string { + if m != nil { + return m.Capabilities + } + return nil +} + +func (m *AddProcessRequest) GetApparmorProfile() string { + if m != nil { + return m.ApparmorProfile + } + return "" +} + +func (m *AddProcessRequest) GetSelinuxLabel() string { + if m != nil { + return m.SelinuxLabel + } + return "" +} + +func (m *AddProcessRequest) GetNoNewPrivileges() bool { + if m != nil { + return m.NoNewPrivileges + } + return false +} + +func (m *AddProcessRequest) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type Rlimit struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` 
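+	// (Editorial note: Type carries a resource name in the style of
+	// setrlimit(2) and the OCI runtime spec, e.g. "RLIMIT_NOFILE"; Soft is
+	// the enforced limit and Hard the ceiling the soft limit may be raised to.)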
+ Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` + Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` +} + +func (m *Rlimit) Reset() { *m = Rlimit{} } +func (m *Rlimit) String() string { return proto.CompactTextString(m) } +func (*Rlimit) ProtoMessage() {} +func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Rlimit) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Rlimit) GetSoft() uint64 { + if m != nil { + return m.Soft + } + return 0 +} + +func (m *Rlimit) GetHard() uint64 { + if m != nil { + return m.Hard + } + return 0 +} + +type User struct { + Uid uint32 `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` + AdditionalGids []uint32 `protobuf:"varint,3,rep,packed,name=additionalGids" json:"additionalGids,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *User) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *User) GetGid() uint32 { + if m != nil { + return m.Gid + } + return 0 +} + +func (m *User) GetAdditionalGids() []uint32 { + if m != nil { + return m.AdditionalGids + } + return nil +} + +type AddProcessResponse struct { + SystemPid uint32 `protobuf:"varint,1,opt,name=systemPid" json:"systemPid,omitempty"` +} + +func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} } +func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) } +func (*AddProcessResponse) ProtoMessage() {} +func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *AddProcessResponse) GetSystemPid() uint32 { + if m != nil { + return m.SystemPid + } + return 0 +} + +type CreateCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } +func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointRequest) ProtoMessage() {} +func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CreateCheckpointRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { + if m != nil { + return m.Checkpoint + } + return nil +} + +func (m *CreateCheckpointRequest) GetCheckpointDir() string { + if m != nil { + return m.CheckpointDir + } + return "" +} + +type CreateCheckpointResponse struct { +} + +func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } +func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointResponse) ProtoMessage() {} +func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +type DeleteCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" 
json:"checkpointDir,omitempty"` +} + +func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } +func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointRequest) ProtoMessage() {} +func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *DeleteCheckpointRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DeleteCheckpointRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteCheckpointRequest) GetCheckpointDir() string { + if m != nil { + return m.CheckpointDir + } + return "" +} + +type DeleteCheckpointResponse struct { +} + +func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } +func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointResponse) ProtoMessage() {} +func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type ListCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} } +func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointRequest) ProtoMessage() {} +func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *ListCheckpointRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ListCheckpointRequest) GetCheckpointDir() string { + if m != nil { + return m.CheckpointDir + } + return "" +} + +type Checkpoint struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Exit bool `protobuf:"varint,2,opt,name=exit" json:"exit,omitempty"` + Tcp bool `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"` + UnixSockets bool `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"` + Shell bool `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"` + EmptyNS []string `protobuf:"bytes,6,rep,name=emptyNS" json:"emptyNS,omitempty"` +} + +func (m *Checkpoint) Reset() { *m = Checkpoint{} } +func (m *Checkpoint) String() string { return proto.CompactTextString(m) } +func (*Checkpoint) ProtoMessage() {} +func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Checkpoint) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Checkpoint) GetExit() bool { + if m != nil { + return m.Exit + } + return false +} + +func (m *Checkpoint) GetTcp() bool { + if m != nil { + return m.Tcp + } + return false +} + +func (m *Checkpoint) GetUnixSockets() bool { + if m != nil { + return m.UnixSockets + } + return false +} + +func (m *Checkpoint) GetShell() bool { + if m != nil { + return m.Shell + } + return false +} + +func (m *Checkpoint) GetEmptyNS() []string { + if m != nil { + return m.EmptyNS + } + return nil +} + +type ListCheckpointResponse struct { + Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` +} + +func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} } +func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointResponse) ProtoMessage() {} +func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{18} } + +func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type StateRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (m *StateRequest) String() string { return proto.CompactTextString(m) } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *StateRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type ContainerState struct { + Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` +} + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (m *ContainerState) String() string { return proto.CompactTextString(m) } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ContainerState) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +type Process struct { + Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *Process) Reset() { *m = Process{} } +func (m *Process) String() string { return proto.CompactTextString(m) } +func (*Process) ProtoMessage() {} +func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *Process) GetPid() string { + if m != nil { + return m.Pid + } + return "" +} + +func (m *Process) GetTerminal() bool { + if m != nil { + return m.Terminal + } + return false +} + +func (m *Process) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *Process) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *Process) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *Process) GetCwd() string { + if m != nil { + return m.Cwd + } + return "" +} + +func (m *Process) GetSystemPid() uint32 { + if m != nil { + return m.SystemPid + } + return 0 +} + +func (m *Process) GetStdin() string { + if m != nil { + return m.Stdin + } + return "" +} + +func (m *Process) GetStdout() string { + if m != nil { + return m.Stdout + } + return "" +} + +func (m *Process) GetStderr() string { + if m != nil { + return m.Stderr + } + return "" +} + 
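+// (Editorial note: like all protoc-gen-go accessors in this file, the Get*
+// methods above and below check m != nil before dereferencing, so fields can
+// be read safely even through a nil message pointer, e.g.
+//
+//	var p *Process
+//	stdout := p.GetStdout() // yields "" instead of panicking
+//
+// which is why the getters are preferred over direct field access whenever a
+// message may be nil.)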
+func (m *Process) GetCapabilities() []string {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+func (m *Process) GetApparmorProfile() string {
+	if m != nil {
+		return m.ApparmorProfile
+	}
+	return ""
+}
+
+func (m *Process) GetSelinuxLabel() string {
+	if m != nil {
+		return m.SelinuxLabel
+	}
+	return ""
+}
+
+func (m *Process) GetNoNewPrivileges() bool {
+	if m != nil {
+		return m.NoNewPrivileges
+	}
+	return false
+}
+
+func (m *Process) GetRlimits() []*Rlimit {
+	if m != nil {
+		return m.Rlimits
+	}
+	return nil
+}
+
+type Container struct {
+	Id         string     `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	BundlePath string     `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
+	Processes  []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"`
+	Status     string     `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+	Labels     []string   `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
+	Pids       []uint32   `protobuf:"varint,6,rep,packed,name=pids" json:"pids,omitempty"`
+	Runtime    string     `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"`
+}
+
+func (m *Container) Reset()                    { *m = Container{} }
+func (m *Container) String() string            { return proto.CompactTextString(m) }
+func (*Container) ProtoMessage()               {}
+func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *Container) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *Container) GetBundlePath() string {
+	if m != nil {
+		return m.BundlePath
+	}
+	return ""
+}
+
+func (m *Container) GetProcesses() []*Process {
+	if m != nil {
+		return m.Processes
+	}
+	return nil
+}
+
+func (m *Container) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *Container) GetLabels() []string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+func (m *Container) GetPids() []uint32 {
+	if m != nil {
+		return m.Pids
+	}
+	return nil
+}
+
+func (m *Container) GetRuntime() string {
+	if m != nil {
+		return m.Runtime
+	}
+	return ""
+}
+
+// Machine is information about the machine on which containerd runs
+type Machine struct {
+	Cpus   uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"`
+	Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"`
+}
+
+func (m *Machine) Reset()                    { *m = Machine{} }
+func (m *Machine) String() string            { return proto.CompactTextString(m) }
+func (*Machine) ProtoMessage()               {}
+func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *Machine) GetCpus() uint32 {
+	if m != nil {
+		return m.Cpus
+	}
+	return 0
+}
+
+func (m *Machine) GetMemory() uint64 {
+	if m != nil {
+		return m.Memory
+	}
+	return 0
+}
+
+// StateResponse is information about the containerd daemon
+type StateResponse struct {
+	Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"`
+	Machine    *Machine     `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"`
+}
+
+func (m *StateResponse) Reset()                    { *m = StateResponse{} }
+func (m *StateResponse) String() string            { return proto.CompactTextString(m) }
+func (*StateResponse) ProtoMessage()               {}
+func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *StateResponse) GetContainers() []*Container {
+	if m != nil {
+		return m.Containers
+	}
+	return nil
+}
+
+func (m *StateResponse) GetMachine() *Machine {
+	if m != nil {
+		return m.Machine
+	}
+	return nil
+}
+
+type UpdateContainerRequest struct {
+	Id
string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } +func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerRequest) ProtoMessage() {} +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *UpdateContainerRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *UpdateContainerRequest) GetPid() string { + if m != nil { + return m.Pid + } + return "" +} + +func (m *UpdateContainerRequest) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *UpdateContainerRequest) GetResources() *UpdateResource { + if m != nil { + return m.Resources + } + return nil +} + +type UpdateResource struct { + BlkioWeight uint64 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` + CpuShares uint64 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` + CpuPeriod uint64 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` + CpuQuota uint64 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` + CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` + CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` + MemoryLimit uint64 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` + MemorySwap uint64 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` + MemoryReservation uint64 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` + KernelMemoryLimit uint64 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` + KernelTCPMemoryLimit uint64 `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"` + BlkioLeafWeight uint64 `protobuf:"varint,12,opt,name=blkioLeafWeight" json:"blkioLeafWeight,omitempty"` + BlkioWeightDevice []*WeightDevice `protobuf:"bytes,13,rep,name=blkioWeightDevice" json:"blkioWeightDevice,omitempty"` + BlkioThrottleReadBpsDevice []*ThrottleDevice `protobuf:"bytes,14,rep,name=blkioThrottleReadBpsDevice" json:"blkioThrottleReadBpsDevice,omitempty"` + BlkioThrottleWriteBpsDevice []*ThrottleDevice `protobuf:"bytes,15,rep,name=blkioThrottleWriteBpsDevice" json:"blkioThrottleWriteBpsDevice,omitempty"` + BlkioThrottleReadIopsDevice []*ThrottleDevice `protobuf:"bytes,16,rep,name=blkioThrottleReadIopsDevice" json:"blkioThrottleReadIopsDevice,omitempty"` + BlkioThrottleWriteIopsDevice []*ThrottleDevice `protobuf:"bytes,17,rep,name=blkioThrottleWriteIopsDevice" json:"blkioThrottleWriteIopsDevice,omitempty"` + PidsLimit uint64 `protobuf:"varint,18,opt,name=pidsLimit" json:"pidsLimit,omitempty"` +} + +func (m *UpdateResource) Reset() { *m = UpdateResource{} } +func (m *UpdateResource) String() string { return proto.CompactTextString(m) } +func (*UpdateResource) ProtoMessage() {} +func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *UpdateResource) GetBlkioWeight() uint64 { + if m != nil { + return m.BlkioWeight + } + return 0 +} + +func (m *UpdateResource) GetCpuShares() uint64 { + if m != nil { + return m.CpuShares + } + 
return 0 +} + +func (m *UpdateResource) GetCpuPeriod() uint64 { + if m != nil { + return m.CpuPeriod + } + return 0 +} + +func (m *UpdateResource) GetCpuQuota() uint64 { + if m != nil { + return m.CpuQuota + } + return 0 +} + +func (m *UpdateResource) GetCpusetCpus() string { + if m != nil { + return m.CpusetCpus + } + return "" +} + +func (m *UpdateResource) GetCpusetMems() string { + if m != nil { + return m.CpusetMems + } + return "" +} + +func (m *UpdateResource) GetMemoryLimit() uint64 { + if m != nil { + return m.MemoryLimit + } + return 0 +} + +func (m *UpdateResource) GetMemorySwap() uint64 { + if m != nil { + return m.MemorySwap + } + return 0 +} + +func (m *UpdateResource) GetMemoryReservation() uint64 { + if m != nil { + return m.MemoryReservation + } + return 0 +} + +func (m *UpdateResource) GetKernelMemoryLimit() uint64 { + if m != nil { + return m.KernelMemoryLimit + } + return 0 +} + +func (m *UpdateResource) GetKernelTCPMemoryLimit() uint64 { + if m != nil { + return m.KernelTCPMemoryLimit + } + return 0 +} + +func (m *UpdateResource) GetBlkioLeafWeight() uint64 { + if m != nil { + return m.BlkioLeafWeight + } + return 0 +} + +func (m *UpdateResource) GetBlkioWeightDevice() []*WeightDevice { + if m != nil { + return m.BlkioWeightDevice + } + return nil +} + +func (m *UpdateResource) GetBlkioThrottleReadBpsDevice() []*ThrottleDevice { + if m != nil { + return m.BlkioThrottleReadBpsDevice + } + return nil +} + +func (m *UpdateResource) GetBlkioThrottleWriteBpsDevice() []*ThrottleDevice { + if m != nil { + return m.BlkioThrottleWriteBpsDevice + } + return nil +} + +func (m *UpdateResource) GetBlkioThrottleReadIopsDevice() []*ThrottleDevice { + if m != nil { + return m.BlkioThrottleReadIopsDevice + } + return nil +} + +func (m *UpdateResource) GetBlkioThrottleWriteIopsDevice() []*ThrottleDevice { + if m != nil { + return m.BlkioThrottleWriteIopsDevice + } + return nil +} + +func (m *UpdateResource) GetPidsLimit() uint64 { + if m != nil { + return m.PidsLimit + } + return 0 +} + +type BlockIODevice struct { + Major int64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` +} + +func (m *BlockIODevice) Reset() { *m = BlockIODevice{} } +func (m *BlockIODevice) String() string { return proto.CompactTextString(m) } +func (*BlockIODevice) ProtoMessage() {} +func (*BlockIODevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *BlockIODevice) GetMajor() int64 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *BlockIODevice) GetMinor() int64 { + if m != nil { + return m.Minor + } + return 0 +} + +type WeightDevice struct { + BlkIODevice *BlockIODevice `protobuf:"bytes,1,opt,name=blkIODevice" json:"blkIODevice,omitempty"` + Weight uint32 `protobuf:"varint,2,opt,name=weight" json:"weight,omitempty"` + LeafWeight uint32 `protobuf:"varint,3,opt,name=leafWeight" json:"leafWeight,omitempty"` +} + +func (m *WeightDevice) Reset() { *m = WeightDevice{} } +func (m *WeightDevice) String() string { return proto.CompactTextString(m) } +func (*WeightDevice) ProtoMessage() {} +func (*WeightDevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *WeightDevice) GetBlkIODevice() *BlockIODevice { + if m != nil { + return m.BlkIODevice + } + return nil +} + +func (m *WeightDevice) GetWeight() uint32 { + if m != nil { + return m.Weight + } + return 0 +} + +func (m *WeightDevice) GetLeafWeight() uint32 { + if m != nil { + return m.LeafWeight + } + return 
0 +} + +type ThrottleDevice struct { + BlkIODevice *BlockIODevice `protobuf:"bytes,1,opt,name=blkIODevice" json:"blkIODevice,omitempty"` + Rate uint64 `protobuf:"varint,2,opt,name=rate" json:"rate,omitempty"` +} + +func (m *ThrottleDevice) Reset() { *m = ThrottleDevice{} } +func (m *ThrottleDevice) String() string { return proto.CompactTextString(m) } +func (*ThrottleDevice) ProtoMessage() {} +func (*ThrottleDevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *ThrottleDevice) GetBlkIODevice() *BlockIODevice { + if m != nil { + return m.BlkIODevice + } + return nil +} + +func (m *ThrottleDevice) GetRate() uint64 { + if m != nil { + return m.Rate + } + return 0 +} + +type UpdateContainerResponse struct { +} + +func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } +func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerResponse) ProtoMessage() {} +func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type EventsRequest struct { + // Tag 1 is deprecated (old uint64 timestamp) + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"` + StoredOnly bool `protobuf:"varint,3,opt,name=storedOnly" json:"storedOnly,omitempty"` + Id string `protobuf:"bytes,4,opt,name=id" json:"id,omitempty"` +} + +func (m *EventsRequest) Reset() { *m = EventsRequest{} } +func (m *EventsRequest) String() string { return proto.CompactTextString(m) } +func (*EventsRequest) ProtoMessage() {} +func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *EventsRequest) GetTimestamp() *google_protobuf.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *EventsRequest) GetStoredOnly() bool { + if m != nil { + return m.StoredOnly + } + return false +} + +func (m *EventsRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` + Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` + // Tag 5 is deprecated (old uint64 timestamp) + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,6,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Event) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Event) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Event) GetStatus() uint32 { + if m != nil { + return m.Status + } + return 0 +} + +func (m *Event) GetPid() string { + if m != nil { + return m.Pid + } + return "" +} + +func (m *Event) GetTimestamp() *google_protobuf.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type NetworkStats struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes" json:"rx_bytes,omitempty"` + Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets,json=rxPackets" json:"rx_Packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors,json=RxErrors" 
json:"Rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped,json=RxDropped" json:"Rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes,json=TxBytes" json:"Tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets,json=TxPackets" json:"Tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors,json=TxErrors" json:"Tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped,json=TxDropped" json:"Tx_dropped,omitempty"` +} + +func (m *NetworkStats) Reset() { *m = NetworkStats{} } +func (m *NetworkStats) String() string { return proto.CompactTextString(m) } +func (*NetworkStats) ProtoMessage() {} +func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *NetworkStats) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NetworkStats) GetRxBytes() uint64 { + if m != nil { + return m.RxBytes + } + return 0 +} + +func (m *NetworkStats) GetRx_Packets() uint64 { + if m != nil { + return m.Rx_Packets + } + return 0 +} + +func (m *NetworkStats) GetRxErrors() uint64 { + if m != nil { + return m.RxErrors + } + return 0 +} + +func (m *NetworkStats) GetRxDropped() uint64 { + if m != nil { + return m.RxDropped + } + return 0 +} + +func (m *NetworkStats) GetTxBytes() uint64 { + if m != nil { + return m.TxBytes + } + return 0 +} + +func (m *NetworkStats) GetTxPackets() uint64 { + if m != nil { + return m.TxPackets + } + return 0 +} + +func (m *NetworkStats) GetTxErrors() uint64 { + if m != nil { + return m.TxErrors + } + return 0 +} + +func (m *NetworkStats) GetTxDropped() uint64 { + if m != nil { + return m.TxDropped + } + return 0 +} + +type CpuUsage struct { + TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"` + PercpuUsage []uint64 `protobuf:"varint,2,rep,packed,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"` + UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"` + UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"` +} + +func (m *CpuUsage) Reset() { *m = CpuUsage{} } +func (m *CpuUsage) String() string { return proto.CompactTextString(m) } +func (*CpuUsage) ProtoMessage() {} +func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *CpuUsage) GetTotalUsage() uint64 { + if m != nil { + return m.TotalUsage + } + return 0 +} + +func (m *CpuUsage) GetPercpuUsage() []uint64 { + if m != nil { + return m.PercpuUsage + } + return nil +} + +func (m *CpuUsage) GetUsageInKernelmode() uint64 { + if m != nil { + return m.UsageInKernelmode + } + return 0 +} + +func (m *CpuUsage) GetUsageInUsermode() uint64 { + if m != nil { + return m.UsageInUsermode + } + return 0 +} + +type ThrottlingData struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` +} + +func (m *ThrottlingData) Reset() { *m = ThrottlingData{} } +func (m *ThrottlingData) String() string { return proto.CompactTextString(m) } +func (*ThrottlingData) ProtoMessage() {} +func (*ThrottlingData) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{35} } + +func (m *ThrottlingData) GetPeriods() uint64 { + if m != nil { + return m.Periods + } + return 0 +} + +func (m *ThrottlingData) GetThrottledPeriods() uint64 { + if m != nil { + return m.ThrottledPeriods + } + return 0 +} + +func (m *ThrottlingData) GetThrottledTime() uint64 { + if m != nil { + return m.ThrottledTime + } + return 0 +} + +type CpuStats struct { + CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"` + ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data,json=throttlingData" json:"throttling_data,omitempty"` + SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage,json=systemUsage" json:"system_usage,omitempty"` +} + +func (m *CpuStats) Reset() { *m = CpuStats{} } +func (m *CpuStats) String() string { return proto.CompactTextString(m) } +func (*CpuStats) ProtoMessage() {} +func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *CpuStats) GetCpuUsage() *CpuUsage { + if m != nil { + return m.CpuUsage + } + return nil +} + +func (m *CpuStats) GetThrottlingData() *ThrottlingData { + if m != nil { + return m.ThrottlingData + } + return nil +} + +func (m *CpuStats) GetSystemUsage() uint64 { + if m != nil { + return m.SystemUsage + } + return 0 +} + +type PidsStats struct { + Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` +} + +func (m *PidsStats) Reset() { *m = PidsStats{} } +func (m *PidsStats) String() string { return proto.CompactTextString(m) } +func (*PidsStats) ProtoMessage() {} +func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +func (m *PidsStats) GetCurrent() uint64 { + if m != nil { + return m.Current + } + return 0 +} + +func (m *PidsStats) GetLimit() uint64 { + if m != nil { + return m.Limit + } + return 0 +} + +type MemoryData struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *MemoryData) Reset() { *m = MemoryData{} } +func (m *MemoryData) String() string { return proto.CompactTextString(m) } +func (*MemoryData) ProtoMessage() {} +func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *MemoryData) GetUsage() uint64 { + if m != nil { + return m.Usage + } + return 0 +} + +func (m *MemoryData) GetMaxUsage() uint64 { + if m != nil { + return m.MaxUsage + } + return 0 +} + +func (m *MemoryData) GetFailcnt() uint64 { + if m != nil { + return m.Failcnt + } + return 0 +} + +func (m *MemoryData) GetLimit() uint64 { + if m != nil { + return m.Limit + } + return 0 +} + +type MemoryStats struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` + Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` + SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage,json=swapUsage" json:"swap_usage,omitempty"` + KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage,json=kernelUsage" json:"kernel_usage,omitempty"` + Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m 
*MemoryStats) Reset() { *m = MemoryStats{} } +func (m *MemoryStats) String() string { return proto.CompactTextString(m) } +func (*MemoryStats) ProtoMessage() {} +func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +func (m *MemoryStats) GetCache() uint64 { + if m != nil { + return m.Cache + } + return 0 +} + +func (m *MemoryStats) GetUsage() *MemoryData { + if m != nil { + return m.Usage + } + return nil +} + +func (m *MemoryStats) GetSwapUsage() *MemoryData { + if m != nil { + return m.SwapUsage + } + return nil +} + +func (m *MemoryStats) GetKernelUsage() *MemoryData { + if m != nil { + return m.KernelUsage + } + return nil +} + +func (m *MemoryStats) GetStats() map[string]uint64 { + if m != nil { + return m.Stats + } + return nil +} + +type BlkioStatsEntry struct { + Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Op string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"` + Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"` +} + +func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} } +func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) } +func (*BlkioStatsEntry) ProtoMessage() {} +func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *BlkioStatsEntry) GetMajor() uint64 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *BlkioStatsEntry) GetMinor() uint64 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *BlkioStatsEntry) GetOp() string { + if m != nil { + return m.Op + } + return "" +} + +func (m *BlkioStatsEntry) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +type BlkioStats struct { + IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` +} + +func (m *BlkioStats) Reset() { *m = BlkioStats{} } +func (m *BlkioStats) String() string { return proto.CompactTextString(m) } +func (*BlkioStats) ProtoMessage() {} +func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceBytesRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { + if m != nil { + return 
m.IoServicedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoQueuedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoWaitTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoMergedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { + if m != nil { + return m.SectorsRecursive + } + return nil +} + +type HugetlbStats struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *HugetlbStats) Reset() { *m = HugetlbStats{} } +func (m *HugetlbStats) String() string { return proto.CompactTextString(m) } +func (*HugetlbStats) ProtoMessage() {} +func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *HugetlbStats) GetUsage() uint64 { + if m != nil { + return m.Usage + } + return 0 +} + +func (m *HugetlbStats) GetMaxUsage() uint64 { + if m != nil { + return m.MaxUsage + } + return 0 +} + +func (m *HugetlbStats) GetFailcnt() uint64 { + if m != nil { + return m.Failcnt + } + return 0 +} + +func (m *HugetlbStats) GetLimit() uint64 { + if m != nil { + return m.Limit + } + return 0 +} + +type CgroupStats struct { + CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"` + MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"` + BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"` + HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (m *CgroupStats) String() string { return proto.CompactTextString(m) } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *CgroupStats) GetCpuStats() *CpuStats { + if m != nil { + return m.CpuStats + } + return nil +} + +func (m *CgroupStats) GetMemoryStats() *MemoryStats { + if m != nil { + return m.MemoryStats + } + return nil +} + +func (m *CgroupStats) GetBlkioStats() *BlkioStats { + if m != nil { + return m.BlkioStats + } + return nil +} + +func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats { + if m != nil { + return m.HugetlbStats + } + return nil +} + +func (m *CgroupStats) GetPidsStats() *PidsStats { + if m != nil { + return m.PidsStats + } + return nil +} + +type StatsResponse struct { + NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"` + 
CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" json:"cgroup_stats,omitempty"` + // Tag 3 is deprecated (old uint64 timestamp) + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (m *StatsResponse) String() string { return proto.CompactTextString(m) } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *StatsResponse) GetNetworkStats() []*NetworkStats { + if m != nil { + return m.NetworkStats + } + return nil +} + +func (m *StatsResponse) GetCgroupStats() *CgroupStats { + if m != nil { + return m.CgroupStats + } + return nil +} + +func (m *StatsResponse) GetTimestamp() *google_protobuf.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type StatsRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *StatsRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func init() { + proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest") + proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse") + proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest") + proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse") + proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest") + proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse") + proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest") + proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse") + proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest") + proto.RegisterType((*Rlimit)(nil), "types.Rlimit") + proto.RegisterType((*User)(nil), "types.User") + proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse") + proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest") + proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse") + proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest") + proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse") + proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest") + proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint") + proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse") + proto.RegisterType((*StateRequest)(nil), "types.StateRequest") + proto.RegisterType((*ContainerState)(nil), "types.ContainerState") + proto.RegisterType((*Process)(nil), "types.Process") + proto.RegisterType((*Container)(nil), "types.Container") + proto.RegisterType((*Machine)(nil), "types.Machine") + proto.RegisterType((*StateResponse)(nil), "types.StateResponse") + proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest") + proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource") + proto.RegisterType((*BlockIODevice)(nil), "types.BlockIODevice") + proto.RegisterType((*WeightDevice)(nil), "types.WeightDevice") + 
proto.RegisterType((*ThrottleDevice)(nil), "types.ThrottleDevice") + proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse") + proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest") + proto.RegisterType((*Event)(nil), "types.Event") + proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats") + proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage") + proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData") + proto.RegisterType((*CpuStats)(nil), "types.CpuStats") + proto.RegisterType((*PidsStats)(nil), "types.PidsStats") + proto.RegisterType((*MemoryData)(nil), "types.MemoryData") + proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats") + proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry") + proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats") + proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats") + proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats") + proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse") + proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for API service + +type APIClient interface { + GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) + CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) + UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) + AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) + CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) + DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) + ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) + State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) + Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) + Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) +} + +type aPIClient struct { + cc *grpc.ClientConn +} + +func NewAPIClient(cc *grpc.ClientConn) APIClient { + return &aPIClient{cc} +} + +func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) { + out := new(GetServerVersionResponse) + err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...) 
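As an aside for readers tracing the generated client: the following is a minimal standalone sketch of how a caller might dial the daemon and invoke GetServerVersion through NewAPIClient. It is illustrative only and not part of this patch; the unix socket path and dial options are assumptions, not anything this file defines.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/containerd/containerd/api/grpc/types"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Dial the daemon's GRPC endpoint over a unix socket (path assumed).
	sock := "/run/containerd/containerd.sock"
	conn, err := grpc.Dial(sock,
		grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := types.NewAPIClient(conn)
	// GetServerVersion takes an empty request and returns the daemon version.
	v, err := client.GetServerVersion(context.Background(), &types.GetServerVersionRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("containerd %d.%d.%d (%s)\n", v.Major, v.Minor, v.Patch, v.Revision)
}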
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) { + out := new(SignalResponse) + err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) { + out := new(UpdateProcessResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) { + out := new(AddProcessResponse) + err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) { + out := new(CreateCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) { + out := new(DeleteCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) { + out := new(ListCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...) 
+ if err != nil { + return nil, err + } + x := &aPIEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type API_EventsClient interface { + Recv() (*Event, error) + grpc.ClientStream +} + +type aPIEventsClient struct { + grpc.ClientStream +} + +func (x *aPIEventsClient) Recv() (*Event, error) { + m := new(Event) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { + out := new(StatsResponse) + err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for API service + +type APIServer interface { + GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error) + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Signal(context.Context, *SignalRequest) (*SignalResponse, error) + UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error) + AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error) + CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error) + DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error) + ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error) + State(context.Context, *StateRequest) (*StateResponse, error) + Events(*EventsRequest, API_EventsServer) error + Stats(context.Context, *StatsRequest) (*StatsResponse, error) +} + +func RegisterAPIServer(s *grpc.Server, srv APIServer) { + s.RegisterService(&_API_serviceDesc, srv) +} + +func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServerVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetServerVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/GetServerVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == 
nil { + return srv.(APIServer).UpdateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Signal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Signal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Signal(ctx, req.(*SignalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).AddProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/AddProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).DeleteCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/DeleteCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ListCheckpoint_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ListCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/ListCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).State(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/State", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).State(ctx, req.(*StateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(APIServer).Events(m, &aPIEventsServer{stream}) +} + +type API_EventsServer interface { + Send(*Event) error + grpc.ServerStream +} + +type aPIEventsServer struct { + grpc.ServerStream +} + +func (x *aPIEventsServer) Send(m *Event) error { + return x.ServerStream.SendMsg(m) +} + +func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _API_serviceDesc = grpc.ServiceDesc{ + ServiceName: "types.API", + HandlerType: (*APIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetServerVersion", + Handler: _API_GetServerVersion_Handler, + }, + { + MethodName: "CreateContainer", + Handler: _API_CreateContainer_Handler, + }, + { + MethodName: "UpdateContainer", + Handler: _API_UpdateContainer_Handler, + }, + { + MethodName: "Signal", + Handler: _API_Signal_Handler, + }, + { + MethodName: "UpdateProcess", + Handler: _API_UpdateProcess_Handler, + }, + { + MethodName: "AddProcess", + Handler: _API_AddProcess_Handler, + }, + { + MethodName: "CreateCheckpoint", + Handler: _API_CreateCheckpoint_Handler, + }, + { + MethodName: "DeleteCheckpoint", + Handler: _API_DeleteCheckpoint_Handler, + }, + { + MethodName: "ListCheckpoint", + Handler: _API_ListCheckpoint_Handler, + }, + { + MethodName: "State", + Handler: _API_State_Handler, + }, + { + MethodName: "Stats", + Handler: _API_Stats_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Events", + Handler: _API_Events_Handler, + ServerStreams: true, + }, + }, + Metadata: "api.proto", +} + +func init() { proto.RegisterFile("api.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2632 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xec, 0x19, 0x4d, 0x6f, 0x24, 0x47, + 0x75, 0x67, 0xa6, 0xed, 0xf1, 0xbc, 0xf9, 0xb0, 0xa7, 0xd6, 0xeb, 0xed, 0x9d, 0x24, 0xbb, 0x4e, + 0x2b, 0x10, 0x03, 0x91, 0xb3, 0x78, 0x13, 0x58, 0x11, 0x09, 0x69, 0xd7, 0x1b, 0x82, 0xc9, 0x3a, + 0x99, 0xb4, 0x6d, 0x56, 0x48, 0x48, 0xa3, 0x76, 0x77, 0xed, 0x4c, 0xe1, 0x9e, 0xae, 0x4e, 0x75, + 0xb5, 0x3d, 0xbe, 0xe4, 0xc0, 0x01, 0x0e, 0x48, 0x70, 0x45, 0xe2, 0xc8, 0x8d, 0x3b, 0x07, 0xf8, + 0x03, 0x48, 0xfc, 0x10, 0x6e, 0xdc, 0x39, 0xa2, 0xfa, 0xe8, 0xea, 0xea, 0xf9, 0xf0, 0x6e, 0x90, + 0x10, 0x17, 0x2e, 0xad, 0x7a, 0xaf, 0xde, 0x57, 0xbd, 0x7a, 0xef, 0xd5, 0xab, 0x6a, 0x68, 0x05, + 0x29, 0xd9, 0x4f, 0x19, 0xe5, 0x14, 0xad, 0xf1, 0xeb, 0x14, 0x67, 0x83, 0x07, 0x63, 0x4a, 0xc7, + 0x31, 0x7e, 0x5f, 0x22, 0xcf, 0xf3, 0x97, 0xef, 0x73, 0x32, 0xc5, 0x19, 0x0f, 0xa6, 0xa9, 0xa2, + 0xf3, 0xee, 0xc1, 0xdd, 0x4f, 0x30, 0x3f, 0xc1, 0xec, 0x12, 0xb3, 0x9f, 0x62, 0x96, 0x11, 0x9a, + 0xf8, 0xf8, 0xcb, 0x1c, 0x67, 0xdc, 0x9b, 0x81, 0xbb, 0x38, 0x95, 0xa5, 0x34, 0xc9, 0x30, 0xda, + 0x86, 0xb5, 0x69, 0xf0, 0x0b, 0xca, 0xdc, 0xda, 0x6e, 0x6d, 0xaf, 0xeb, 0x2b, 0x40, 0x62, 0x49, + 0x42, 0x99, 0x5b, 0xd7, 0x58, 0x01, 0x08, 0x6c, 0x1a, 0xf0, 0x70, 0xe2, 0x36, 0x14, 0x56, 0x02, + 0x68, 0x00, 0x1b, 0x0c, 0x5f, 0x12, 0x21, 0xd5, 0x75, 0x76, 0x6b, 0x7b, 0x2d, 0xdf, 0xc0, 0xde, + 0xaf, 0x6a, 0xb0, 0x7d, 0x96, 0x46, 0x01, 0xc7, 0x43, 0x46, 0x43, 0x9c, 0x65, 0xda, 0x24, 0xd4, + 0x83, 0x3a, 0x89, 0xa4, 0xce, 0x96, 0x5f, 0x27, 0x11, 0xda, 0x82, 0x46, 0x4a, 0x22, 0xa9, 0xae, + 0xe5, 0x8b, 0x21, 0xba, 0x0f, 0x10, 0xc6, 0x34, 0xc3, 0x27, 0x3c, 0x22, 0x89, 0xd4, 0xb8, 0xe1, + 0x5b, 0x18, 0x61, 0xcc, 0x15, 0x89, 0xf8, 0x44, 0xea, 0xec, 0xfa, 0x0a, 0x40, 0x3b, 0xb0, 0x3e, + 0xc1, 0x64, 0x3c, 0xe1, 0xee, 0x9a, 0x44, 0x6b, 0xc8, 0xbb, 0x0b, 0x77, 0xe6, 0xec, 0x50, 0xeb, + 0xf7, 0xfe, 0x5e, 0x87, 0x9d, 0x43, 0x86, 0x03, 0x8e, 0x0f, 0x69, 0xc2, 0x03, 0x92, 0x60, 0xb6, + 0xca, 0xc6, 0xfb, 0x00, 0xe7, 0x79, 0x12, 0xc5, 0x78, 0x18, 0xf0, 0x89, 0x36, 0xd5, 0xc2, 0x48, + 0x8b, 0x27, 0x38, 0xbc, 0x48, 0x29, 0x49, 0xb8, 0xb4, 0xb8, 0xe5, 0x5b, 0x18, 0x61, 0x71, 0x26, + 0x17, 0xa3, 0xbc, 0xa4, 0x00, 0x61, 0x71, 0xc6, 0x23, 0x9a, 0x2b, 0x8b, 0x5b, 0xbe, 0x86, 0x34, + 0x1e, 0x33, 0xe6, 0xae, 0x1b, 0x3c, 0x66, 0x4c, 0xe0, 0xe3, 0xe0, 0x1c, 0xc7, 0x99, 0xdb, 0xdc, + 0x6d, 0x08, 0xbc, 0x82, 0xd0, 0x2e, 0xb4, 0x13, 0x3a, 0x24, 0x97, 0x94, 0xfb, 0x94, 0x72, 0x77, + 0x43, 0x3a, 0xcc, 0x46, 0x21, 0x17, 0x9a, 0x2c, 0x4f, 0x44, 0xdc, 0xb8, 0x2d, 0x29, 0xb2, 0x00, + 0x05, 0xaf, 0x1e, 0x3e, 0x61, 0xe3, 0xcc, 0x05, 0x29, 0xd8, 0x46, 0xa1, 0x77, 0xa0, 0x5b, 0xae, + 0xe4, 0x19, 0x61, 0x6e, 0x5b, 0x4a, 0xa8, 0x22, 0xbd, 0x23, 0xb8, 0xbb, 0xe0, 0x4b, 0x1d, 0x67, + 0xfb, 0xd0, 0x0a, 0x0b, 0xa4, 0xf4, 0x69, 0xfb, 0x60, 0x6b, 0x5f, 0x86, 0xf6, 0x7e, 0x49, 0x5c, + 0x92, 0x78, 0x47, 0xd0, 0x3d, 0x21, 0xe3, 0x24, 0x88, 0x5f, 0x3f, 0x62, 0x84, 0xc7, 0x24, 0x8b, + 0x8e, 0x4f, 0x0d, 0x79, 0x5b, 0xd0, 0x2b, 0x44, 0xe9, 0x4d, 0xff, 0x73, 0x03, 0xfa, 0x4f, 0xa2, + 0xe8, 0x15, 0x31, 0x39, 0x80, 0x0d, 0x8e, 0xd9, 0x94, 0x08, 0x89, 0x75, 0xe9, 0x4e, 0x03, 0xa3, + 0x07, 0xe0, 0xe4, 0x19, 0x66, 0x52, 0x53, 0xfb, 0xa0, 0xad, 0x57, 0x72, 0x96, 0x61, 0xe6, 0xcb, + 0x09, 0x84, 0xc0, 0x09, 0x84, 0x2f, 0x1d, 0xe9, 0x4b, 0x39, 0x16, 0x26, 0xe3, 0xe4, 0xd2, 0x5d, + 0x93, 0x28, 0x31, 0x14, 0x98, 0xf0, 0x2a, 0xd2, 0x3b, 0x2c, 0x86, 0xc5, 0xb2, 0x9a, 0xe5, 0xb2, + 0x4c, 0xd8, 0x6c, 0x2c, 0x0f, 0x9b, 0xd6, 0x8a, 0xb0, 0x81, 0x4a, 0xd8, 0x78, 0xd0, 0x09, 0x83, + 0x34, 0x38, 0x27, 0x31, 0xe1, 0x04, 0x67, 0x6e, 0x5b, 0x1a, 0x51, 0xc1, 0xa1, 
0x3d, 0xd8, 0x0c, + 0xd2, 0x34, 0x60, 0x53, 0xca, 0x86, 0x8c, 0xbe, 0x24, 0x31, 0x76, 0x3b, 0x52, 0xc8, 0x3c, 0x5a, + 0x48, 0xcb, 0x70, 0x4c, 0x92, 0x7c, 0xf6, 0x5c, 0x44, 0x9f, 0xdb, 0x95, 0x64, 0x15, 0x9c, 0x90, + 0x96, 0xd0, 0xcf, 0xf0, 0xd5, 0x90, 0x91, 0x4b, 0x12, 0xe3, 0x31, 0xce, 0xdc, 0x9e, 0xf4, 0xe2, + 0x3c, 0x1a, 0xbd, 0x0b, 0x4d, 0x16, 0x93, 0x29, 0xe1, 0x99, 0xbb, 0xb9, 0xdb, 0xd8, 0x6b, 0x1f, + 0x74, 0xb5, 0x3f, 0x7d, 0x89, 0xf5, 0x8b, 0x59, 0xef, 0x19, 0xac, 0x2b, 0x94, 0x70, 0xaf, 0x20, + 0xd1, 0xbb, 0x25, 0xc7, 0x02, 0x97, 0xd1, 0x97, 0x5c, 0xee, 0x95, 0xe3, 0xcb, 0xb1, 0xc0, 0x4d, + 0x02, 0x16, 0xc9, 0x7d, 0x72, 0x7c, 0x39, 0xf6, 0x7c, 0x70, 0xc4, 0x46, 0x09, 0x57, 0xe7, 0x7a, + 0xc3, 0xbb, 0xbe, 0x18, 0x0a, 0xcc, 0x58, 0xc7, 0x54, 0xd7, 0x17, 0x43, 0xf4, 0x4d, 0xe8, 0x05, + 0x51, 0x44, 0x38, 0xa1, 0x49, 0x10, 0x7f, 0x42, 0xa2, 0xcc, 0x6d, 0xec, 0x36, 0xf6, 0xba, 0xfe, + 0x1c, 0xd6, 0x3b, 0x00, 0x64, 0x07, 0x94, 0x0e, 0xfa, 0x37, 0xa1, 0x95, 0x5d, 0x67, 0x1c, 0x4f, + 0x87, 0x46, 0x4f, 0x89, 0xf0, 0x7e, 0x59, 0x33, 0xe9, 0x62, 0xb2, 0x68, 0x55, 0x2c, 0x7e, 0xb7, + 0x52, 0x5b, 0xea, 0x32, 0xea, 0xfa, 0x45, 0xfe, 0x94, 0xdc, 0x76, 0xb9, 0x59, 0x48, 0xd9, 0xc6, + 0xb2, 0x94, 0x1d, 0x80, 0xbb, 0x68, 0x83, 0x4e, 0x93, 0x10, 0xee, 0x3e, 0xc3, 0x31, 0x7e, 0x1d, + 0xfb, 0x10, 0x38, 0x49, 0x30, 0xc5, 0x3a, 0x1d, 0xe5, 0xf8, 0xf5, 0x0d, 0x58, 0x54, 0xa2, 0x0d, + 0x38, 0x86, 0x3b, 0xcf, 0x49, 0xc6, 0x5f, 0xad, 0x7e, 0x41, 0x55, 0x7d, 0x99, 0xaa, 0xdf, 0xd7, + 0x00, 0x4a, 0x59, 0xc6, 0xe6, 0x9a, 0x65, 0x33, 0x02, 0x07, 0xcf, 0x08, 0xd7, 0xf9, 0x2e, 0xc7, + 0x22, 0x2a, 0x78, 0x98, 0xea, 0x23, 0x48, 0x0c, 0x45, 0xbd, 0xcc, 0x13, 0x32, 0x3b, 0xa1, 0xe1, + 0x05, 0xe6, 0x99, 0xac, 0xe7, 0x1b, 0xbe, 0x8d, 0x92, 0x49, 0x3b, 0xc1, 0x71, 0x2c, 0x8b, 0xfa, + 0x86, 0xaf, 0x00, 0x51, 0x81, 0xf1, 0x34, 0xe5, 0xd7, 0x9f, 0x9d, 0xb8, 0xeb, 0x32, 0xff, 0x0a, + 0xd0, 0x3b, 0x86, 0x9d, 0xf9, 0x95, 0xea, 0x18, 0x7a, 0x04, 0xed, 0x72, 0x15, 0x99, 0x5b, 0x93, + 0x09, 0xb2, 0x64, 0xeb, 0x6d, 0x2a, 0xef, 0x3e, 0x74, 0x4e, 0x78, 0xc0, 0xf1, 0x0a, 0x7f, 0x79, + 0x7b, 0xd0, 0x33, 0x55, 0x57, 0x12, 0xaa, 0xba, 0x11, 0xf0, 0x3c, 0xd3, 0x54, 0x1a, 0xf2, 0xfe, + 0xd2, 0x80, 0xa6, 0x0e, 0xeb, 0xa2, 0x36, 0xd5, 0xca, 0xda, 0xf4, 0x3f, 0x29, 0x91, 0x95, 0xac, + 0x6a, 0xce, 0x65, 0xd5, 0xff, 0xcb, 0x65, 0x59, 0x2e, 0xff, 0x56, 0x83, 0x96, 0xd9, 0xe6, 0xaf, + 0xdd, 0xce, 0xbc, 0x07, 0xad, 0x54, 0x6d, 0x3c, 0x56, 0x55, 0xaf, 0x7d, 0xd0, 0xd3, 0x8a, 0x8a, + 0x3a, 0x57, 0x12, 0x58, 0xf1, 0xe3, 0xd8, 0xf1, 0x63, 0xb5, 0x2b, 0x6b, 0x95, 0x76, 0x05, 0x81, + 0x93, 0x8a, 0x72, 0xba, 0x2e, 0xcb, 0xa9, 0x1c, 0xdb, 0x0d, 0x4a, 0xb3, 0xd2, 0xa0, 0x78, 0x1f, + 0x42, 0xf3, 0x38, 0x08, 0x27, 0x24, 0x91, 0x19, 0x1a, 0xa6, 0x3a, 0x4c, 0xbb, 0xbe, 0x1c, 0x0b, + 0x25, 0x53, 0x3c, 0xa5, 0xec, 0x5a, 0xd7, 0x7e, 0x0d, 0x79, 0x17, 0xd0, 0xd5, 0x69, 0xa0, 0x93, + 0xe9, 0x21, 0x80, 0x69, 0x31, 0x8a, 0x5c, 0x5a, 0x6c, 0x43, 0x2c, 0x1a, 0xb4, 0x07, 0xcd, 0xa9, + 0xd2, 0xac, 0xab, 0x6e, 0xe1, 0x03, 0x6d, 0x8f, 0x5f, 0x4c, 0x7b, 0xbf, 0xae, 0xc1, 0x8e, 0xea, + 0x31, 0x5f, 0xd9, 0x49, 0x2e, 0xef, 0x5d, 0x94, 0xfb, 0x1a, 0x15, 0xf7, 0x3d, 0x82, 0x16, 0xc3, + 0x19, 0xcd, 0x59, 0x88, 0x95, 0x67, 0xdb, 0x07, 0x77, 0x8a, 0x4c, 0x92, 0xba, 0x7c, 0x3d, 0xeb, + 0x97, 0x74, 0xde, 0x6f, 0x9a, 0xd0, 0xab, 0xce, 0x8a, 0x8a, 0x75, 0x1e, 0x5f, 0x10, 0xfa, 0x42, + 0x35, 0xc7, 0x35, 0xe9, 0x26, 0x1b, 0x25, 0xb2, 0x2a, 0x4c, 0xf3, 0x93, 0x49, 0xc0, 0x70, 0xa6, + 0xdd, 0x58, 0x22, 0xf4, 0xec, 0x10, 0x33, 0x42, 0x8b, 0xc3, 0xb4, 0x44, 0x88, 0x32, 0x10, 0xa6, + 0xf9, 
0x17, 0x39, 0xe5, 0x81, 0x34, 0xd2, 0xf1, 0x0d, 0x2c, 0xbb, 0xe2, 0x34, 0xcf, 0x30, 0x3f, + 0x14, 0xbb, 0xb6, 0xa6, 0xbb, 0x62, 0x83, 0x29, 0xe7, 0x8f, 0xf1, 0x34, 0xd3, 0x69, 0x6e, 0x61, + 0x84, 0xe5, 0x6a, 0x37, 0x9f, 0x8b, 0xa0, 0x96, 0x81, 0xe1, 0xf8, 0x36, 0x4a, 0x48, 0x50, 0xe0, + 0xc9, 0x55, 0x90, 0xca, 0xb4, 0x77, 0x7c, 0x0b, 0x83, 0xde, 0x83, 0xbe, 0x82, 0x7c, 0x9c, 0x61, + 0x76, 0x19, 0x88, 0x63, 0x5b, 0x96, 0x01, 0xc7, 0x5f, 0x9c, 0x10, 0xd4, 0x17, 0x98, 0x25, 0x38, + 0x3e, 0xb6, 0xb4, 0x82, 0xa2, 0x5e, 0x98, 0x40, 0x07, 0xb0, 0xad, 0x90, 0xa7, 0x87, 0x43, 0x9b, + 0xa1, 0x2d, 0x19, 0x96, 0xce, 0x89, 0x4c, 0x97, 0x8e, 0x7f, 0x8e, 0x83, 0x97, 0x7a, 0x3f, 0x3a, + 0x92, 0x7c, 0x1e, 0x8d, 0x9e, 0x40, 0xdf, 0xda, 0xa2, 0x67, 0xf8, 0x92, 0x84, 0xd8, 0xed, 0xca, + 0xa8, 0xbd, 0xad, 0xa3, 0xc0, 0x9e, 0xf2, 0x17, 0xa9, 0xd1, 0x19, 0x0c, 0x24, 0xf2, 0x74, 0xc2, + 0x28, 0xe7, 0x31, 0xf6, 0x71, 0x10, 0x3d, 0x4d, 0x33, 0x2d, 0xab, 0x27, 0x65, 0x15, 0x11, 0x55, + 0xd0, 0x68, 0x69, 0x37, 0x30, 0xa2, 0x17, 0xf0, 0x46, 0x65, 0xf6, 0x05, 0x23, 0x1c, 0x97, 0x72, + 0x37, 0x6f, 0x92, 0x7b, 0x13, 0xe7, 0x82, 0x60, 0xa1, 0xf6, 0x88, 0x1a, 0xc1, 0x5b, 0xaf, 0x2f, + 0xb8, 0xca, 0x89, 0x7e, 0x06, 0x6f, 0x2e, 0xea, 0xb5, 0x24, 0xf7, 0x6f, 0x92, 0x7c, 0x23, 0xab, + 0x48, 0x0e, 0x51, 0xbf, 0xd4, 0xce, 0x23, 0x95, 0x1c, 0x06, 0xe1, 0x7d, 0x04, 0xdd, 0xa7, 0x31, + 0x0d, 0x2f, 0x8e, 0x3e, 0xd7, 0xe4, 0x95, 0x2b, 0x77, 0x63, 0xe9, 0x95, 0xbb, 0xa1, 0xaf, 0xdc, + 0xde, 0x57, 0xd0, 0xa9, 0x6c, 0xe7, 0xf7, 0x64, 0x1e, 0x17, 0xa2, 0xf4, 0x45, 0x6a, 0x5b, 0x1b, + 0x5d, 0x51, 0xe3, 0xdb, 0x84, 0xa2, 0xbe, 0x5c, 0xa9, 0x50, 0x53, 0xcd, 0xad, 0x86, 0x44, 0xee, + 0xc4, 0x65, 0x18, 0xaa, 0x7b, 0x93, 0x85, 0xf1, 0x7e, 0x0e, 0xbd, 0xaa, 0x2b, 0xfe, 0x63, 0x0b, + 0x10, 0x38, 0x2c, 0xe0, 0xb8, 0xe8, 0xce, 0xc5, 0xd8, 0xbb, 0x07, 0x77, 0x17, 0x2a, 0xa6, 0x6e, + 0xfd, 0xae, 0xa1, 0xfb, 0xf1, 0x25, 0x4e, 0xb8, 0xb9, 0x9d, 0x3d, 0x86, 0x96, 0x79, 0xf2, 0xd0, + 0xa5, 0x78, 0xb0, 0xaf, 0x1e, 0x45, 0xf6, 0x8b, 0x47, 0x91, 0xfd, 0xd3, 0x82, 0xc2, 0x2f, 0x89, + 0xc5, 0x1a, 0x33, 0x4e, 0x19, 0x8e, 0x3e, 0x4f, 0xe2, 0xeb, 0xe2, 0x25, 0xa1, 0xc4, 0xe8, 0xea, + 0xec, 0x98, 0xe6, 0xe8, 0x77, 0x35, 0x58, 0x93, 0xba, 0x97, 0xde, 0x32, 0x14, 0x75, 0xdd, 0xd4, + 0xf2, 0x6a, 0xe5, 0xee, 0x9a, 0xca, 0xad, 0x6b, 0xbc, 0x53, 0xd6, 0xf8, 0xca, 0x0a, 0xd6, 0xbf, + 0xc6, 0x0a, 0xbc, 0xdf, 0xd6, 0xa1, 0xf3, 0x19, 0xe6, 0x57, 0x94, 0x5d, 0x88, 0xf3, 0x2c, 0x5b, + 0xda, 0xba, 0xde, 0x83, 0x0d, 0x36, 0x1b, 0x9d, 0x5f, 0x73, 0x53, 0xbf, 0x9b, 0x6c, 0xf6, 0x54, + 0x80, 0xe8, 0x2d, 0x00, 0x36, 0x1b, 0x0d, 0x03, 0xd5, 0xae, 0xea, 0xf2, 0xcd, 0x66, 0x1a, 0x81, + 0xde, 0x80, 0x96, 0x3f, 0x1b, 0x61, 0xc6, 0x28, 0xcb, 0x8a, 0xfa, 0xed, 0xcf, 0x3e, 0x96, 0xb0, + 0xe0, 0xf5, 0x67, 0xa3, 0x88, 0xd1, 0x34, 0xc5, 0x91, 0xac, 0xdf, 0x8e, 0xdf, 0xf2, 0x67, 0xcf, + 0x14, 0x42, 0x68, 0x3d, 0x2d, 0xb4, 0xae, 0x2b, 0xad, 0xa7, 0xa5, 0xd6, 0xd3, 0xd9, 0x28, 0xd5, + 0x5a, 0x55, 0xe1, 0x6e, 0x9d, 0xda, 0x5a, 0x4f, 0x8d, 0x56, 0x55, 0xb5, 0x37, 0x4e, 0x2d, 0xad, + 0xa7, 0xa5, 0xd6, 0x56, 0xc1, 0xab, 0xb5, 0x7a, 0x7f, 0xaa, 0xc1, 0xc6, 0x61, 0x9a, 0x9f, 0x65, + 0xc1, 0x18, 0xa3, 0x07, 0xd0, 0xe6, 0x94, 0x07, 0xf1, 0x28, 0x17, 0xa0, 0x3e, 0xdb, 0x40, 0xa2, + 0x14, 0xc1, 0xdb, 0xd0, 0x49, 0x31, 0x0b, 0xd3, 0x5c, 0x53, 0xd4, 0x77, 0x1b, 0xe2, 0x0c, 0x51, + 0x38, 0x45, 0xb2, 0x0f, 0xb7, 0xe5, 0xdc, 0x88, 0x24, 0x23, 0x55, 0xb4, 0xa7, 0x34, 0xc2, 0xda, + 0x55, 0x7d, 0x39, 0x75, 0x94, 0x7c, 0x6a, 0x26, 0xd0, 0xb7, 0xa1, 0x6f, 0xe8, 0x45, 0x33, 0x2b, + 0xa9, 0x95, 0xeb, 0x36, 0x35, 
0xf5, 0x99, 0x46, 0x7b, 0x5f, 0x99, 0x1c, 0x22, 0xc9, 0xf8, 0x59, + 0xc0, 0x03, 0xd1, 0xe8, 0xa4, 0xf2, 0xe4, 0xcc, 0xb4, 0xb5, 0x05, 0x88, 0xbe, 0x03, 0x7d, 0xae, + 0xf3, 0x2d, 0x1a, 0x15, 0x34, 0x6a, 0x37, 0xb7, 0xcc, 0xc4, 0x50, 0x13, 0x7f, 0x03, 0x7a, 0x25, + 0xb1, 0x6c, 0x9b, 0x94, 0xbd, 0x5d, 0x83, 0x15, 0xd1, 0xe4, 0xfd, 0x41, 0x39, 0x4b, 0x45, 0xce, + 0x7b, 0xf2, 0x20, 0xb7, 0x5c, 0xd5, 0x3e, 0xd8, 0x2c, 0x1a, 0x20, 0xed, 0x0c, 0x79, 0x78, 0x2b, + 0xb7, 0xfc, 0x10, 0x36, 0xb9, 0x31, 0x7d, 0x14, 0x05, 0x3c, 0xd0, 0xa9, 0x37, 0x57, 0x27, 0xf5, + 0xc2, 0xfc, 0x1e, 0xaf, 0x2e, 0xf4, 0x6d, 0xe8, 0xa8, 0xce, 0x5c, 0x2b, 0x54, 0xf6, 0xb5, 0x15, + 0x4e, 0xaa, 0xf0, 0x3e, 0x82, 0xd6, 0x90, 0x44, 0x99, 0xb2, 0xce, 0x85, 0x66, 0x98, 0x33, 0x86, + 0x93, 0xa2, 0x45, 0x29, 0x40, 0x51, 0x1e, 0x65, 0x57, 0xab, 0x9d, 0xa1, 0x00, 0x8f, 0x02, 0xa8, + 0x93, 0x55, 0x6a, 0xdb, 0x86, 0x35, 0x3b, 0x04, 0x14, 0x20, 0xe2, 0x6c, 0x1a, 0xcc, 0xcc, 0xd6, + 0xcb, 0x38, 0x9b, 0x06, 0x33, 0xb5, 0x40, 0x17, 0x9a, 0x2f, 0x03, 0x12, 0x87, 0xfa, 0xc1, 0xce, + 0xf1, 0x0b, 0xb0, 0x54, 0xe8, 0xd8, 0x0a, 0xff, 0x58, 0x87, 0xb6, 0xd2, 0xa8, 0x0c, 0xde, 0x86, + 0xb5, 0x30, 0x08, 0x27, 0x46, 0xa5, 0x04, 0xd0, 0xbb, 0x85, 0x21, 0xd5, 0x8b, 0x7a, 0x69, 0x6a, + 0x61, 0xdb, 0x43, 0x80, 0xec, 0x2a, 0x48, 0x2d, 0xef, 0x2c, 0xa5, 0x6e, 0x09, 0x22, 0x65, 0xf0, + 0x07, 0xd0, 0x51, 0xf1, 0xa9, 0x79, 0x9c, 0x55, 0x3c, 0x6d, 0x45, 0xa6, 0xb8, 0x1e, 0x89, 0x4b, + 0x51, 0xc0, 0x55, 0x13, 0xde, 0x3e, 0x78, 0xab, 0x42, 0x2e, 0x57, 0xb2, 0x2f, 0xbf, 0x1f, 0x27, + 0x9c, 0x5d, 0xfb, 0x8a, 0x76, 0xf0, 0x18, 0xa0, 0x44, 0x8a, 0x7a, 0x76, 0x81, 0xaf, 0x8b, 0xcb, + 0xdf, 0x05, 0xbe, 0x16, 0x6b, 0xbf, 0x0c, 0xe2, 0xbc, 0x70, 0xaa, 0x02, 0x7e, 0x50, 0x7f, 0x5c, + 0xf3, 0x42, 0xd8, 0x7c, 0x2a, 0x0e, 0x4c, 0x8b, 0xbd, 0x72, 0xe8, 0x39, 0x4b, 0x0f, 0x3d, 0xa7, + 0x78, 0x67, 0xee, 0x41, 0x9d, 0xa6, 0xba, 0x11, 0xae, 0xd3, 0xb4, 0x54, 0xe4, 0x58, 0x8a, 0xbc, + 0x7f, 0x38, 0x00, 0xa5, 0x16, 0x74, 0x02, 0x03, 0x42, 0x47, 0xa2, 0x8f, 0x23, 0x21, 0x56, 0x05, + 0x69, 0xc4, 0x70, 0x98, 0xb3, 0x8c, 0x5c, 0x62, 0xdd, 0xea, 0xef, 0x98, 0x63, 0xaa, 0x62, 0x9c, + 0x7f, 0x97, 0xd0, 0x13, 0xc5, 0x28, 0x2b, 0x97, 0x5f, 0xb0, 0xa1, 0x9f, 0xc0, 0x9d, 0x52, 0x68, + 0x64, 0xc9, 0xab, 0xdf, 0x28, 0xef, 0xb6, 0x91, 0x17, 0x95, 0xb2, 0x7e, 0x04, 0xb7, 0x09, 0x1d, + 0x7d, 0x99, 0xe3, 0xbc, 0x22, 0xa9, 0x71, 0xa3, 0xa4, 0x3e, 0xa1, 0x5f, 0x48, 0x8e, 0x52, 0xce, + 0x17, 0x70, 0xcf, 0x5a, 0xa8, 0x48, 0x7b, 0x4b, 0x9a, 0x73, 0xa3, 0xb4, 0x1d, 0x63, 0x97, 0x28, + 0x0c, 0xa5, 0xc8, 0x4f, 0x61, 0x87, 0xd0, 0xd1, 0x55, 0x40, 0xf8, 0xbc, 0xbc, 0xb5, 0x57, 0xad, + 0xf3, 0x45, 0x40, 0x78, 0x55, 0x98, 0x5a, 0xe7, 0x14, 0xb3, 0x71, 0x65, 0x9d, 0xeb, 0xaf, 0x5a, + 0xe7, 0xb1, 0xe4, 0x28, 0xe5, 0x3c, 0x85, 0x3e, 0xa1, 0xf3, 0xf6, 0x34, 0x6f, 0x94, 0xb2, 0x49, + 0x68, 0xd5, 0x96, 0x43, 0xe8, 0x67, 0x38, 0xe4, 0x94, 0xd9, 0xb1, 0xb0, 0x71, 0xa3, 0x8c, 0x2d, + 0xcd, 0x60, 0x84, 0x78, 0x5f, 0x42, 0xe7, 0xc7, 0xf9, 0x18, 0xf3, 0xf8, 0xdc, 0xe4, 0xfc, 0x7f, + 0xbb, 0xcc, 0xfc, 0xab, 0x0e, 0xed, 0xc3, 0x31, 0xa3, 0x79, 0x5a, 0xa9, 0xda, 0x2a, 0x87, 0x17, + 0xaa, 0xb6, 0xa4, 0x91, 0x55, 0x5b, 0x51, 0x7f, 0x08, 0x1d, 0x75, 0xaf, 0xd1, 0x0c, 0xaa, 0x0a, + 0xa1, 0xc5, 0xa4, 0x2f, 0xee, 0x51, 0x8a, 0xed, 0x40, 0xdf, 0x11, 0x35, 0x57, 0xb5, 0x1a, 0x95, + 0x6e, 0xf2, 0xe1, 0xbc, 0xcc, 0xba, 0x23, 0xe8, 0x4e, 0x94, 0x6f, 0x34, 0x97, 0x0a, 0xc0, 0x77, + 0x0a, 0xe3, 0xca, 0x35, 0xec, 0xdb, 0x3e, 0x54, 0xae, 0xee, 0x4c, 0x6c, 0xb7, 0xbe, 0x0f, 0x20, + 0x9a, 0xe6, 0x51, 0x51, 0xa8, 0xec, 0x5f, 0x04, 0xe6, 
0x84, 0x50, 0x8d, 0xb5, 0x1c, 0x0e, 0x4e, + 0xa1, 0xbf, 0x20, 0x73, 0x49, 0x99, 0xfa, 0x96, 0x5d, 0xa6, 0xca, 0x8b, 0x93, 0xcd, 0x6a, 0xd7, + 0xae, 0xbf, 0xd6, 0xd4, 0xa3, 0x41, 0xf9, 0x8a, 0xfb, 0x18, 0xba, 0x89, 0x6a, 0xbe, 0xcc, 0x06, + 0xd8, 0x37, 0x30, 0xbb, 0x31, 0xf3, 0x3b, 0x89, 0xdd, 0xa6, 0x7d, 0x08, 0x9d, 0x50, 0x7a, 0x60, + 0xe9, 0x46, 0x58, 0xce, 0xf1, 0xdb, 0xa1, 0xb5, 0xdb, 0x95, 0x46, 0xd1, 0xf9, 0x3a, 0x8d, 0xa2, + 0x7e, 0xf7, 0x5b, 0xf5, 0x4b, 0xe3, 0xe0, 0x9f, 0xeb, 0xd0, 0x78, 0x32, 0x3c, 0x42, 0x67, 0xb0, + 0x35, 0xff, 0x47, 0x10, 0xdd, 0xd7, 0x66, 0xad, 0xf8, 0x8b, 0x38, 0x78, 0xb0, 0x72, 0x5e, 0xb7, + 0xec, 0xb7, 0x90, 0x0f, 0x9b, 0x73, 0xff, 0x7f, 0x50, 0x71, 0xd4, 0x2c, 0xff, 0xc7, 0x36, 0xb8, + 0xbf, 0x6a, 0xda, 0x96, 0x39, 0x77, 0x47, 0x30, 0x32, 0x97, 0xbf, 0xb6, 0x18, 0x99, 0xab, 0xae, + 0x16, 0xb7, 0xd0, 0xf7, 0x61, 0x5d, 0xfd, 0x11, 0x42, 0xc5, 0xc5, 0xa5, 0xf2, 0xaf, 0x69, 0x70, + 0x67, 0x0e, 0x6b, 0x18, 0x9f, 0x43, 0xb7, 0xf2, 0x1b, 0x11, 0xbd, 0x51, 0xd1, 0x55, 0xfd, 0xa1, + 0x34, 0x78, 0x73, 0xf9, 0xa4, 0x91, 0x76, 0x08, 0x50, 0xfe, 0x34, 0x40, 0xae, 0xa6, 0x5e, 0xf8, + 0x31, 0x35, 0xb8, 0xb7, 0x64, 0xc6, 0x08, 0x39, 0x83, 0xad, 0xf9, 0x07, 0x7c, 0x34, 0xe7, 0xd5, + 0xf9, 0xe7, 0x73, 0xb3, 0x95, 0x2b, 0x5f, 0xfe, 0xa5, 0xd8, 0xf9, 0x67, 0x79, 0x23, 0x76, 0xc5, + 0x4f, 0x01, 0x23, 0x76, 0xe5, 0x7b, 0xfe, 0x2d, 0xf4, 0x39, 0xf4, 0xaa, 0xef, 0xdc, 0xa8, 0x70, + 0xd2, 0xd2, 0x87, 0xfe, 0xc1, 0x5b, 0x2b, 0x66, 0x8d, 0xc0, 0x0f, 0x60, 0x4d, 0x3d, 0x60, 0x17, + 0xe9, 0x68, 0xbf, 0x7b, 0x0f, 0xb6, 0xab, 0x48, 0xc3, 0xf5, 0x10, 0xd6, 0xd5, 0xed, 0xd2, 0x04, + 0x40, 0xe5, 0xb2, 0x39, 0xe8, 0xd8, 0x58, 0xef, 0xd6, 0xc3, 0x5a, 0xa1, 0x27, 0xab, 0xe8, 0xc9, + 0x96, 0xe9, 0xb1, 0x36, 0xe7, 0x7c, 0x5d, 0xa6, 0xeb, 0xa3, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, + 0x4c, 0xa9, 0xa8, 0x4d, 0xd0, 0x1f, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/grpc/types/api.proto b/vendor/github.com/containerd/containerd/api/grpc/types/api.proto new file mode 100644 index 0000000..902f6c1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/grpc/types/api.proto @@ -0,0 +1,344 @@ +syntax = "proto3"; + +package types; + +import "google/protobuf/timestamp.proto"; + +service API { + rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {} + rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {} + rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {} + rpc Signal(SignalRequest) returns (SignalResponse) {} + rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {} + rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {} + rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {} + rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {} + rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {} + rpc State(StateRequest) returns (StateResponse) {} + rpc Events(EventsRequest) returns (stream Event) {} + rpc Stats(StatsRequest) returns (StatsResponse) {} +} + +message GetServerVersionRequest { +} + +message GetServerVersionResponse { + uint32 major = 1; + uint32 minor = 2; + uint32 patch = 3; + string revision = 4; +} + +message UpdateProcessRequest { + string id = 1; + string pid = 2; + bool closeStdin = 3; // Close stdin of the container + uint32 width = 4; + uint32 height = 5; +} + +message UpdateProcessResponse { +} + +message CreateContainerRequest { + string id = 1; // ID of 
container + string bundlePath = 2; // path to OCI bundle + string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional) + string stdin = 4; // path to the file where stdin will be read (optional) + string stdout = 5; // path to file where stdout will be written (optional) + string stderr = 6; // path to file where stderr will be written (optional) + repeated string labels = 7; + bool noPivotRoot = 8; + string runtime = 9; + repeated string runtimeArgs = 10; + string checkpointDir = 11; // Directory where checkpoints are stored +} + +message CreateContainerResponse { + Container container = 1; +} + +message SignalRequest { + string id = 1; // ID of container + string pid = 2; // PID of process inside container + uint32 signal = 3; // Signal which will be sent, you can find value in "man 7 signal" +} + +message SignalResponse { +} + +message AddProcessRequest { + string id = 1; // ID of container + bool terminal = 2; // Use tty for container stdio + User user = 3; // User under which process will be run + repeated string args = 4; // Arguments for process, first is binary path itself + repeated string env = 5; // List of environment variables for process + string cwd = 6; // Working directory of process + string pid = 7; // Process ID + string stdin = 8; // path to the file where stdin will be read (optional) + string stdout = 9; // path to file where stdout will be written (optional) + string stderr = 10; // path to file where stderr will be written (optional) + repeated string capabilities = 11; + string apparmorProfile = 12; + string selinuxLabel = 13; + bool noNewPrivileges = 14; + repeated Rlimit rlimits = 15; +} + +message Rlimit { + string type = 1; + uint64 soft = 2; + uint64 hard = 3; +} + +message User { + uint32 uid = 1; // UID of user + uint32 gid = 2; // GID of user + repeated uint32 additionalGids = 3; // Additional groups to which user will be added +} + +message AddProcessResponse { + uint32 systemPid = 1; +} + +message CreateCheckpointRequest { + string id = 1; // ID of container + Checkpoint checkpoint = 2; // Checkpoint configuration + string checkpointDir = 3; // Directory where checkpoints are stored +} + +message CreateCheckpointResponse { +} + +message DeleteCheckpointRequest { + string id = 1; // ID of container + string name = 2; // Name of checkpoint + string checkpointDir = 3; // Directory where checkpoints are stored +} + +message DeleteCheckpointResponse { +} + +message ListCheckpointRequest { + string id = 1; // ID of container + string checkpointDir = 2; // Directory where checkpoints are stored +} + +message Checkpoint { + string name = 1; // Name of checkpoint + bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not + bool tcp = 3; // allow open tcp connections + bool unixSockets = 4; // allow external unix sockets + bool shell = 5; // allow shell-jobs + repeated string emptyNS = 6; +} + +message ListCheckpointResponse { + repeated Checkpoint checkpoints = 1; // List of checkpoints +} + +message StateRequest { + string id = 1; // container id for a single container +} + +message ContainerState { + string status = 1; +} + +message Process { + string pid = 1; + bool terminal = 2; // Use tty for container stdio + User user = 3; // User under which process will be run + repeated string args = 4; // Arguments for process, first is binary path itself + repeated string env = 5; // List of environment variables for process + string cwd = 6; // Working directory of process + uint32 systemPid = 7; + 
string stdin = 8; // path to the file where stdin will be read (optional) + string stdout = 9; // path to file where stdout will be written (optional) + string stderr = 10; // path to file where stderr will be written (optional) + repeated string capabilities = 11; + string apparmorProfile = 12; + string selinuxLabel = 13; + bool noNewPrivileges = 14; + repeated Rlimit rlimits = 15; +} + +message Container { + string id = 1; // ID of container + string bundlePath = 2; // Path to OCI bundle + repeated Process processes = 3; // List of processes which run in container + string status = 4; // Container status ("running", "paused", etc.) + repeated string labels = 5; + repeated uint32 pids = 6; + string runtime = 7; // runtime used to execute the container +} + +// Machine is information about machine on which containerd is run +message Machine { + uint32 cpus = 1; // number of cpus + uint64 memory = 2; // amount of memory +} + +// StateResponse is information about containerd daemon +message StateResponse { + repeated Container containers = 1; + Machine machine = 2; +} + +message UpdateContainerRequest { + string id = 1; // ID of container + string pid = 2; + string status = 3; // Status to which containerd will try to change + UpdateResource resources =4; +} + +message UpdateResource { + uint64 blkioWeight = 1; + uint64 cpuShares = 2; + uint64 cpuPeriod = 3; + uint64 cpuQuota = 4; + string cpusetCpus = 5; + string cpusetMems = 6; + uint64 memoryLimit = 7; + uint64 memorySwap = 8; + uint64 memoryReservation = 9; + uint64 kernelMemoryLimit = 10; + uint64 kernelTCPMemoryLimit = 11; + uint64 blkioLeafWeight = 12; + repeated WeightDevice blkioWeightDevice = 13; + repeated ThrottleDevice blkioThrottleReadBpsDevice = 14; + repeated ThrottleDevice blkioThrottleWriteBpsDevice = 15; + repeated ThrottleDevice blkioThrottleReadIopsDevice = 16; + repeated ThrottleDevice blkioThrottleWriteIopsDevice = 17; + uint64 pidsLimit = 18; +} + +message BlockIODevice { + int64 major = 1; + int64 minor = 2; +} + +message WeightDevice { + BlockIODevice blkIODevice = 1; + uint32 weight = 2; + uint32 leafWeight = 3; +} + +message ThrottleDevice { + BlockIODevice blkIODevice = 1; + uint64 rate = 2; +} + +message UpdateContainerResponse { +} + +message EventsRequest { + // Tag 1 is deprecated (old uint64 timestamp) + google.protobuf.Timestamp timestamp = 2; + bool storedOnly = 3; + string id = 4; +} + +message Event { + string type = 1; + string id = 2; + uint32 status = 3; + string pid = 4; + // Tag 5 is deprecated (old uint64 timestamp) + google.protobuf.Timestamp timestamp = 6; +} + +message NetworkStats { + string name = 1; // name of network interface + uint64 rx_bytes = 2; + uint64 rx_Packets = 3; + uint64 Rx_errors = 4; + uint64 Rx_dropped = 5; + uint64 Tx_bytes = 6; + uint64 Tx_packets = 7; + uint64 Tx_errors = 8; + uint64 Tx_dropped = 9; +} + +message CpuUsage { + uint64 total_usage = 1; + repeated uint64 percpu_usage = 2; + uint64 usage_in_kernelmode = 3; + uint64 usage_in_usermode = 4; +} + +message ThrottlingData { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message CpuStats { + CpuUsage cpu_usage = 1; + ThrottlingData throttling_data = 2; + uint64 system_usage = 3; +} + +message PidsStats { + uint64 current = 1; + uint64 limit = 2; +} + +message MemoryData { + uint64 usage = 1; + uint64 max_usage = 2; + uint64 failcnt = 3; + uint64 limit = 4; +} + +message MemoryStats { + uint64 cache = 1; + MemoryData usage = 2; + MemoryData swap_usage = 3; + MemoryData 
kernel_usage = 4; + map<string, uint64> stats = 5; +} + +message BlkioStatsEntry { + uint64 major = 1; + uint64 minor = 2; + string op = 3; + uint64 value = 4; +} + +message BlkioStats { + repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device + repeated BlkioStatsEntry io_serviced_recursive = 2; + repeated BlkioStatsEntry io_queued_recursive = 3; + repeated BlkioStatsEntry io_service_time_recursive = 4; + repeated BlkioStatsEntry io_wait_time_recursive = 5; + repeated BlkioStatsEntry io_merged_recursive = 6; + repeated BlkioStatsEntry io_time_recursive = 7; + repeated BlkioStatsEntry sectors_recursive = 8; +} + +message HugetlbStats { + uint64 usage = 1; + uint64 max_usage = 2; + uint64 failcnt = 3; + uint64 limit = 4; +} + +message CgroupStats { + CpuStats cpu_stats = 1; + MemoryStats memory_stats = 2; + BlkioStats blkio_stats = 3; + map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage" + PidsStats pids_stats = 5; +} + +message StatsResponse { + repeated NetworkStats network_stats = 1; + CgroupStats cgroup_stats = 2; + // Tag 3 is deprecated (old uint64 timestamp) + google.protobuf.Timestamp timestamp = 4; +}; + +message StatsRequest { + string id = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/http/pprof/pprof.go b/vendor/github.com/containerd/containerd/api/http/pprof/pprof.go new file mode 100644 index 0000000..5a61e16 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/http/pprof/pprof.go @@ -0,0 +1,19 @@ +package pprof + +import ( + // expvar init routine adds the "/debug/vars" handler + _ "expvar" + "net/http" + // net/http/pprof installs the "/debug/pprof/{block,heap,goroutine,threadcreate}" handler + _ "net/http/pprof" + + "github.com/Sirupsen/logrus" +) + +// Enable registers the "/debug/pprof" handler +func Enable(address string) { + http.Handle("/", http.RedirectHandler("/debug/pprof", http.StatusMovedPermanently)) + + go http.ListenAndServe(address, nil) + logrus.Debugf("pprof listening on address %s", address) +} diff --git a/vendor/github.com/containerd/containerd/archutils/epoll.go b/vendor/github.com/containerd/containerd/archutils/epoll.go new file mode 100644 index 0000000..3f08d8f --- /dev/null +++ b/vendor/github.com/containerd/containerd/archutils/epoll.go @@ -0,0 +1,22 @@ +// +build linux,!arm64 + +package archutils + +import ( + "syscall" +) + +// EpollCreate1 directly calls syscall.EpollCreate1 +func EpollCreate1(flag int) (int, error) { + return syscall.EpollCreate1(flag) +} + +// EpollCtl directly calls syscall.EpollCtl +func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error { + return syscall.EpollCtl(epfd, op, fd, event) +} + +// EpollWait directly calls syscall.EpollWait +func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { + return syscall.EpollWait(epfd, events, msec) +} diff --git a/vendor/github.com/containerd/containerd/archutils/epoll_arm64.go b/vendor/github.com/containerd/containerd/archutils/epoll_arm64.go new file mode 100644 index 0000000..3c6e63d --- /dev/null +++ b/vendor/github.com/containerd/containerd/archutils/epoll_arm64.go @@ -0,0 +1,73 @@ +// +build linux,arm64 + +package archutils + +// #include <sys/epoll.h> +/* +int EpollCreate1(int flag) { + return epoll_create1(flag); +} + +int EpollCtl(int efd, int op, int sfd, int events, int fd) { + struct epoll_event event; + event.events = events; + event.data.fd = fd; + + return epoll_ctl(efd, op, sfd, &event); +} + +struct event_t { + uint32_t events; +
int fd; +}; + +struct epoll_event events[128]; +int run_epoll_wait(int fd, struct event_t *event) { + int n, i; + n = epoll_wait(fd, events, 128, -1); + for (i = 0; i < n; i++) { + event[i].events = events[i].events; + event[i].fd = events[i].data.fd; + } + return n; +} +*/ +import "C" + +import ( + "fmt" + "syscall" + "unsafe" +) + +// EpollCreate1 calls a C implementation +func EpollCreate1(flag int) (int, error) { + fd := int(C.EpollCreate1(C.int(flag))) + if fd < 0 { + return fd, fmt.Errorf("failed to create epoll, errno is %d", fd) + } + return fd, nil +} + +// EpollCtl calls a C implementation +func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error { + errno := C.EpollCtl(C.int(epfd), C.int(op), C.int(fd), C.int(event.Events), C.int(event.Fd)) + if errno < 0 { + return fmt.Errorf("failed to ctl epoll") + } + return nil +} + +// EpollWait calls a C implementation +func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { + var c_events [128]C.struct_event_t + n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events)))) + if n < 0 { + return int(n), fmt.Errorf("failed to wait epoll") + } + for i := 0; i < n; i++ { + events[i].Fd = int32(c_events[i].fd) + events[i].Events = uint32(c_events[i].events) + } + return int(n), nil +} diff --git a/vendor/github.com/containerd/containerd/containerd-shim/console.go b/vendor/github.com/containerd/containerd/containerd-shim/console.go new file mode 100644 index 0000000..d2615ce --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd-shim/console.go @@ -0,0 +1,56 @@ +// +build !solaris + +package containerdShim + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +// newConsole returns an initialized console that can be used within a container by copying bytes +// from the master side to the slave that is attached as the tty for the container's init process. +func newConsole(uid, gid int) (*os.File, string, error) { + master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + console, err := ptsname(master) + if err != nil { + return nil, "", err + } + if err := unlockpt(master); err != nil { + return nil, "", err + } + if err := os.Chmod(console, 0600); err != nil { + return nil, "", err + } + if err := os.Chown(console, uid, gid); err != nil { + return nil, "", err + } + return master, console, nil +} + +func ioctl(fd uintptr, flag, data uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +}
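A usage note for the console helpers above: the master side is returned to the caller while the slave path is opened and attached as the child's tty. The sketch below is illustrative only and not part of this patch; it assumes it lives alongside newConsole in this package with io and os/exec added to the imports, and the uid/gid of 0 and the /bin/sh command are assumptions.

func exampleConsole() error {
	// Allocate a pty pair; newConsole chowns the slave to the given uid/gid.
	master, consolePath, err := newConsole(0, 0)
	if err != nil {
		return err
	}
	defer master.Close()
	// Open the slave side and hand it to the child as its stdio.
	slave, err := os.OpenFile(consolePath, os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer slave.Close()
	cmd := exec.Command("/bin/sh")
	cmd.Stdin, cmd.Stdout, cmd.Stderr = slave, slave, slave
	if err := cmd.Start(); err != nil {
		return err
	}
	// Relay the child's output from the master side; a real init process would
	// also call setsid and TIOCSCTTY to make the slave its controlling terminal.
	go io.Copy(os.Stdout, master)
	return cmd.Wait()
}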
+// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + var n int32 + if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/containerd/containerd-shim/console_solaris.go b/vendor/github.com/containerd/containerd/containerd-shim/console_solaris.go new file mode 100644 index 0000000..746e60d --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd-shim/console_solaris.go @@ -0,0 +1,14 @@ +// +build solaris + +package containerdShim + +import ( + "errors" + "os" +) + +// newConsole returns an initialized console that can be used within a container by copying bytes +// from the master side to the slave that is attached as the tty for the container's init process. +func newConsole(uid, gid int) (*os.File, string, error) { + return nil, "", errors.New("newConsole not implemented on Solaris") +} diff --git a/vendor/github.com/containerd/containerd/containerd-shim/main.go b/vendor/github.com/containerd/containerd/containerd-shim/main.go new file mode 100644 index 0000000..6d2388c --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd-shim/main.go @@ -0,0 +1,169 @@ +package containerdShim + +import ( + "flag" + "fmt" + "os" + "os/signal" + "path/filepath" + "runtime" + "syscall" + + "github.com/containerd/containerd/osutils" + "github.com/containerd/console" +) + +func writeMessage(f *os.File, level string, err error) { + fmt.Fprintf(f, `{"level": "%s","msg": "%s"}`, level, err) +} + +type controlMessage struct { + Type int + Width int + Height int +} + +// containerd-shim is a small shim that sits in front of a runtime implementation +// that allows it to be reparented to init and handle reattach from the caller. +// +// the cwd of the shim should be the path to the state directory where the shim +// can locate fifos and other information.
+// Arg0: id of the container +// Arg1: bundle path +// Arg2: runtime binary +func Main() { + flag.Parse() + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + f, err := os.OpenFile(filepath.Join(cwd, "shim-log.json"), os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) + if err != nil { + panic(err) + } + if err := start(f); err != nil { + // this means that the runtime failed starting the container and will have the + // proper error messages in the runtime log, so we should not treat this as a + // shim failure because the shim executed properly + if err == errRuntime { + f.Close() + return + } + // log the error instead of writing to stderr because the shim will have + // /dev/null as its stdio, since it is supposed to be reparented to system + // init and will not have anyone to read from it + writeMessage(f, "error", err) + f.Close() + os.Exit(1) + } +} + +func start(log *os.File) error { + // start handling signals as soon as possible so that things are properly reaped + // even if the runtime exits before we hit the handler + signals := make(chan os.Signal, 2048) + signal.Notify(signals) + // set the shim as the subreaper for all orphaned processes created by the container + if err := osutils.SetSubreaper(1); err != nil { + return err + } + // open the exit pipe + f, err := os.OpenFile("exit", syscall.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + control, err := os.OpenFile("control", syscall.O_RDWR, 0) + if err != nil { + return err + } + defer control.Close() + p, err := newProcess(flag.Arg(0), flag.Arg(1), flag.Arg(2)) + if err != nil { + return err + } + if err := p.openIO(); err != nil { + return err + } + defer func() { + if err := p.Close(); err != nil { + writeMessage(log, "warn", err) + } + }() + if err := p.create(); err != nil { + p.delete() + return err + } + if err := <-p.consoleErrCh; err != nil { + p.delete() + return err + } + msgC := make(chan controlMessage, 32) + go func() { + for { + var m controlMessage + if _, err := fmt.Fscanf(control, "%d %d %d\n", &m.Type, &m.Width, &m.Height); err != nil { + continue + } + msgC <- m + } + }() + if runtime.GOOS == "solaris" { + return nil + } + var exitShim bool + for { + select { + case s := <-signals: + switch s { + case syscall.SIGCHLD: + exits, _ := osutils.Reap(false) + for _, e := range exits { + // check to see if runtime is one of the processes that has exited + if e.Pid == p.pid() { + exitShim = true + writeInt("exitStatus", e.Status) + } + } + } + // runtime has exited so the shim can also exit + if exitShim { + // kill all processes in the container in case it was not running in + // its own PID namespace + p.killAll() + // wait for all the processes and IO to finish + p.Wait() + // delete the container from the runtime + p.delete() + // the close of the exit fifo will happen when the shim exits + return nil + } + case msg := <-msgC: + switch msg.Type { + case 0: + // close stdin + if p.stdinCloser != nil { + p.stdinCloser.Close() + } + case 1: + if p.console == nil { + continue + } + p.console.Resize(console.WinSize{ + Width: uint16(msg.Width), + Height: uint16(msg.Height), + }) + } + } + } +} + +func writeInt(path string, i int) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + _, err = fmt.Fprintf(f, "%d", i) + return err +} diff --git a/vendor/github.com/containerd/containerd/containerd-shim/process.go b/vendor/github.com/containerd/containerd/containerd-shim/process.go new file mode 100644 index 0000000..2cb362d --- /dev/null +++
b/vendor/github.com/containerd/containerd/containerd-shim/process.go @@ -0,0 +1,308 @@ +package containerdShim + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "sync" + "syscall" + "time" + + "github.com/containerd/containerd/osutils" + "github.com/containerd/containerd/specs" + "github.com/containerd/console" + runc "github.com/containerd/go-runc" +) + +var errRuntime = errors.New("shim: runtime execution error") + +type checkpoint struct { + // Timestamp is the time that checkpoint happened + Created time.Time `json:"created"` + // Name is the name of the checkpoint + Name string `json:"name"` + // TCP checkpoints open tcp connections + TCP bool `json:"tcp"` + // UnixSockets persists unix sockets in the checkpoint + UnixSockets bool `json:"unixSockets"` + // Shell persists tty sessions in the checkpoint + Shell bool `json:"shell"` + // Exit exits the container after the checkpoint is finished + Exit bool `json:"exit"` + // EmptyNS tells CRIU not to restore a particular namespace + EmptyNS []string `json:"emptyNS,omitempty"` +} + +type processState struct { + specs.ProcessSpec + Exec bool `json:"exec"` + Stdin string `json:"containerdStdin"` + Stdout string `json:"containerdStdout"` + Stderr string `json:"containerdStderr"` + RuntimeArgs []string `json:"runtimeArgs"` + NoPivotRoot bool `json:"noPivotRoot"` + CheckpointPath string `json:"checkpoint"` + RootUID int `json:"rootUID"` + RootGID int `json:"rootGID"` +} + +type process struct { + sync.WaitGroup + id string + bundle string + stdio *stdio + containerPid int + checkpoint *checkpoint + checkpointPath string + shimIO *IO + stdinCloser io.Closer + state *processState + runtime string + ioCleanupFn func() + + socket *runc.Socket + console console.Console + consoleErrCh chan error +} + +func newProcess(id, bundle, runtimeName string) (*process, error) { + p := &process{ + id: id, + bundle: bundle, + runtime: runtimeName, + consoleErrCh: make(chan error, 1), + } + s, err := loadProcess() + if err != nil { + return nil, err + } + p.state = s + if s.CheckpointPath != "" { + cpt, err := loadCheckpoint(s.CheckpointPath) + if err != nil { + return nil, err + } + p.checkpoint = cpt + p.checkpointPath = s.CheckpointPath + } + return p, nil +} + +func loadProcess() (*processState, error) { + f, err := os.Open("process.json") + if err != nil { + return nil, err + } + defer f.Close() + var s processState + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + return &s, nil +} + +func loadCheckpoint(checkpointPath string) (*checkpoint, error) { + f, err := os.Open(filepath.Join(checkpointPath, "config.json")) + if err != nil { + return nil, err + } + defer f.Close() + var cpt checkpoint + if err := json.NewDecoder(f).Decode(&cpt); err != nil { + return nil, err + } + return &cpt, nil +} + +func (p *process) create() error { + cwd, err := os.Getwd() + if err != nil { + return err + } + logPath := filepath.Join(cwd, "log.json") + args := append([]string{ + "--log", logPath, + "--log-format", "json", + }, p.state.RuntimeArgs...) 
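+	// For illustration only (assuming the default "runc" runtime, no extra
+	// RuntimeArgs, and the plain create branch below), the assembled command
+	// ends up looking roughly like:
+	//   runc --log <cwd>/log.json --log-format json create --bundle <bundle> --pid-file <cwd>/pid <id>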
+ if p.state.Exec { + args = append(args, "exec", + "-d", + "--process", filepath.Join(cwd, "process.json"), + ) + if p.socket != nil { + args = append(args, "--console-socket", p.socket.Path()) + } + + } else if p.checkpoint != nil { + args = append(args, "restore", + "-d", + "--image-path", p.checkpointPath, + "--work-path", filepath.Join(p.checkpointPath, "criu.work", "restore-"+time.Now().Format(time.RFC3339Nano)), + ) + add := func(flags ...string) { + args = append(args, flags...) + } + if p.checkpoint.Shell { + add("--shell-job") + } + if p.checkpoint.TCP { + add("--tcp-established") + } + if p.checkpoint.UnixSockets { + add("--ext-unix-sk") + } + if p.state.NoPivotRoot { + add("--no-pivot") + } + for _, ns := range p.checkpoint.EmptyNS { + add("--empty-ns", ns) + } + + } else { + args = append(args, "create", + "--bundle", p.bundle, + ) + if p.socket != nil { + args = append(args, "--console-socket", p.socket.Path()) + } + if p.state.NoPivotRoot { + args = append(args, "--no-pivot") + } + } + args = append(args, + "--pid-file", filepath.Join(cwd, "pid"), + p.id, + ) + cmd := exec.Command(p.runtime, args...) + cmd.Dir = p.bundle + cmd.Stdin = p.stdio.stdin + cmd.Stdout = p.stdio.stdout + cmd.Stderr = p.stdio.stderr + // Call out to SetPDeathSig to set SysProcAttr as elements are platform specific + cmd.SysProcAttr = osutils.SetPDeathSig() + + if err := cmd.Start(); err != nil { + if exErr, ok := err.(*exec.Error); ok { + if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist { + return fmt.Errorf("%s not installed on system", p.runtime) + } + } + return err + } + if runtime.GOOS != "solaris" { + // Since current logic dictates that we need a pid at the end of p.create + // we need to call runtime start as well on Solaris hence we need the + // pipes to stay open. + p.stdio.stdout.Close() + p.stdio.stderr.Close() + } + if err := cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + return errRuntime + } + return err + } + data, err := ioutil.ReadFile("pid") + if err != nil { + return err + } + pid, err := strconv.Atoi(string(data)) + if err != nil { + return err + } + p.containerPid = pid + return nil +} + +func (p *process) pid() int { + return p.containerPid +} + +func (p *process) delete() error { + if !p.state.Exec { + cmd := exec.Command(p.runtime, append(p.state.RuntimeArgs, "delete", p.id)...) 
+ cmd.SysProcAttr = osutils.SetPDeathSig() + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %v", out, err) + } + } + return nil +} + +// IO holds all 3 standard io Reader/Writer (stdin,stdout,stderr) +type IO struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser +} + +func (p *process) initializeIO(rootuid, rootgid int) (i *IO, err error) { + var fds []uintptr + i = &IO{} + // cleanup in case of an error + defer func() { + if err != nil { + for _, fd := range fds { + syscall.Close(int(fd)) + } + } + }() + // STDIN + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + fds = append(fds, r.Fd(), w.Fd()) + p.stdio.stdin, i.Stdin = r, w + // STDOUT + if r, w, err = os.Pipe(); err != nil { + return nil, err + } + fds = append(fds, r.Fd(), w.Fd()) + p.stdio.stdout, i.Stdout = w, r + // STDERR + if r, w, err = os.Pipe(); err != nil { + return nil, err + } + fds = append(fds, r.Fd(), w.Fd()) + p.stdio.stderr, i.Stderr = w, r + // change ownership of the pipes in case we are in a user namespace + for _, fd := range fds { + if err := syscall.Fchown(int(fd), rootuid, rootgid); err != nil { + return nil, err + } + } + return i, nil +} +func (p *process) Close() error { + err := p.stdio.Close() + if p.socket != nil { + p.socket.Close() + } + return err +} + +type stdio struct { + stdin *os.File + stdout *os.File + stderr *os.File +} + +func (s *stdio) Close() error { + err := s.stdin.Close() + if oerr := s.stdout.Close(); err == nil { + err = oerr + } + if oerr := s.stderr.Close(); err == nil { + err = oerr + } + return err +} diff --git a/vendor/github.com/containerd/containerd/containerd-shim/process_linux.go b/vendor/github.com/containerd/containerd/containerd-shim/process_linux.go new file mode 100644 index 0000000..69dc7fa --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd-shim/process_linux.go @@ -0,0 +1,190 @@ +// +build !solaris + +package containerdShim + +import ( + "fmt" + "io" + "os/exec" + "syscall" + "time" + + "github.com/containerd/containerd/osutils" + "github.com/containerd/console" + runc "github.com/containerd/go-runc" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" +) + +// openIO opens the pre-created fifo's for use with the container +// in RDWR so that they remain open if the other side stops listening +func (p *process) openIO() error { + p.stdio = &stdio{} + var ( + uid = p.state.RootUID + gid = p.state.RootGID + ) + + ctx, _ := context.WithTimeout(context.Background(), 15*time.Second) + + stdinCloser, err := fifo.OpenFifo(ctx, p.state.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return err + } + p.stdinCloser = stdinCloser + + if p.state.Terminal { + socket, err := runc.NewTempConsoleSocket() + if err != nil { + return err + } + p.socket = socket + consoleCh := p.waitConsole(socket) + + stdin, err := fifo.OpenFifo(ctx, p.state.Stdin, syscall.O_RDONLY, 0) + if err != nil { + return err + } + stdoutw, err := fifo.OpenFifo(ctx, p.state.Stdout, syscall.O_WRONLY, 0) + if err != nil { + return err + } + stdoutr, err := fifo.OpenFifo(ctx, p.state.Stdout, syscall.O_RDONLY, 0) + if err != nil { + return err + } + // open the fifos but wait until we receive the console before we start + // copying data back and forth between the two + go p.setConsole(consoleCh, stdin, stdoutw) + + p.Add(1) + p.ioCleanupFn = func() { + stdoutr.Close() + stdoutw.Close() + } + return nil + } + close(p.consoleErrCh) + i, err := p.initializeIO(uid, gid) + if err != nil { + return err + } + 
p.shimIO = i + // non-tty + ioClosers := []io.Closer{} + for _, pair := range []struct { + name string + dest func(wc io.WriteCloser, rc io.Closer) + }{ + { + p.state.Stdout, + func(wc io.WriteCloser, rc io.Closer) { + p.Add(1) + go func() { + io.Copy(wc, i.Stdout) + p.Done() + }() + }, + }, + { + p.state.Stderr, + func(wc io.WriteCloser, rc io.Closer) { + p.Add(1) + go func() { + io.Copy(wc, i.Stderr) + p.Done() + }() + }, + }, + } { + fw, err := fifo.OpenFifo(ctx, pair.name, syscall.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", pair.name, err) + } + fr, err := fifo.OpenFifo(ctx, pair.name, syscall.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", pair.name, err) + } + pair.dest(fw, fr) + ioClosers = append(ioClosers, fw, fr) + } + + f, err := fifo.OpenFifo(ctx, p.state.Stdin, syscall.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", p.state.Stdin, err) + } + p.ioCleanupFn = func() { + for _, c := range ioClosers { + c.Close() + } + } + go func() { + io.Copy(i.Stdin, f) + i.Stdin.Close() + f.Close() + }() + + return nil +} + +func (p *process) Wait() { + p.WaitGroup.Wait() + if p.ioCleanupFn != nil { + p.ioCleanupFn() + } + if p.console != nil { + p.console.Close() + } +} + +func (p *process) killAll() error { + if !p.state.Exec { + cmd := exec.Command(p.runtime, append(p.state.RuntimeArgs, "kill", "--all", p.id, "SIGKILL")...) + cmd.SysProcAttr = osutils.SetPDeathSig() + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %v", out, err) + } + } + return nil +} + +func (p *process) setConsole(c <-chan *consoleR, stdin io.Reader, stdout io.Writer) { + r := <-c + if r.err != nil { + p.consoleErrCh <- r.err + return + } + close(p.consoleErrCh) + p.console = r.c + // copy from the console into the provided fifos + go io.Copy(r.c, stdin) + go func() { + io.Copy(stdout, r.c) + p.Done() + }() +} + +type consoleR struct { + c console.Console + err error +} + +func (p *process) waitConsole(socket *runc.Socket) <-chan *consoleR { + c := make(chan *consoleR, 1) + go func() { + master, err := socket.ReceiveMaster() + socket.Close() + if err != nil { + c <- &consoleR{ + err: err, + } + return + } + c <- &consoleR{ + c: master, + } + }() + return c + +} diff --git a/vendor/github.com/containerd/containerd/containerd-shim/process_solaris.go b/vendor/github.com/containerd/containerd/containerd-shim/process_solaris.go new file mode 100644 index 0000000..15465c3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd-shim/process_solaris.go @@ -0,0 +1,74 @@ +// +build solaris + +package containerdShim + +import ( + "io" + "os" + "syscall" +) + +// TODO: Update to using fifo's package in openIO. Need to +// 1. Merge and vendor changes in the package to use sys/unix. +// 2. Figure out why context.Background is timing out. 
+// openIO opens the pre-created fifos for use with the container
+// in RDWR so that they remain open if the other side stops listening
+func (p *process) openIO() error {
+	p.stdio = &stdio{}
+	var (
+		uid = p.state.RootUID
+		gid = p.state.RootGID
+	)
+	i, err := p.initializeIO(uid, gid)
+	if err != nil {
+		return err
+	}
+	p.shimIO = i
+	// Both tty and non-tty mode are handled by the runtime using
+	// the following pipes
+	for _, pair := range []struct {
+		name string
+		dest func(f *os.File)
+	}{
+		{
+			p.state.Stdout,
+			func(f *os.File) {
+				p.Add(1)
+				go func() {
+					io.Copy(f, i.Stdout)
+					p.Done()
+				}()
+			},
+		},
+		{
+			p.state.Stderr,
+			func(f *os.File) {
+				p.Add(1)
+				go func() {
+					io.Copy(f, i.Stderr)
+					p.Done()
+				}()
+			},
+		},
+	} {
+		f, err := os.OpenFile(pair.name, syscall.O_RDWR, 0)
+		if err != nil {
+			return err
+		}
+		pair.dest(f)
+	}
+
+	f, err := os.OpenFile(p.state.Stdin, syscall.O_RDONLY, 0)
+	if err != nil {
+		return err
+	}
+	go func() {
+		io.Copy(i.Stdin, f)
+		i.Stdin.Close()
+	}()
+
+	return nil
+}
+
+func (p *process) killAll() error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/containerd/main.go b/vendor/github.com/containerd/containerd/containerd/main.go
new file mode 100644
index 0000000..913cbf6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/containerd/main.go
@@ -0,0 +1,280 @@
+package containerd
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"os/signal"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health"
+	"google.golang.org/grpc/health/grpc_health_v1"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/urfave/cli"
+	"github.com/cyberdelia/go-metrics-graphite"
+	"github.com/containerd/containerd"
+	grpcserver "github.com/containerd/containerd/api/grpc/server"
+	"github.com/containerd/containerd/api/grpc/types"
+	"github.com/containerd/containerd/api/http/pprof"
+	"github.com/containerd/containerd/supervisor"
+	"github.com/docker/docker/pkg/listeners"
+	"github.com/rcrowley/go-metrics"
+)
+
+const (
+	usage               = `High performance container daemon`
+	minRlimit           = 1024
+	defaultStateDir     = "/run/containerd"
+	defaultGRPCEndpoint = "unix:///run/containerd/containerd.sock"
+)
+
+var daemonFlags = []cli.Flag{
+	cli.BoolFlag{
+		Name:  "debug",
+		Usage: "enable debug output in the logs",
+	},
+	cli.StringFlag{
+		Name:  "log-level",
+		Usage: "Set the logging level [debug, info, warn, error, fatal, panic]",
+	},
+	cli.StringFlag{
+		Name:  "state-dir",
+		Value: defaultStateDir,
+		Usage: "runtime state directory",
+	},
+	cli.DurationFlag{
+		Name:  "metrics-interval",
+		Value: 5 * time.Minute,
+		Usage: "interval for flushing metrics to the store",
+	},
+	cli.StringFlag{
+		Name:  "listen,l",
+		Value: defaultGRPCEndpoint,
+		Usage: "proto://address on which the GRPC API will listen",
+	},
+	cli.StringFlag{
+		Name:  "runtime,r",
+		Value: "runc",
+		Usage: "name or path of the OCI compliant runtime to use when executing containers",
+	},
+	cli.StringSliceFlag{
+		Name:  "runtime-args",
+		Value: &cli.StringSlice{},
+		Usage: "specify additional runtime args",
+	},
+	cli.StringFlag{
+		Name:  "shim",
+		Value: "containerd-shim",
+		Usage: "Name or path of shim",
+	},
+	cli.StringFlag{
+		Name:  "pprof-address",
+		Usage: "http address to listen for pprof events",
+	},
+	cli.DurationFlag{
+		Name:  "start-timeout",
+		Value: 30 * time.Second,
+		Usage: "timeout duration for waiting on a container to start before it is killed",
+	},
+	cli.IntFlag{
+		Name:  "retain-count",
+		Value: 500,
+		Usage: "number of past events to keep in the event log",
+	},
+	cli.StringFlag{
+		Name:  "graphite-address",
+		Usage: "Address of graphite server",
+	},
+}
+
+// dumpStacks dumps the runtime stacks of all goroutines.
+func dumpStacks() {
+	var (
+		buf       []byte
+		stackSize int
+	)
+	bufferLen := 16384
+	for stackSize == len(buf) {
+		buf = make([]byte, bufferLen)
+		stackSize = runtime.Stack(buf, true)
+		bufferLen *= 2
+	}
+	buf = buf[:stackSize]
+	logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
+}
+
+func setupDumpStacksTrap() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGUSR1)
+	go func() {
+		for range c {
+			dumpStacks()
+		}
+	}()
+}
+
+func Main() {
+	logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: time.RFC3339Nano})
+	app := cli.NewApp()
+	app.Name = "containerd"
+	if containerd.GitCommit != "" {
+		app.Version = fmt.Sprintf("%s commit: %s", containerd.Version, containerd.GitCommit)
+	} else {
+		app.Version = containerd.Version
+	}
+	app.Usage = usage
+	app.Flags = daemonFlags
+	app.Before = func(context *cli.Context) error {
+		setupDumpStacksTrap()
+		if context.GlobalBool("debug") {
+			logrus.SetLevel(logrus.DebugLevel)
+			if context.GlobalDuration("metrics-interval") > 0 {
+				if err := debugMetrics(context.GlobalDuration("metrics-interval"), context.GlobalString("graphite-address")); err != nil {
+					return err
+				}
+			}
+		}
+		if logLevel := context.GlobalString("log-level"); logLevel != "" {
+			lvl, err := logrus.ParseLevel(logLevel)
+			if err != nil {
+				lvl = logrus.InfoLevel
+				fmt.Fprintf(os.Stderr, "Unable to parse logging level %s, defaulting to info\n", logLevel)
+			}
+			logrus.SetLevel(lvl)
+		}
+		if p := context.GlobalString("pprof-address"); len(p) > 0 {
+			pprof.Enable(p)
+		}
+		if err := checkLimits(); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	app.Action = func(context *cli.Context) {
+		if err := daemon(context); err != nil {
+			logrus.Fatal(err)
+		}
+	}
+	if err := app.Run(os.Args); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+func daemon(context *cli.Context) error {
+	stateDir := context.String("state-dir")
+	if err := os.MkdirAll(stateDir, 0755); err != nil {
+		return err
+	}
+	s := make(chan os.Signal, 2048)
+	signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
+	// Split the listen string of the form proto://addr
+	listenSpec := context.String("listen")
+	listenParts := strings.SplitN(listenSpec, "://", 2)
+	if len(listenParts) != 2 {
+		return fmt.Errorf("bad listen address format %s, expected proto://address", listenSpec)
+	}
+	// Register server early to allow healthcheck to be done
+	server, err := startServer(listenParts[0], listenParts[1])
+	if err != nil {
+		return err
+	}
+	sv, err := supervisor.New(
+		stateDir,
+		context.String("runtime"),
+		context.String("shim"),
+		context.StringSlice("runtime-args"),
+		context.Duration("start-timeout"),
+		context.Int("retain-count"))
+	if err != nil {
+		return err
+	}
+	types.RegisterAPIServer(server, grpcserver.NewServer(sv))
+	wg := &sync.WaitGroup{}
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		w := supervisor.NewWorker(sv, wg)
+		go w.Start()
+	}
+	if err := sv.Start(); err != nil {
+		return err
+	}
+	for ss := range s {
+		switch ss {
+		case syscall.SIGPIPE:
+			continue
+		default:
+			logrus.Infof("stopping containerd after receiving %s", ss)
+			server.Stop()
+			os.Exit(0)
+		}
+	}
+	return nil
+}
+
+func startServer(protocol, address string) (*grpc.Server, error) {
+	// TODO: We should use TLS.
+	// TODO: Add an option for the SocketGroup.
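+	// Illustration (mirrors the SplitN in daemon() above): the default
+	// --listen value splits into a protocol and an address, e.g.
+	//   parts := strings.SplitN("unix:///run/containerd/containerd.sock", "://", 2)
+	//   // parts[0] == "unix", parts[1] == "/run/containerd/containerd.sock"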
+ sockets, err := listeners.Init(protocol, address, "", nil) + if err != nil { + return nil, err + } + if len(sockets) != 1 { + return nil, fmt.Errorf("incorrect number of listeners") + } + l := sockets[0] + s := grpc.NewServer() + healthServer := health.NewServer() + grpc_health_v1.RegisterHealthServer(s, healthServer) + + go func() { + logrus.Debugf("containerd: grpc api on %s", address) + if err := s.Serve(l); err != nil { + logrus.WithField("error", err).Fatal("containerd: serve grpc") + } + }() + return s, nil +} + +func checkLimits() error { + var l syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l); err != nil { + return err + } + if l.Cur <= minRlimit { + logrus.WithFields(logrus.Fields{ + "current": l.Cur, + "max": l.Max, + }).Warn("containerd: low RLIMIT_NOFILE changing to max") + l.Cur = l.Max + return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + return nil +} + +func debugMetrics(interval time.Duration, graphiteAddr string) error { + for name, m := range supervisor.Metrics() { + if err := metrics.DefaultRegistry.Register(name, m); err != nil { + return err + } + } + processMetrics() + if graphiteAddr != "" { + addr, err := net.ResolveTCPAddr("tcp", graphiteAddr) + if err != nil { + return err + } + go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) + } else { + l := log.New(os.Stdout, "[containerd] ", log.LstdFlags) + go metrics.Log(metrics.DefaultRegistry, interval, l) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/containerd/main_linux.go b/vendor/github.com/containerd/containerd/containerd/main_linux.go new file mode 100644 index 0000000..6b48e53 --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd/main_linux.go @@ -0,0 +1,45 @@ +package containerd + +import ( + "os" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/cloudfoundry/gosigar" + "github.com/containerd/containerd/osutils" + "github.com/rcrowley/go-metrics" +) + +func processMetrics() { + var ( + g = metrics.NewGauge() + fg = metrics.NewGauge() + memg = metrics.NewGauge() + ) + metrics.DefaultRegistry.Register("goroutines", g) + metrics.DefaultRegistry.Register("fds", fg) + metrics.DefaultRegistry.Register("memory-used", memg) + collect := func() { + // update number of goroutines + g.Update(int64(runtime.NumGoroutine())) + // collect the number of open fds + fds, err := osutils.GetOpenFds(os.Getpid()) + if err != nil { + logrus.WithField("error", err).Error("containerd: get open fd count") + } + fg.Update(int64(fds)) + // get the memory used + m := sigar.ProcMem{} + if err := m.Get(os.Getpid()); err != nil { + logrus.WithField("error", err).Error("containerd: get pid memory information") + } + memg.Update(int64(m.Size)) + } + go func() { + collect() + for range time.Tick(30 * time.Second) { + collect() + } + }() +} diff --git a/vendor/github.com/containerd/containerd/containerd/main_solaris.go b/vendor/github.com/containerd/containerd/containerd/main_solaris.go new file mode 100644 index 0000000..fdcfc34 --- /dev/null +++ b/vendor/github.com/containerd/containerd/containerd/main_solaris.go @@ -0,0 +1,4 @@ +package containerd + +func processMetrics() { +} diff --git a/vendor/github.com/containerd/containerd/ctr/checkpoint_linux.go b/vendor/github.com/containerd/containerd/ctr/checkpoint_linux.go new file mode 100644 index 0000000..3f1922e --- /dev/null +++ b/vendor/github.com/containerd/containerd/ctr/checkpoint_linux.go @@ -0,0 +1,165 @@ +package ctr + +import ( + "fmt" + "os" + "text/tabwriter" + + 
"github.com/urfave/cli" + "github.com/containerd/containerd/api/grpc/types" + netcontext "golang.org/x/net/context" +) + +var checkpointSubCmds = []cli.Command{ + listCheckpointCommand, + createCheckpointCommand, + deleteCheckpointCommand, +} + +var checkpointCommand = cli.Command{ + Name: "checkpoints", + Usage: "list all checkpoints", + ArgsUsage: "COMMAND [arguments...]", + Subcommands: checkpointSubCmds, + Description: func() string { + desc := "\n COMMAND:\n" + for _, command := range checkpointSubCmds { + desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage) + } + return desc + }(), + Action: listCheckpoints, +} + +var listCheckpointCommand = cli.Command{ + Name: "list", + Usage: "list all checkpoints for a container", + Action: listCheckpoints, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "checkpoint-dir", + Value: "", + Usage: "path to checkpoint directory", + }, + }, +} + +func listCheckpoints(context *cli.Context) { + var ( + c = getClient(context) + id = context.Args().First() + ) + if id == "" { + fatal("container id cannot be empty", ExitStatusMissingArg) + } + resp, err := c.ListCheckpoint(netcontext.Background(), &types.ListCheckpointRequest{ + Id: id, + CheckpointDir: context.String("checkpoint-dir"), + }) + if err != nil { + fatal(err.Error(), 1) + } + w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0) + fmt.Fprint(w, "NAME\tTCP\tUNIX SOCKETS\tSHELL\n") + for _, c := range resp.Checkpoints { + fmt.Fprintf(w, "%s\t%v\t%v\t%v\n", c.Name, c.Tcp, c.UnixSockets, c.Shell) + } + if err := w.Flush(); err != nil { + fatal(err.Error(), 1) + } +} + +var createCheckpointCommand = cli.Command{ + Name: "create", + Usage: "create a new checkpoint for the container", + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "tcp", + Usage: "persist open tcp connections", + }, + cli.BoolFlag{ + Name: "unix-sockets", + Usage: "persist unix sockets", + }, + cli.BoolFlag{ + Name: "exit", + Usage: "exit the container after the checkpoint completes successfully", + }, + cli.BoolFlag{ + Name: "shell", + Usage: "checkpoint shell jobs", + }, + cli.StringFlag{ + Name: "checkpoint-dir", + Value: "", + Usage: "directory to store checkpoints", + }, + cli.StringSliceFlag{ + Name: "empty-ns", + Usage: "create a namespace, but don't restore its properties", + }, + }, + Action: func(context *cli.Context) { + var ( + containerID = context.Args().Get(0) + name = context.Args().Get(1) + ) + if containerID == "" { + fatal("container id at cannot be empty", ExitStatusMissingArg) + } + if name == "" { + fatal("checkpoint name cannot be empty", ExitStatusMissingArg) + } + c := getClient(context) + checkpoint := types.Checkpoint{ + Name: name, + Exit: context.Bool("exit"), + Tcp: context.Bool("tcp"), + Shell: context.Bool("shell"), + UnixSockets: context.Bool("unix-sockets"), + } + + emptyNSes := context.StringSlice("empty-ns") + checkpoint.EmptyNS = append(checkpoint.EmptyNS, emptyNSes...) 
+
+		if _, err := c.CreateCheckpoint(netcontext.Background(), &types.CreateCheckpointRequest{
+			Id:            containerID,
+			CheckpointDir: context.String("checkpoint-dir"),
+			Checkpoint:    &checkpoint,
+		}); err != nil {
+			fatal(err.Error(), 1)
+		}
+	},
+}
+
+var deleteCheckpointCommand = cli.Command{
+	Name:  "delete",
+	Usage: "delete a container's checkpoint",
+	Flags: []cli.Flag{
+		cli.StringFlag{
+			Name:  "checkpoint-dir",
+			Value: "",
+			Usage: "path to checkpoint directory",
+		},
+	},
+	Action: func(context *cli.Context) {
+		var (
+			containerID = context.Args().Get(0)
+			name        = context.Args().Get(1)
+		)
+		if containerID == "" {
+			fatal("container id cannot be empty", ExitStatusMissingArg)
+		}
+		if name == "" {
+			fatal("checkpoint name cannot be empty", ExitStatusMissingArg)
+		}
+		c := getClient(context)
+		if _, err := c.DeleteCheckpoint(netcontext.Background(), &types.DeleteCheckpointRequest{
+			Id:            containerID,
+			Name:          name,
+			CheckpointDir: context.String("checkpoint-dir"),
+		}); err != nil {
+			fatal(err.Error(), 1)
+		}
+	},
+}
diff --git a/vendor/github.com/containerd/containerd/ctr/checkpoint_solaris.go b/vendor/github.com/containerd/containerd/ctr/checkpoint_solaris.go
new file mode 100644
index 0000000..0bed786
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/ctr/checkpoint_solaris.go
@@ -0,0 +1,36 @@
+package ctr
+
+import (
+	"fmt"
+
+	"github.com/urfave/cli"
+)
+
+var checkpointSubCmds = []cli.Command{
+	listCheckpointCommand,
+}
+
+var checkpointCommand = cli.Command{
+	Name:        "checkpoints",
+	Usage:       "list all checkpoints",
+	ArgsUsage:   "COMMAND [arguments...]",
+	Subcommands: checkpointSubCmds,
+	Description: func() string {
+		desc := "\n COMMAND:\n"
+		for _, command := range checkpointSubCmds {
+			desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage)
+		}
+		return desc
+	}(),
+	Action: listCheckpoints,
+}
+
+var listCheckpointCommand = cli.Command{
+	Name:   "list",
+	Usage:  "list all checkpoints for a container",
+	Action: listCheckpoints,
+}
+
+func listCheckpoints(context *cli.Context) {
+	fatal("checkpoint command is not supported on Solaris", ExitStatusUnsupported)
+}
diff --git a/vendor/github.com/containerd/containerd/ctr/const.go b/vendor/github.com/containerd/containerd/ctr/const.go
new file mode 100644
index 0000000..3f962d3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/ctr/const.go
@@ -0,0 +1,12 @@
+package ctr
+
+// ctr wide constants
+const (
+	// ExitStatusOK indicates successful completion
+	ExitStatusOK = 0
+
+	// ExitStatusMissingArg indicates failure due to missing argument(s)
+	ExitStatusMissingArg = 1
+	// ExitStatusUnsupported indicates failure due to unsupported subcommand(s)
+	ExitStatusUnsupported = 2
+)
diff --git a/vendor/github.com/containerd/containerd/ctr/container.go b/vendor/github.com/containerd/containerd/ctr/container.go
new file mode 100644
index 0000000..f57f866
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/ctr/container.go
@@ -0,0 +1,718 @@
+package ctr
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"text/tabwriter"
+	"time"
+
+	"github.com/containerd/containerd/api/grpc/types"
+	"github.com/containerd/containerd/specs"
+	"github.com/containerd/console"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/urfave/cli"
+	netcontext "golang.org/x/net/context"
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/transport"
+)
+
+// TODO: parse
flags and pass opts +func getClient(ctx *cli.Context) types.APIClient { + // Parse proto://address form addresses. + bindSpec := ctx.GlobalString("address") + bindParts := strings.SplitN(bindSpec, "://", 2) + if len(bindParts) != 2 { + fatal(fmt.Sprintf("bad bind address format %s, expected proto://address", bindSpec), 1) + } + + // reset the logger for grpc to log to dev/null so that it does not mess with our stdio + grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) + dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(ctx.GlobalDuration("conn-timeout"))} + dialOpts = append(dialOpts, + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout(bindParts[0], bindParts[1], timeout) + }, + )) + conn, err := grpc.Dial(bindSpec, dialOpts...) + if err != nil { + fatal(err.Error(), 1) + } + return types.NewAPIClient(conn) +} + +var contSubCmds = []cli.Command{ + execCommand, + killCommand, + listCommand, + pauseCommand, + resumeCommand, + startCommand, + stateCommand, + statsCommand, + watchCommand, + updateCommand, +} + +var containersCommand = cli.Command{ + Name: "containers", + Usage: "interact with running containers", + ArgsUsage: "COMMAND [arguments...]", + Subcommands: contSubCmds, + Description: func() string { + desc := "\n COMMAND:\n" + for _, command := range contSubCmds { + desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage) + } + return desc + }(), + Action: listContainers, +} + +var stateCommand = cli.Command{ + Name: "state", + Usage: "get a raw dump of the containerd state", + Action: func(context *cli.Context) error { + c := getClient(context) + resp, err := c.State(netcontext.Background(), &types.StateRequest{ + Id: context.Args().First(), + }) + if err != nil { + return err + } + data, err := json.Marshal(resp) + if err != nil { + return err + } + fmt.Print(string(data)) + return nil + }, +} + +var listCommand = cli.Command{ + Name: "list", + Usage: "list all running containers", + Action: listContainers, +} + +func listContainers(context *cli.Context) error { + c := getClient(context) + resp, err := c.State(netcontext.Background(), &types.StateRequest{ + Id: context.Args().First(), + }) + if err != nil { + return err + } + w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0) + fmt.Fprint(w, "ID\tPATH\tSTATUS\tPROCESSES\n") + sortContainers(resp.Containers) + for _, c := range resp.Containers { + procs := []string{} + for _, p := range c.Processes { + procs = append(procs, p.Pid) + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.Id, c.BundlePath, c.Status, strings.Join(procs, ",")) + } + return w.Flush() +} + +var startCommand = cli.Command{ + Name: "start", + Usage: "start a container", + ArgsUsage: "ID BundlePath", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "checkpoint,c", + Value: "", + Usage: "checkpoint to start the container from", + }, + cli.StringFlag{ + Name: "checkpoint-dir", + Value: "", + Usage: "path to checkpoint directory", + }, + cli.BoolFlag{ + Name: "attach,a", + Usage: "connect to the stdio of the container", + }, + cli.StringSliceFlag{ + Name: "label,l", + Value: &cli.StringSlice{}, + Usage: "set labels for the container", + }, + cli.BoolFlag{ + Name: "no-pivot", + Usage: "do not use pivot root", + }, + cli.StringFlag{ + Name: "runtime,r", + Value: "runc", + Usage: "name or path of the OCI compliant runtime to use when executing containers", + }, + cli.StringSliceFlag{ + Name: "runtime-args", + Value: &cli.StringSlice{}, + Usage: "specify additional runtime args", + }, + }, 
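+	// Illustrative usage, per ArgsUsage ("ID BundlePath") and the flags above:
+	//   ctr containers start --attach <id> <bundle-path>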
+ Action: func(context *cli.Context) error { + var ( + id = context.Args().Get(0) + path = context.Args().Get(1) + ) + if path == "" { + fatal("bundle path cannot be empty", ExitStatusMissingArg) + } + if id == "" { + fatal("container id cannot be empty", ExitStatusMissingArg) + } + bpath, err := filepath.Abs(path) + if err != nil { + return fmt.Errorf("cannot get the absolute path of the bundle: %v", err) + } + s, tmpDir, err := createStdio() + defer func() { + if tmpDir != "" { + os.RemoveAll(tmpDir) + } + }() + if err != nil { + return err + } + var ( + con console.Console + tty bool + c = getClient(context) + r = &types.CreateContainerRequest{ + Id: id, + BundlePath: bpath, + Checkpoint: context.String("checkpoint"), + CheckpointDir: context.String("checkpoint-dir"), + Stdin: s.stdin, + Stdout: s.stdout, + Stderr: s.stderr, + Labels: context.StringSlice("label"), + NoPivotRoot: context.Bool("no-pivot"), + Runtime: context.String("runtime"), + RuntimeArgs: context.StringSlice("runtime-args"), + } + ) + if context.Bool("attach") { + mkterm, err := readTermSetting(bpath) + if err != nil { + return err + } + tty = mkterm + if mkterm { + con = console.Current() + defer func() { + con.Reset() + con.Close() + }() + if err := con.SetRaw(); err != nil { + return err + } + } + if err := attachStdio(s); err != nil { + return err + } + } + events, err := c.Events(netcontext.Background(), &types.EventsRequest{}) + if err != nil { + return err + } + if _, err := c.CreateContainer(netcontext.Background(), r); err != nil { + return err + } + if context.Bool("attach") { + go func() { + io.Copy(stdin, os.Stdin) + if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ + Id: id, + Pid: "init", + CloseStdin: true, + }); err != nil { + fatal(err.Error(), 1) + } + con.Reset() + con.Close() + }() + if tty { + resize(id, "init", c, con) + go func() { + s := make(chan os.Signal, 64) + signal.Notify(s, syscall.SIGWINCH) + for range s { + if err := resize(id, "init", c, con); err != nil { + log.Println(err) + } + } + }() + } + time.Sleep(2 * time.Second) + log.Println("closing stdin now") + if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ + Id: id, + Pid: "init", + CloseStdin: true, + }); err != nil { + fatal(err.Error(), 1) + } + + waitForExit(c, events, id, "init", con) + } + return nil + }, +} + +func resize(id, pid string, c types.APIClient, con console.Console) error { + ws, err := con.Size() + if err != nil { + return err + } + if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ + Id: id, + Pid: "init", + Width: uint32(ws.Width), + Height: uint32(ws.Height), + }); err != nil { + return err + } + return nil +} + +var ( + stdin io.WriteCloser +) + +// readTermSetting reads the Terminal option out of the specs configuration +// to know if ctr should allocate a pty +func readTermSetting(path string) (bool, error) { + f, err := os.Open(filepath.Join(path, "config.json")) + if err != nil { + return false, err + } + defer f.Close() + var spec specs.Spec + if err := json.NewDecoder(f).Decode(&spec); err != nil { + return false, err + } + return spec.Process.Terminal, nil +} + +func attachStdio(s stdio) error { + stdinf, err := os.OpenFile(s.stdin, syscall.O_RDWR, 0) + if err != nil { + return err + } + // FIXME: assign to global + stdin = stdinf + stdoutf, err := os.OpenFile(s.stdout, syscall.O_RDWR, 0) + if err != nil { + return err + } + go io.Copy(os.Stdout, stdoutf) + stderrf, err := os.OpenFile(s.stderr, 
syscall.O_RDWR, 0)
+	if err != nil {
+		return err
+	}
+	go io.Copy(os.Stderr, stderrf)
+	return nil
+}
+
+var watchCommand = cli.Command{
+	Name:  "watch",
+	Usage: "print container events",
+	Action: func(context *cli.Context) error {
+		c := getClient(context)
+		id := context.Args().First()
+		if id != "" {
+			resp, err := c.State(netcontext.Background(), &types.StateRequest{Id: id})
+			if err != nil {
+				return err
+			}
+			found := false
+			for _, c := range resp.Containers {
+				if c.Id == id {
+					found = true
+					break
+				}
+			}
+			if !found {
+				fatal("Invalid container id", 1)
+			}
+		}
+		events, reqErr := c.Events(netcontext.Background(), &types.EventsRequest{})
+		if reqErr != nil {
+			return reqErr
+		}
+
+		for {
+			e, err := events.Recv()
+			if err != nil {
+				return err
+			}
+
+			if id == "" || e.Id == id {
+				fmt.Printf("%#v\n", e)
+			}
+		}
+	},
+}
+
+var pauseCommand = cli.Command{
+	Name:  "pause",
+	Usage: "pause a container",
+	Action: func(context *cli.Context) error {
+		id := context.Args().First()
+		if id == "" {
+			fatal("container id cannot be empty", ExitStatusMissingArg)
+		}
+		c := getClient(context)
+		_, err := c.UpdateContainer(netcontext.Background(), &types.UpdateContainerRequest{
+			Id:     id,
+			Pid:    "init",
+			Status: "paused",
+		})
+		return err
+	},
+}
+
+var resumeCommand = cli.Command{
+	Name:  "resume",
+	Usage: "resume a paused container",
+	Action: func(context *cli.Context) error {
+		id := context.Args().First()
+		if id == "" {
+			fatal("container id cannot be empty", ExitStatusMissingArg)
+		}
+		c := getClient(context)
+		_, err := c.UpdateContainer(netcontext.Background(), &types.UpdateContainerRequest{
+			Id:     id,
+			Pid:    "init",
+			Status: "running",
+		})
+		return err
+	},
+}
+
+var killCommand = cli.Command{
+	Name:  "kill",
+	Usage: "send a signal to a container or its processes",
+	Flags: []cli.Flag{
+		cli.StringFlag{
+			Name:  "pid,p",
+			Value: "init",
+			Usage: "pid of the process to signal within the container",
+		},
+		cli.IntFlag{
+			Name:  "signal,s",
+			Value: 15,
+			Usage: "signal to send to the container",
+		},
+	},
+	Action: func(context *cli.Context) error {
+		id := context.Args().First()
+		if id == "" {
+			fatal("container id cannot be empty", ExitStatusMissingArg)
+		}
+		c := getClient(context)
+		_, err := c.Signal(netcontext.Background(), &types.SignalRequest{
+			Id:     id,
+			Pid:    context.String("pid"),
+			Signal: uint32(context.Int("signal")),
+		})
+		return err
+	},
+}
+
+var execCommand = cli.Command{
+	Name:  "exec",
+	Usage: "exec another process in an existing container",
+	Flags: []cli.Flag{
+		cli.StringFlag{
+			Name:  "id",
+			Usage: "container id to add the process to",
+		},
+		cli.StringFlag{
+			Name:  "pid",
+			Usage: "process id for the new process",
+		},
+		cli.BoolFlag{
+			Name:  "attach,a",
+			Usage: "connect to the stdio of the container",
+		},
+		cli.StringFlag{
+			Name:  "cwd",
+			Usage: "current working directory for the process",
+			Value: "/",
+		},
+		cli.BoolFlag{
+			Name:  "tty,t",
+			Usage: "create a terminal for the process",
+		},
+		cli.StringSliceFlag{
+			Name:  "env,e",
+			Value: &cli.StringSlice{},
+			Usage: "environment variables for the process",
+		},
+		cli.IntFlag{
+			Name:  "uid,u",
+			Usage: "user id of the user for the process",
+		},
+		cli.IntFlag{
+			Name:  "gid,g",
+			Usage: "group id of the user for the process",
+		},
+	},
+	Action: func(context *cli.Context) error {
+		p := &types.AddProcessRequest{
+			Id:       context.String("id"),
+			Pid:      context.String("pid"),
+			Args:     context.Args(),
+			Cwd:      context.String("cwd"),
+			Terminal: context.Bool("tty"),
+			Env:      context.StringSlice("env"),
+			User: &types.User{
+				Uid:
uint32(context.Int("uid")), + Gid: uint32(context.Int("gid")), + }, + } + s, tmpDir, err := createStdio() + defer func() { + if tmpDir != "" { + os.RemoveAll(tmpDir) + } + }() + if err != nil { + return err + } + p.Stdin = s.stdin + p.Stdout = s.stdout + p.Stderr = s.stderr + + var con console.Console + if context.Bool("attach") { + if context.Bool("tty") { + con = console.Current() + defer func() { + con.Reset() + con.Close() + }() + if err := con.SetRaw(); err != nil { + return err + } + } + if err := attachStdio(s); err != nil { + return err + } + } + c := getClient(context) + events, err := c.Events(netcontext.Background(), &types.EventsRequest{}) + if err != nil { + return err + } + if _, err := c.AddProcess(netcontext.Background(), p); err != nil { + return err + } + if context.Bool("attach") { + go func() { + io.Copy(stdin, os.Stdin) + if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ + Id: p.Id, + Pid: p.Pid, + CloseStdin: true, + }); err != nil { + log.Println(err) + } + con.Reset() + con.Close() + }() + if context.Bool("tty") { + resize(p.Id, p.Pid, c, con) + go func() { + s := make(chan os.Signal, 64) + signal.Notify(s, syscall.SIGWINCH) + for range s { + if err := resize(p.Id, p.Pid, c, con); err != nil { + log.Println(err) + } + } + }() + } + waitForExit(c, events, context.String("id"), context.String("pid"), con) + } + return nil + }, +} + +var statsCommand = cli.Command{ + Name: "stats", + Usage: "get stats for running container", + Action: func(context *cli.Context) error { + req := &types.StatsRequest{ + Id: context.Args().First(), + } + c := getClient(context) + stats, err := c.Stats(netcontext.Background(), req) + if err != nil { + return err + } + data, err := json.Marshal(stats) + if err != nil { + return err + } + fmt.Print(string(data)) + return nil + }, +} + +func getUpdateCommandInt64Flag(context *cli.Context, name string) uint64 { + str := context.String(name) + if str == "" { + return 0 + } + + val, err := strconv.ParseUint(str, 0, 64) + if err != nil { + fatal(err.Error(), 1) + } + + return val +} + +var updateCommand = cli.Command{ + Name: "update", + Usage: "update a containers resources", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "memory-limit", + }, + cli.StringFlag{ + Name: "memory-reservation", + }, + cli.StringFlag{ + Name: "memory-swap", + }, + cli.StringFlag{ + Name: "cpu-quota", + }, + cli.StringFlag{ + Name: "cpu-period", + }, + cli.StringFlag{ + Name: "kernel-limit", + }, + cli.StringFlag{ + Name: "kernel-tcp-limit", + }, + cli.StringFlag{ + Name: "blkio-weight", + }, + cli.StringFlag{ + Name: "cpuset-cpus", + }, + cli.StringFlag{ + Name: "cpuset-mems", + }, + cli.StringFlag{ + Name: "pids-limit", + }, + }, + Action: func(context *cli.Context) error { + req := &types.UpdateContainerRequest{ + Id: context.Args().First(), + } + req.Resources = &types.UpdateResource{} + req.Resources.MemoryLimit = getUpdateCommandInt64Flag(context, "memory-limit") + req.Resources.MemoryReservation = getUpdateCommandInt64Flag(context, "memory-reservation") + req.Resources.MemorySwap = getUpdateCommandInt64Flag(context, "memory-swap") + req.Resources.BlkioWeight = getUpdateCommandInt64Flag(context, "blkio-weight") + req.Resources.CpuPeriod = getUpdateCommandInt64Flag(context, "cpu-period") + req.Resources.CpuQuota = getUpdateCommandInt64Flag(context, "cpu-quota") + req.Resources.CpuShares = getUpdateCommandInt64Flag(context, "cpu-shares") + req.Resources.CpusetCpus = context.String("cpuset-cpus") + req.Resources.CpusetMems = 
context.String("cpuset-mems") + req.Resources.KernelMemoryLimit = getUpdateCommandInt64Flag(context, "kernel-limit") + req.Resources.KernelTCPMemoryLimit = getUpdateCommandInt64Flag(context, "kernel-tcp-limit") + req.Resources.PidsLimit = getUpdateCommandInt64Flag(context, "pids-limit") + c := getClient(context) + _, err := c.UpdateContainer(netcontext.Background(), req) + return err + }, +} + +func waitForExit(c types.APIClient, events types.API_EventsClient, id, pid string, con console.Console) { + timestamp := time.Now() + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc { + if con != nil { + con.Reset() + con.Close() + } + os.Exit(128 + int(syscall.SIGHUP)) + } + time.Sleep(1 * time.Second) + tsp, err := ptypes.TimestampProto(timestamp) + if err != nil { + if con != nil { + con.Reset() + con.Close() + } + fmt.Fprintf(os.Stderr, "%s", err.Error()) + os.Exit(1) + } + events, _ = c.Events(netcontext.Background(), &types.EventsRequest{Timestamp: tsp}) + continue + } + timestamp, err = ptypes.Timestamp(e.Timestamp) + if e.Id == id && e.Type == "exit" && e.Pid == pid { + if con != nil { + con.Reset() + con.Close() + } + os.Exit(int(e.Status)) + } + } +} + +type stdio struct { + stdin string + stdout string + stderr string +} + +func createStdio() (s stdio, tmp string, err error) { + tmp, err = ioutil.TempDir("", "ctr-") + if err != nil { + return s, tmp, err + } + // create fifo's for the process + for _, pair := range []struct { + name string + fd *string + }{ + {"stdin", &s.stdin}, + {"stdout", &s.stdout}, + {"stderr", &s.stderr}, + } { + path := filepath.Join(tmp, pair.name) + if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) { + return s, tmp, err + } + *pair.fd = path + } + return s, tmp, nil +} diff --git a/vendor/github.com/containerd/containerd/ctr/events.go b/vendor/github.com/containerd/containerd/ctr/events.go new file mode 100644 index 0000000..03696f4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/ctr/events.go @@ -0,0 +1,63 @@ +package ctr + +import ( + "fmt" + "os" + "text/tabwriter" + "time" + + "github.com/urfave/cli" + "github.com/containerd/containerd/api/grpc/types" + "github.com/golang/protobuf/ptypes" + netcontext "golang.org/x/net/context" +) + +var eventsCommand = cli.Command{ + Name: "events", + Usage: "receive events from the containerd daemon", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "timestamp,t", + Usage: "get events from a specific time stamp in RFC3339Nano format", + }, + }, + Action: func(context *cli.Context) { + var ( + t = time.Time{} + c = getClient(context) + ) + if ts := context.String("timestamp"); ts != "" { + from, err := time.Parse(time.RFC3339Nano, ts) + if err != nil { + fatal(err.Error(), 1) + } + t = from + } + tsp, err := ptypes.TimestampProto(t) + if err != nil { + fatal(err.Error(), 1) + } + events, err := c.Events(netcontext.Background(), &types.EventsRequest{ + Timestamp: tsp, + }) + if err != nil { + fatal(err.Error(), 1) + } + w := tabwriter.NewWriter(os.Stdout, 31, 1, 1, ' ', 0) + fmt.Fprint(w, "TIME\tTYPE\tID\tPID\tSTATUS\n") + w.Flush() + for { + e, err := events.Recv() + if err != nil { + fatal(err.Error(), 1) + } + t, err := ptypes.Timestamp(e.Timestamp) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to convert timestamp") + t = time.Time{} + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\n", t.Format(time.RFC3339Nano), e.Type, e.Id, e.Pid, e.Status) + w.Flush() + } + }, +} diff --git a/vendor/github.com/containerd/containerd/ctr/main.go 
b/vendor/github.com/containerd/containerd/ctr/main.go new file mode 100644 index 0000000..d0d8e5c --- /dev/null +++ b/vendor/github.com/containerd/containerd/ctr/main.go @@ -0,0 +1,90 @@ +package ctr + +import ( + "fmt" + "os" + "time" + + netcontext "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/urfave/cli" + "github.com/containerd/containerd" + "github.com/containerd/containerd/api/grpc/types" +) + +const usage = `High performance container daemon cli` + +type exit struct { + Code int +} + +func Main() { + // We want our defer functions to be run when calling fatal() + defer func() { + if e := recover(); e != nil { + if ex, ok := e.(exit); ok == true { + os.Exit(ex.Code) + } + panic(e) + } + }() + app := cli.NewApp() + app.Name = "ctr" + if containerd.GitCommit != "" { + app.Version = fmt.Sprintf("%s commit: %s", containerd.Version, containerd.GitCommit) + } else { + app.Version = containerd.Version + } + app.Usage = usage + app.Flags = []cli.Flag{ + cli.BoolFlag{ + Name: "debug", + Usage: "enable debug output in the logs", + }, + cli.StringFlag{ + Name: "address", + Value: "unix:///run/containerd/containerd.sock", + Usage: "proto://address of GRPC API", + }, + cli.DurationFlag{ + Name: "conn-timeout", + Value: 1 * time.Second, + Usage: "GRPC connection timeout", + }, + } + app.Commands = []cli.Command{ + checkpointCommand, + containersCommand, + eventsCommand, + stateCommand, + versionCommand, + } + app.Before = func(context *cli.Context) error { + if context.GlobalBool("debug") { + logrus.SetLevel(logrus.DebugLevel) + } + return nil + } + if err := app.Run(os.Args); err != nil { + logrus.Fatal(err) + } +} + +var versionCommand = cli.Command{ + Name: "version", + Usage: "return the daemon version", + Action: func(context *cli.Context) { + c := getClient(context) + resp, err := c.GetServerVersion(netcontext.Background(), &types.GetServerVersionRequest{}) + if err != nil { + fatal(err.Error(), 1) + } + fmt.Printf("daemon version %d.%d.%d commit: %s\n", resp.Major, resp.Minor, resp.Patch, resp.Revision) + }, +} + +func fatal(err string, code int) { + fmt.Fprintf(os.Stderr, "[ctr] %s\n", err) + panic(exit{code}) +} diff --git a/vendor/github.com/containerd/containerd/ctr/sort.go b/vendor/github.com/containerd/containerd/ctr/sort.go new file mode 100644 index 0000000..461583d --- /dev/null +++ b/vendor/github.com/containerd/containerd/ctr/sort.go @@ -0,0 +1,27 @@ +package ctr + +import ( + "sort" + + "github.com/containerd/containerd/api/grpc/types" +) + +func sortContainers(c []*types.Container) { + sort.Sort(&containerSorter{c}) +} + +type containerSorter struct { + c []*types.Container +} + +func (s *containerSorter) Len() int { + return len(s.c) +} + +func (s *containerSorter) Swap(i, j int) { + s.c[i], s.c[j] = s.c[j], s.c[i] +} + +func (s *containerSorter) Less(i, j int) bool { + return s.c[i].Id < s.c[j].Id +} diff --git a/vendor/github.com/containerd/containerd/osutils/fds.go b/vendor/github.com/containerd/containerd/osutils/fds.go new file mode 100644 index 0000000..98fc930 --- /dev/null +++ b/vendor/github.com/containerd/containerd/osutils/fds.go @@ -0,0 +1,18 @@ +// +build !windows,!darwin + +package osutils + +import ( + "io/ioutil" + "path/filepath" + "strconv" +) + +// GetOpenFds returns the number of open fds for the process provided by pid +func GetOpenFds(pid int) (int, error) { + dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + if err != nil { + return -1, err + } + return len(dirs), nil +} diff --git 
a/vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go b/vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go
new file mode 100644
index 0000000..5310f2e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go
@@ -0,0 +1,15 @@
+// +build !solaris
+
+package osutils
+
+import (
+	"syscall"
+)
+
+// SetPDeathSig sets the parent death signal to SIGKILL so that if the
+// shim dies the container process also dies.
+func SetPDeathSig() *syscall.SysProcAttr {
+	return &syscall.SysProcAttr{
+		Pdeathsig: syscall.SIGKILL,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go b/vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go
new file mode 100644
index 0000000..512e24b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go
@@ -0,0 +1,12 @@
+// +build solaris
+
+package osutils
+
+import (
+	"syscall"
+)
+
+// SetPDeathSig is a no-op on Solaris as Pdeathsig is not defined.
+func SetPDeathSig() *syscall.SysProcAttr {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/osutils/prctl.go b/vendor/github.com/containerd/containerd/osutils/prctl.go
new file mode 100644
index 0000000..1d6e251
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/osutils/prctl.go
@@ -0,0 +1,48 @@
+// +build linux
+
+// Package osutils provides access to the Get Child and Set Child prctl
+// flags.
+// See http://man7.org/linux/man-pages/man2/prctl.2.html
+package osutils
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// PR_SET_CHILD_SUBREAPER allows setting the child subreaper.
+// If arg2 is nonzero, set the "child subreaper" attribute of the
+// calling process; if arg2 is zero, unset the attribute. When a
+// process is marked as a child subreaper, all of the children
+// that it creates, and their descendants, will be marked as
+// having a subreaper. In effect, a subreaper fulfills the role
+// of init(1) for its descendant processes. Upon termination of
+// a process that is orphaned (i.e., its immediate parent has
+// already terminated) and marked as having a subreaper, the
+// nearest still living ancestor subreaper will receive a SIGCHLD
+// signal and be able to wait(2) on the process to discover its
+// termination status.
+const prSetChildSubreaper = 36
+
+// PR_GET_CHILD_SUBREAPER allows retrieving the current child
+// subreaper.
+// Return the "child subreaper" setting of the caller, in the
+// location pointed to by (int *) arg2.
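+//
+// For illustration, this is how containerd-shim's start() (elsewhere in this
+// patch) uses the setter before launching the runtime, so that orphaned
+// container processes are reparented to the shim and can be reaped there:
+//   if err := osutils.SetSubreaper(1); err != nil {
+//   	return err
++//   }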
+const prGetChildSubreaper = 37 + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prGetChildSubreaper, uintptr(unsafe.Pointer(&i)), 0); err != 0 { + return -1, err + } + return int(i), nil +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prSetChildSubreaper, uintptr(i), 0); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/osutils/prctl_solaris.go b/vendor/github.com/containerd/containerd/osutils/prctl_solaris.go new file mode 100644 index 0000000..84da5f9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/osutils/prctl_solaris.go @@ -0,0 +1,19 @@ +// +build solaris + +package osutils + +import ( + "errors" +) + +//Solaris TODO + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + return 0, errors.New("osutils GetSubreaper not implemented on Solaris") +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/osutils/reaper.go b/vendor/github.com/containerd/containerd/osutils/reaper.go new file mode 100644 index 0000000..6a80335 --- /dev/null +++ b/vendor/github.com/containerd/containerd/osutils/reaper.go @@ -0,0 +1,51 @@ +// +build !windows + +package osutils + +import "syscall" + +// Exit is the wait4 information from an exited process +type Exit struct { + Pid int + Status int +} + +// Reap reaps all child processes for the calling process and returns their +// exit information +func Reap(wait bool) (exits []Exit, err error) { + var ( + ws syscall.WaitStatus + rus syscall.Rusage + ) + flag := syscall.WNOHANG + if wait { + flag = 0 + } + for { + pid, err := syscall.Wait4(-1, &ws, flag, &rus) + if err != nil { + if err == syscall.ECHILD { + return exits, nil + } + return exits, err + } + if pid <= 0 { + return exits, nil + } + exits = append(exits, Exit{ + Pid: pid, + Status: exitStatus(ws), + }) + } +} + +const exitSignalOffset = 128 + +// exitStatus returns the correct exit status for a process based on if it +// was signaled or exited cleanly +func exitStatus(status syscall.WaitStatus) int { + if status.Signaled() { + return exitSignalOffset + int(status.Signal()) + } + return status.ExitStatus() +} diff --git a/vendor/github.com/containerd/containerd/runtime/container.go b/vendor/github.com/containerd/containerd/runtime/container.go new file mode 100644 index 0000000..a9677c4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/container.go @@ -0,0 +1,748 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/specs" + ocs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +// Container defines the operations allowed on a container +type Container interface { + // ID returns the container ID + ID() string + // Path returns the path to the bundle + Path() string + // Start starts the init process of the container + Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error) + // Exec starts another process in an existing container + 
Exec(context.Context, string, specs.ProcessSpec, Stdio) (Process, error)
+	// Delete removes the container's state and any resources
+	Delete() error
+	// Processes returns all the containers processes that have been added
+	Processes() ([]Process, error)
+	// State returns the containers runtime state
+	State() State
+	// Resume resumes a paused container
+	Resume() error
+	// Pause pauses a running container
+	Pause() error
+	// RemoveProcess removes the specified process from the container
+	RemoveProcess(string) error
+	// Checkpoints returns all the checkpoints for a container
+	Checkpoints(checkpointDir string) ([]Checkpoint, error)
+	// Checkpoint creates a new checkpoint
+	Checkpoint(checkpoint Checkpoint, checkpointDir string) error
+	// DeleteCheckpoint deletes the checkpoint for the provided name
+	DeleteCheckpoint(name string, checkpointDir string) error
+	// Labels are user provided labels for the container
+	Labels() []string
+	// Pids returns all pids inside the container
+	Pids() ([]int, error)
+	// Stats returns realtime container stats and resource information
+	Stats() (*Stat, error)
+	// Runtime returns the name or path of the OCI compliant runtime used to execute the container
+	Runtime() string
+	// OOM signals the channel if the container received an OOM notification
+	OOM() (OOM, error)
+	// UpdateResources updates the container's resources to new values
+	UpdateResources(*Resource) error
+
+	// Status returns the current status of the container.
+	Status() (State, error)
+}
+
+// OOM wraps a container OOM.
+type OOM interface {
+	io.Closer
+	FD() int
+	ContainerID() string
+	Flush()
+	Removed() bool
+}
+
+// Stdio holds the paths to the 3 pipes used for the standard ios.
+type Stdio struct {
+	Stdin  string
+	Stdout string
+	Stderr string
+}
+
+// NewStdio wraps the given standard io paths into an Stdio struct.
+// If a given parameter is the empty string, it is replaced by "/dev/null"
+func NewStdio(stdin, stdout, stderr string) Stdio {
+	for _, s := range []*string{
+		&stdin, &stdout, &stderr,
+	} {
+		if *s == "" {
+			*s = "/dev/null"
+		}
+	}
+	return Stdio{
+		Stdin:  stdin,
+		Stdout: stdout,
+		Stderr: stderr,
+	}
+}
+
+// ContainerOpts keeps the options passed at container creation
+type ContainerOpts struct {
+	Root        string
+	ID          string
+	Bundle      string
+	Runtime     string
+	RuntimeArgs []string
+	Shim        string
+	Labels      []string
+	NoPivotRoot bool
+	Timeout     time.Duration
+}
+
+// New returns a new container
+func New(opts ContainerOpts) (Container, error) {
+	c := &container{
+		root:        opts.Root,
+		id:          opts.ID,
+		bundle:      opts.Bundle,
+		labels:      opts.Labels,
+		processes:   make(map[string]*process),
+		runtime:     opts.Runtime,
+		runtimeArgs: opts.RuntimeArgs,
+		shim:        opts.Shim,
+		noPivotRoot: opts.NoPivotRoot,
+		timeout:     opts.Timeout,
+	}
+	if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil {
+		return nil, err
+	}
+	f, err := os.Create(filepath.Join(c.root, c.id, StateFile))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	if err := json.NewEncoder(f).Encode(state{
+		Bundle:      c.bundle,
+		Labels:      c.labels,
+		Runtime:     c.runtime,
+		RuntimeArgs: c.runtimeArgs,
+		Shim:        c.shim,
+		NoPivotRoot: opts.NoPivotRoot,
+	}); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// Load returns a new container from the matching state file on disk.
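+//
+// For illustration (per New above and readProcessState below; treat the exact
+// names as an assumption), the on-disk layout read back here is roughly:
+//   <root>/<id>/<StateFile>         - serialized container state
+//   <root>/<id>/<pid>/process.json  - per-process state, one directory per process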
+func Load(root, id, shimName string, timeout time.Duration) (Container, error) {
+	var s state
+	f, err := os.Open(filepath.Join(root, id, StateFile))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	if err := json.NewDecoder(f).Decode(&s); err != nil {
+		return nil, err
+	}
+	c := &container{
+		root:        root,
+		id:          id,
+		bundle:      s.Bundle,
+		labels:      s.Labels,
+		runtime:     s.Runtime,
+		runtimeArgs: s.RuntimeArgs,
+		shim:        s.Shim,
+		noPivotRoot: s.NoPivotRoot,
+		processes:   make(map[string]*process),
+		timeout:     timeout,
+	}
+
+	if c.shim == "" {
+		c.shim = shimName
+	}
+
+	dirs, err := ioutil.ReadDir(filepath.Join(root, id))
+	if err != nil {
+		return nil, err
+	}
+	for _, d := range dirs {
+		if !d.IsDir() {
+			continue
+		}
+		pid := d.Name()
+		s, err := readProcessState(filepath.Join(root, id, pid))
+		if err != nil {
+			return nil, err
+		}
+		p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s)
+		if err != nil {
+			logrus.WithField("id", id).WithField("pid", pid).Debugf("containerd: error loading process %s", err)
+			continue
+		}
+		c.processes[pid] = p
+	}
+
+	_, err = os.Stat(c.bundle)
+	if err != nil && !os.IsExist(err) {
+		for key, p := range c.processes {
+			if key == InitProcessID {
+				p.Delete()
+				break
+			}
+		}
+		return nil, fmt.Errorf("bundle dir %s does not exist", c.bundle)
+	}
+	return c, nil
+}
+
+func readProcessState(dir string) (*ProcessState, error) {
+	f, err := os.Open(filepath.Join(dir, "process.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	var s ProcessState
+	if err := json.NewDecoder(f).Decode(&s); err != nil {
+		return nil, err
+	}
+	return &s, nil
+}
+
+type container struct {
+	// path to store runtime state information
+	root        string
+	id          string
+	bundle      string
+	runtime     string
+	runtimeArgs []string
+	shim        string
+	processes   map[string]*process
+	labels      []string
+	oomFds      []int
+	noPivotRoot bool
+	timeout     time.Duration
+}
+
+func (c *container) ID() string {
+	return c.id
+}
+
+func (c *container) Path() string {
+	return c.bundle
+}
+
+func (c *container) Labels() []string {
+	return c.labels
+}
+
+func (c *container) readSpec() (*specs.Spec, error) {
+	var spec specs.Spec
+	f, err := os.Open(filepath.Join(c.bundle, "config.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	if err := json.NewDecoder(f).Decode(&spec); err != nil {
+		return nil, err
+	}
+	return &spec, nil
+}
+
+func (c *container) Delete() error {
+	var err error
+	args := append(c.runtimeArgs, "delete", c.id)
+	if b, derr := exec.Command(c.runtime, args...).CombinedOutput(); derr != nil && !strings.Contains(string(b), "does not exist") {
+		err = fmt.Errorf("%s: %q", derr, string(b))
+	}
+	if rerr := os.RemoveAll(filepath.Join(c.root, c.id)); rerr != nil {
+		if err != nil {
+			err = fmt.Errorf("%s; failed to remove %s: %s", err, filepath.Join(c.root, c.id), rerr)
+		} else {
+			err = rerr
+		}
+	}
+	return err
+}
+
+func (c *container) Processes() ([]Process, error) {
+	out := []Process{}
+	for _, p := range c.processes {
+		out = append(out, p)
+	}
+	return out, nil
+}
+
+func (c *container) RemoveProcess(pid string) error {
+	delete(c.processes, pid)
+	return os.RemoveAll(filepath.Join(c.root, c.id, pid))
+}
+
+func (c *container) State() State {
+	proc := c.processes[InitProcessID]
+	if proc == nil {
+		return Stopped
+	}
+	return proc.State()
+}
+
+func (c *container) Runtime() string {
+	return c.runtime
+}
+
+func (c *container) Pause() error {
+	args := c.runtimeArgs
+	args = append(args, "pause", c.id)
+	b, err := exec.Command(c.runtime,
args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Resume() error { + args := c.runtimeArgs + args = append(args, "resume", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + var out []Checkpoint + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + return out, nil +} + +func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return err + } + + path := filepath.Join(checkpointDir, cpt.Name) + if err := os.Mkdir(path, 0755); err != nil { + return err + } + f, err := os.Create(filepath.Join(path, "config.json")) + if err != nil { + return err + } + cpt.Created = time.Now() + err = json.NewEncoder(f).Encode(cpt) + f.Close() + if err != nil { + return err + } + args := []string{ + "checkpoint", + "--image-path", path, + "--work-path", filepath.Join(path, "criu.work"), + } + add := func(flags ...string) { + args = append(args, flags...) + } + add(c.runtimeArgs...) + if !cpt.Exit { + add("--leave-running") + } + if cpt.Shell { + add("--shell-job") + } + if cpt.TCP { + add("--tcp-established") + } + if cpt.UnixSockets { + add("--ext-unix-sk") + } + for _, ns := range cpt.EmptyNS { + add("--empty-ns", ns) + } + add(c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(out)) + } + return err +} + +func (c *container) DeleteCheckpoint(name string, checkpointDir string) error { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + return os.RemoveAll(filepath.Join(checkpointDir, name)) +} + +func (c *container) Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error) { + processRoot := filepath.Join(c.root, c.id, InitProcessID) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + cmd := exec.Command(c.shim, + c.id, c.bundle, c.runtime, + ) + cmd.Dir = processRoot + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + checkpoint: checkpointPath, + root: processRoot, + id: InitProcessID, + c: c, + stdio: s, + spec: spec, + processSpec: specs.ProcessSpec(spec.Process), + } + p, err := newProcess(config) + if err != nil { + return nil, err + } + if err := c.createCmd(ctx, InitProcessID, cmd, p); err != nil { + return nil, err + } + return p, nil +} + +func (c *container) Exec(ctx context.Context, pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) { + processRoot := filepath.Join(c.root, c.id, pid) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + defer func() { + if err != nil { + c.RemoveProcess(pid) + } + 
}() + cmd := exec.Command(c.shim, + c.id, c.bundle, c.runtime, + ) + cmd.Dir = processRoot + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + exec: true, + id: pid, + root: processRoot, + c: c, + processSpec: pspec, + spec: spec, + stdio: s, + } + p, err := newProcess(config) + if err != nil { + return nil, err + } + if err := c.createCmd(ctx, pid, cmd, p); err != nil { + return nil, err + } + return p, nil +} + +func (c *container) createCmd(ctx context.Context, pid string, cmd *exec.Cmd, p *process) error { + p.cmd = cmd + if err := cmd.Start(); err != nil { + close(p.cmdDoneCh) + if exErr, ok := err.(*exec.Error); ok { + if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist { + return fmt.Errorf("%s not installed on system", c.shim) + } + } + return err + } + // We need the pid file to have been written to run + defer func() { + go func() { + err := p.cmd.Wait() + if err == nil { + p.cmdSuccess = true + } + + if same, err := p.isSameProcess(); same && p.pid > 0 { + // The process changed its PR_SET_PDEATHSIG, so force + // kill it + logrus.Infof("containerd: %s:%s (pid %v) has become an orphan, killing it", p.container.id, p.id, p.pid) + err = unix.Kill(p.pid, syscall.SIGKILL) + if err != nil && err != syscall.ESRCH { + logrus.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err) + } else { + for { + err = unix.Kill(p.pid, 0) + if err != nil { + break + } + time.Sleep(5 * time.Millisecond) + } + } + } + close(p.cmdDoneCh) + }() + }() + + ch := make(chan error) + go func() { + if err := c.waitForCreate(p, cmd); err != nil { + ch <- err + return + } + c.processes[pid] = p + ch <- nil + }() + select { + case <-ctx.Done(): + cmd.Process.Kill() + cmd.Wait() + <-ch + return ctx.Err() + case err := <-ch: + return err + } +} + +func hostIDFromMap(id uint32, mp []ocs.LinuxIDMapping) int { + for _, m := range mp { + if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) { + return int(m.HostID + (id - m.ContainerID)) + } + } + return 0 +} + +func (c *container) Stats() (*Stat, error) { + now := time.Now() + args := c.runtimeArgs + args = append(args, "events", "--stats", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + s := struct { + Data *Stat `json:"data"` + }{} + if err := json.Unmarshal(out, &s); err != nil { + return nil, err + } + s.Data.Timestamp = now + return s.Data, nil +} + +// Status implements the runtime Container interface. +func (c *container) Status() (State, error) { + args := c.runtimeArgs + args = append(args, "state", c.id) + + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("%s: %q", err.Error(), out) + } + + // We only require the runtime json output to have a top level Status field. 
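+	// For illustration, a document as small as the following satisfies
+	// this contract (sketch; extra fields emitted by the runtime are
+	// simply ignored):
+	//
+	//	{"status": "running"}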
+	var s struct {
+		Status State `json:"status"`
+	}
+	if err := json.Unmarshal(out, &s); err != nil {
+		return "", err
+	}
+	return s.Status, nil
+}
+
+func (c *container) writeEventFD(root string, cfd, efd int) error {
+	f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
+	return err
+}
+
+type waitArgs struct {
+	pid int
+	err error
+}
+
+func (c *container) waitForCreate(p *process, cmd *exec.Cmd) error {
+	wc := make(chan error, 1)
+	go func() {
+		for {
+			if _, err := p.getPidFromFile(); err != nil {
+				if os.IsNotExist(err) || err == errInvalidPidInt || err == errContainerNotFound {
+					alive, err := isAlive(cmd)
+					if err != nil {
+						wc <- err
+						return
+					}
+					if !alive {
+						// runc could have failed to run the container so let's get the error
+						// out of the logs or the shim could have encountered an error
+						messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json"))
+						if err != nil {
+							wc <- err
+							return
+						}
+						for _, m := range messages {
+							if m.Level == "error" {
+								wc <- fmt.Errorf("shim error: %v", m.Msg)
+								return
+							}
+						}
+						// no errors reported back from shim, check for runc/runtime errors
+						messages, err = readLogMessages(filepath.Join(p.root, "log.json"))
+						if err != nil {
+							if os.IsNotExist(err) {
+								err = ErrContainerNotStarted
+							}
+							wc <- err
+							return
+						}
+						for _, m := range messages {
+							if m.Level == "error" {
+								wc <- fmt.Errorf("oci runtime error: %v", m.Msg)
+								return
+							}
+						}
+						wc <- ErrContainerNotStarted
+						return
+					}
+					time.Sleep(15 * time.Millisecond)
+					continue
+				}
+				wc <- err
+				return
+			}
+			// the pid file was read successfully
+			wc <- nil
+			return
+		}
+	}()
+	select {
+	case err := <-wc:
+		if err != nil {
+			return err
+		}
+		err = p.saveStartTime()
+		if err != nil && !os.IsNotExist(err) {
+			logrus.Warnf("containerd: unable to save %s:%s starttime: %v", p.container.id, p.id, err)
+		}
+		return nil
+	case <-time.After(c.timeout):
+		cmd.Process.Kill()
+		cmd.Wait()
+		return ErrContainerStartTimeout
+	}
+}
+
+// isAlive checks if the shim that launched the container is still alive
+func isAlive(cmd *exec.Cmd) (bool, error) {
+	if _, err := syscall.Wait4(cmd.Process.Pid, nil, syscall.WNOHANG, nil); err == nil {
+		return true, nil
+	}
+	if err := syscall.Kill(cmd.Process.Pid, 0); err != nil {
+		if err == syscall.ESRCH {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+type oom struct {
+	id      string
+	root    string
+	eventfd int
+}
+
+func (o *oom) ContainerID() string {
+	return o.id
+}
+
+func (o *oom) FD() int {
+	return o.eventfd
+}
+
+func (o *oom) Flush() {
+	buf := make([]byte, 8)
+	syscall.Read(o.eventfd, buf)
+}
+
+func (o *oom) Removed() bool {
+	_, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control"))
+	return os.IsNotExist(err)
+}
+
+func (o *oom) Close() error {
+	return syscall.Close(o.eventfd)
+}
+
+type message struct {
+	Level string `json:"level"`
+	Msg   string `json:"msg"`
+}
+
+func readLogMessages(path string) ([]message, error) {
+	var out []message
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	dec := json.NewDecoder(f)
+	for {
+		var m message
+		if err := dec.Decode(&m); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, err
+		}
+		out = append(out, m)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/container_linux.go b/vendor/github.com/containerd/containerd/runtime/container_linux.go
new file mode 100644
index 0000000..9f3526a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/container_linux.go
@@ -0,0 +1,190 @@
+package runtime
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"syscall"
+
+	"github.com/containerd/containerd/specs"
+	ocs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func findCgroupMountpointAndRoot(pid int, subsystem string) (string, string, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return "", "", err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		txt := scanner.Text()
+		fields := strings.Split(txt, " ")
+		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+			if opt == subsystem {
+				return fields[4], fields[3], nil
+			}
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return "", "", err
+	}
+
+	return "", "", fmt.Errorf("cgroup path for %s not found", subsystem)
+}
+
+func parseCgroupFile(path string) (map[string]string, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	cgroups := make(map[string]string)
+
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+
+		text := s.Text()
+		parts := strings.Split(text, ":")
+
+		for _, subs := range strings.Split(parts[1], ",") {
+			cgroups[subs] = parts[2]
+		}
+	}
+	return cgroups, nil
+}
+
+func (c *container) OOM() (OOM, error) {
+	p := c.processes[InitProcessID]
+	if p == nil {
+		return nil, fmt.Errorf("no init process found")
+	}
+
+	mountpoint, hostRoot, err := findCgroupMountpointAndRoot(os.Getpid(), "memory")
+	if err != nil {
+		return nil, err
+	}
+
+	cgroups, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", p.pid))
+	if err != nil {
+		return nil, err
+	}
+
+	root, ok := cgroups["memory"]
+	if !ok {
+		return nil, fmt.Errorf("no memory cgroup for container %s", c.ID())
+	}
+
+	// Take care of the case where we're running inside a container
+	// ourselves
+	root = strings.TrimPrefix(root, hostRoot)
+
+	return c.getMemoryEventFD(filepath.Join(mountpoint, root))
+}
+
+func (c *container) Pids() ([]int, error) {
+	var pids []int
+	args := c.runtimeArgs
+	args = append(args, "ps", "--format=json", c.id)
+	out, err := exec.Command(c.runtime, args...).CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("%s: %q", err.Error(), out)
+	}
+	if err := json.Unmarshal(out, &pids); err != nil {
+		return nil, err
+	}
+	return pids, nil
+}
+
+func u64Ptr(i uint64) *uint64 { return &i }
+func i64Ptr(i int64) *int64   { return &i }
+
+func (c *container) UpdateResources(r *Resource) error {
+	sr := ocs.LinuxResources{
+		Memory: &ocs.LinuxMemory{
+			Limit:       u64Ptr(uint64(r.Memory)),
+			Reservation: u64Ptr(uint64(r.MemoryReservation)),
+			Swap:        u64Ptr(uint64(r.MemorySwap)),
+			Kernel:      u64Ptr(uint64(r.KernelMemory)),
+			KernelTCP:   u64Ptr(uint64(r.KernelTCPMemory)),
+		},
+		CPU: &ocs.LinuxCPU{
+			Shares: u64Ptr(uint64(r.CPUShares)),
+			Quota:  i64Ptr(int64(r.CPUQuota)),
+			Period: u64Ptr(uint64(r.CPUPeriod)),
+			Cpus:   r.CpusetCpus,
+			Mems:   r.CpusetMems,
+		},
+		BlockIO: &ocs.LinuxBlockIO{
+			Weight: &r.BlkioWeight,
+		},
+		Pids: &ocs.LinuxPids{
+			Limit: r.PidsLimit,
+		},
+	}
+
+	srStr := bytes.NewBuffer(nil)
+	if err := json.NewEncoder(srStr).Encode(&sr); err != nil {
+		return err
+	}
+
+	args := c.runtimeArgs
+	args = append(args, "update", "-r", "-", c.id)
+	cmd := exec.Command(c.runtime, args...)
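+	// The encoded LinuxResources document is handed to the runtime on
+	// stdin; conceptually (sketch, assuming a runc-compatible runtime):
+	//
+	//	echo '{"memory":{"limit":268435456}}' | runc update -r - <container-id>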
+	cmd.Stdin = srStr
+	b, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%s", string(b))
+	}
+	return nil
+}
+
+func getRootIDs(s *specs.Spec) (int, int, error) {
+	if s == nil {
+		return 0, 0, nil
+	}
+	var hasUserns bool
+	for _, ns := range s.Linux.Namespaces {
+		if ns.Type == ocs.UserNamespace {
+			hasUserns = true
+			break
+		}
+	}
+	if !hasUserns {
+		return 0, 0, nil
+	}
+	uid := hostIDFromMap(0, s.Linux.UIDMappings)
+	gid := hostIDFromMap(0, s.Linux.GIDMappings)
+	return uid, gid, nil
+}
+
+func (c *container) getMemoryEventFD(root string) (*oom, error) {
+	f, err := os.Open(filepath.Join(root, "memory.oom_control"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
+	if serr != 0 {
+		return nil, serr
+	}
+	if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil {
+		syscall.Close(int(fd))
+		return nil, err
+	}
+	return &oom{
+		root:    root,
+		id:      c.id,
+		eventfd: int(fd),
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/container_solaris.go b/vendor/github.com/containerd/containerd/runtime/container_solaris.go
new file mode 100644
index 0000000..7d9c538
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/container_solaris.go
@@ -0,0 +1,48 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"github.com/containerd/containerd/specs"
+	ocs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func getRootIDs(s *specs.Spec) (int, int, error) {
+	return 0, 0, nil
+}
+
+func (c *container) OOM() (OOM, error) {
+	return nil, nil
+}
+
+func (c *container) Pids() ([]int, error) {
+	var pids []int
+
+	// TODO: This could be racy. Needs more investigation.
+	// we get this information from runz state
+	cmd := exec.Command(c.runtime, "state", c.id)
+	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+	cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+	if err := cmd.Run(); err != nil {
+		if strings.Contains(errBuf.String(), "Container not found") {
+			return nil, errContainerNotFound
+		}
+		return nil, fmt.Errorf("error is: %+v", err)
+	}
+	response := ocs.State{}
+	decoder := json.NewDecoder(outBuf)
+	if err := decoder.Decode(&response); err != nil {
+		return nil, fmt.Errorf("unable to decode json response: %+v", err)
+	}
+	pids = append(pids, response.Pid)
+	return pids, nil
+}
+
+func (c *container) UpdateResources(r *Resource) error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/process.go b/vendor/github.com/containerd/containerd/runtime/process.go
new file mode 100644
index 0000000..2df67d9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/process.go
@@ -0,0 +1,476 @@
+package runtime
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containerd/containerd/osutils"
+	"github.com/containerd/containerd/specs"
+	"golang.org/x/sys/unix"
+)
+
+// Process holds the operations allowed on a container's process
+type Process interface {
+	io.Closer
+
+	// ID of the process.
+	// This is either "init" when it is the container's init process or
+	// it is a user-provided id for the process similar to the container id
+	ID() string
+	// Start unblocks the associated container init process.
+	// This should only be called on the process with ID "init"
+	Start() error
+	CloseStdin() error
+	Resize(int, int) error
+	// ExitFD returns the fd that provides an event when the process exits
+	ExitFD() int
+	// ExitStatus returns the exit status of the process or an error if it
+	// has not exited
+	ExitStatus() (uint32, error)
+	// Spec returns the process spec that created the process
+	Spec() specs.ProcessSpec
+	// Signal sends the provided signal to the process
+	Signal(os.Signal) error
+	// Container returns the container that the process belongs to
+	Container() Container
+	// Stdio of the process
+	Stdio() Stdio
+	// SystemPid is the pid on the system
+	SystemPid() int
+	// State returns if the process is running or not
+	State() State
+	// Wait reaps the shim process if available
+	Wait()
+}
+
+type processConfig struct {
+	id          string
+	root        string
+	processSpec specs.ProcessSpec
+	spec        *specs.Spec
+	c           *container
+	stdio       Stdio
+	exec        bool
+	checkpoint  string
+}
+
+func newProcess(config *processConfig) (*process, error) {
+	p := &process{
+		root:      config.root,
+		id:        config.id,
+		container: config.c,
+		spec:      config.processSpec,
+		stdio:     config.stdio,
+		cmdDoneCh: make(chan struct{}),
+		state:     Running,
+	}
+	uid, gid, err := getRootIDs(config.spec)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Create(filepath.Join(config.root, "process.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	ps := ProcessState{
+		ProcessSpec: config.processSpec,
+		Exec:        config.exec,
+		PlatformProcessState: PlatformProcessState{
+			Checkpoint: config.checkpoint,
+			RootUID:    uid,
+			RootGID:    gid,
+		},
+		Stdin:       config.stdio.Stdin,
+		Stdout:      config.stdio.Stdout,
+		Stderr:      config.stdio.Stderr,
+		RuntimeArgs: config.c.runtimeArgs,
+		NoPivotRoot: config.c.noPivotRoot,
+	}
+
+	if err := json.NewEncoder(f).Encode(ps); err != nil {
+		return nil, err
+	}
+	exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
+	if err != nil {
+		return nil, err
+	}
+	control, err := getControlPipe(filepath.Join(config.root, ControlFile))
+	if err != nil {
+		return nil, err
+	}
+	p.exitPipe = exit
+	p.controlPipe = control
+	return p, nil
+}
+
+func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
+	p := &process{
+		root:      root,
+		id:        id,
+		container: c,
+		spec:      s.ProcessSpec,
+		stdio: Stdio{
+			Stdin:  s.Stdin,
+			Stdout: s.Stdout,
+			Stderr: s.Stderr,
+		},
+		state: Stopped,
+	}
+
+	startTime, err := ioutil.ReadFile(filepath.Join(p.root, StartTimeFile))
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	p.startTime = string(startTime)
+
+	if _, err := p.getPidFromFile(); err != nil {
+		return nil, err
+	}
+	if _, err := p.ExitStatus(); err != nil {
+		if err == ErrProcessNotExited {
+			exit, err := getExitPipe(filepath.Join(root, ExitFile))
+			if err != nil {
+				return nil, err
+			}
+			p.exitPipe = exit
+
+			control, err := getControlPipe(filepath.Join(root, ControlFile))
+			if err != nil {
+				return nil, err
+			}
+			p.controlPipe = control
+
+			p.state = Running
+			return p, nil
+		}
+		return nil, err
+	}
+	return p, nil
+}
+
+func readProcStatField(pid int, field int) (string, error) {
+	data, err := ioutil.ReadFile(filepath.Join(string(filepath.Separator), "proc", strconv.Itoa(pid), "stat"))
+	if err != nil {
+		return "", err
+	}
+
+	if field > 2 {
+		// First, split out the name since it can contain spaces.
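+		// e.g. for a stat line "42 (my prog) S 1 ...", splitting on ") "
+		// isolates "42 (my prog", so the space inside the name cannot
+		// shift the positions of the fields that follow.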
+		parts := strings.Split(string(data), ") ")
+		// Now split out the rest; we end up with 2 fewer fields
+		parts = strings.Split(parts[1], " ")
+		return parts[field-2-1], nil // field count starts at 1 in the manual
+	}
+
+	parts := strings.Split(string(data), " (")
+
+	if field == 1 {
+		return parts[0], nil
+	}
+
+	parts = strings.Split(parts[1], ") ")
+	return parts[0], nil
+}
+
+type process struct {
+	root        string
+	id          string
+	pid         int
+	exitPipe    *os.File
+	controlPipe *os.File
+	container   *container
+	spec        specs.ProcessSpec
+	stdio       Stdio
+	cmd         *exec.Cmd
+	cmdSuccess  bool
+	cmdDoneCh   chan struct{}
+	state       State
+	stateLock   sync.Mutex
+	startTime   string
+}
+
+func (p *process) ID() string {
+	return p.id
+}
+
+func (p *process) Container() Container {
+	return p.container
+}
+
+func (p *process) SystemPid() int {
+	return p.pid
+}
+
+// ExitFD returns the fd of the exit pipe
+func (p *process) ExitFD() int {
+	return int(p.exitPipe.Fd())
+}
+
+func (p *process) CloseStdin() error {
+	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
+	return err
+}
+
+func (p *process) Resize(w, h int) error {
+	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
+	return err
+}
+
+func (p *process) updateExitStatusFile(status uint32) (uint32, error) {
+	p.stateLock.Lock()
+	p.state = Stopped
+	p.stateLock.Unlock()
+	err := ioutil.WriteFile(filepath.Join(p.root, ExitStatusFile), []byte(fmt.Sprintf("%d", status)), 0644)
+	return status, err
+}
+
+func (p *process) handleSigkilledShim(rst uint32, rerr error) (uint32, error) {
+	if p.cmd == nil || p.cmd.Process == nil {
+		e := unix.Kill(p.pid, 0)
+		if e == syscall.ESRCH {
+			logrus.Warnf("containerd: %s:%s (pid %d) does not exist", p.container.id, p.id, p.pid)
+			// The process died while containerd was down (probably of
+			// SIGKILL, but no way to be sure)
+			return p.updateExitStatusFile(UnknownStatus)
+		}
+
+		// If it's not the same process, just mark it stopped and set
+		// the status to the UnknownStatus value (i.e. 255)
+		if same, err := p.isSameProcess(); !same {
+			logrus.Warnf("containerd: %s:%s (pid %d) is not the same process anymore (%v)", p.container.id, p.id, p.pid, err)
+			// Create the file so we get the exit event generated once monitor kicks in
+			// without having to go through all this process again
+			return p.updateExitStatusFile(UnknownStatus)
+		}
+
+		ppid, err := readProcStatField(p.pid, 4)
+		if err != nil {
+			return rst, fmt.Errorf("could not check process ppid: %v (%v)", err, rerr)
+		}
+		if ppid == "1" {
+			logrus.Warnf("containerd: %s:%s shim died, killing associated process", p.container.id, p.id)
+			err = unix.Kill(p.pid, syscall.SIGKILL)
+			if err != nil && err != syscall.ESRCH {
+				return UnknownStatus, fmt.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
+			}
+
+			// wait for the process to die
+			for {
+				e := unix.Kill(p.pid, 0)
+				if e == syscall.ESRCH {
+					break
+				}
+				time.Sleep(5 * time.Millisecond)
+			}
+			// Create the file so we get the exit event generated once monitor kicks in
+			// without having to go through all this process again
+			return p.updateExitStatusFile(128 + uint32(syscall.SIGKILL))
+		}
+
+		return rst, rerr
+	}
+
+	// Possible that the shim was SIGKILLED
+	e := unix.Kill(p.cmd.Process.Pid, 0)
+	if e != syscall.ESRCH {
+		return rst, rerr
+	}
+
+	// Ensure we got the shim ProcessState
+	<-p.cmdDoneCh
+
+	shimStatus := p.cmd.ProcessState.Sys().(syscall.WaitStatus)
+	if shimStatus.Signaled() && shimStatus.Signal() == syscall.SIGKILL {
+		logrus.Debugf("containerd: ExitStatus(container: %s, process: %s): shim was SIGKILL'ed reaping its child with pid %d", p.container.id, p.id, p.pid)
+
+		rerr = nil
+		rst = 128 + uint32(shimStatus.Signal())
+
+		p.stateLock.Lock()
+		p.state = Stopped
+		p.stateLock.Unlock()
+	}
+
+	return rst, rerr
+}
+
+func (p *process) ExitStatus() (rst uint32, rerr error) {
+	data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
+	defer func() {
+		if rerr != nil {
+			rst, rerr = p.handleSigkilledShim(rst, rerr)
+		}
+	}()
+	if err != nil {
+		if os.IsNotExist(err) {
+			return UnknownStatus, ErrProcessNotExited
+		}
+		return UnknownStatus, err
+	}
+	if len(data) == 0 {
+		return UnknownStatus, ErrProcessNotExited
+	}
+	p.stateLock.Lock()
+	p.state = Stopped
+	p.stateLock.Unlock()
+
+	i, err := strconv.ParseUint(string(data), 10, 32)
+	return uint32(i), err
+}
+
+func (p *process) Spec() specs.ProcessSpec {
+	return p.spec
+}
+
+func (p *process) Stdio() Stdio {
+	return p.stdio
+}
+
+// Close closes any open files and/or resources on the process
+func (p *process) Close() error {
+	err := p.exitPipe.Close()
+	if cerr := p.controlPipe.Close(); err == nil {
+		err = cerr
+	}
+	return err
+}
+
+func (p *process) State() State {
+	p.stateLock.Lock()
+	defer p.stateLock.Unlock()
+	return p.state
+}
+
+func (p *process) readStartTime() (string, error) {
+	return readProcStatField(p.pid, 22)
+}
+
+func (p *process) saveStartTime() error {
+	startTime, err := p.readStartTime()
+	if err != nil {
+		return err
+	}
+
+	p.startTime = startTime
+	return ioutil.WriteFile(filepath.Join(p.root, StartTimeFile), []byte(startTime), 0644)
+}
+
+func (p *process) isSameProcess() (bool, error) {
+	if p.pid == 0 {
+		_, err := p.getPidFromFile()
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// for backward compat assume it's the same if startTime wasn't set
+	if p.startTime == "" {
+		// Sometimes the process dies before we can get the starttime,
+		// check that the process actually exists
+		if err := unix.Kill(p.pid, 0); err != syscall.ESRCH {
+			return true, nil
+		}
+		return false, nil
+	}
+
+	startTime, err := p.readStartTime()
+	if err != nil {
+		return false, err
+	}
+
+	return startTime == p.startTime, nil
+}
+
+// Wait will reap the shim process
+func (p *process) Wait() {
+	if p.cmdDoneCh != nil {
+		<-p.cmdDoneCh
+	}
+}
+
+func getExitPipe(path string) (*os.File, error) {
+	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	// add NONBLOCK in case the other side has already closed or else
+	// this function would never return
+	return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
+}
+
+func getControlPipe(path string) (*os.File, error) {
+	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
+}
+
+// Signal sends the provided signal to the process
+func (p *process) Signal(s os.Signal) error {
+	return syscall.Kill(p.pid, s.(syscall.Signal))
+}
+
+// Start unblocks the associated container init process.
+// This should only be called on the process with ID "init"
+func (p *process) Start() error {
+	if p.ID() == InitProcessID {
+		var (
+			errC = make(chan error, 1)
+			args = append(p.container.runtimeArgs, "start", p.container.id)
+			cmd  = exec.Command(p.container.runtime, args...)
+		)
+		go func() {
+			out, err := cmd.CombinedOutput()
+			if err != nil {
+				errC <- fmt.Errorf("%s: %q", err.Error(), out)
+				return
+			}
+			errC <- nil
+		}()
+		select {
+		case err := <-errC:
+			if err != nil {
+				return err
+			}
+		case <-p.cmdDoneCh:
+			if !p.cmdSuccess {
+				if cmd.Process != nil {
+					cmd.Process.Kill()
+				}
+				cmd.Wait()
+				return ErrShimExited
+			}
+			err := <-errC
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Delete deletes any resources held by the container
+func (p *process) Delete() error {
+	var (
+		args = append(p.container.runtimeArgs, "delete", "-f", p.container.id)
+		cmd  = exec.Command(p.container.runtime, args...)
+	)
+
+	cmd.SysProcAttr = osutils.SetPDeathSig()
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%s: %v", out, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/process_linux.go b/vendor/github.com/containerd/containerd/runtime/process_linux.go
new file mode 100644
index 0000000..d14c4d8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/process_linux.go
@@ -0,0 +1,22 @@
+// +build linux
+
+package runtime
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+)
+
+func (p *process) getPidFromFile() (int, error) {
+	data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
+	if err != nil {
+		return -1, err
+	}
+	i, err := strconv.Atoi(string(data))
+	if err != nil {
+		return -1, errInvalidPidInt
+	}
+	p.pid = i
+	return i, nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/process_solaris.go b/vendor/github.com/containerd/containerd/runtime/process_solaris.go
new file mode 100644
index 0000000..8159f30
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/process_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris
+
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+
+	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// On Solaris we already have a state file maintained by the framework.
+// This is read by runz state. We just call that instead of maintaining
+// a separate file.
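+//
+// The state document consumed below is the standard OCI state JSON,
+// roughly (illustrative values only):
+//
+//	{"ociVersion":"1.0.0","id":"web","status":"running","pid":1234,"bundle":"/zones/web"}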
+func (p *process) getPidFromFile() (int, error) {
+	// we get this information from runz state
+	cmd := exec.Command("runc", "state", p.container.ID())
+	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+	cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+	if err := cmd.Run(); err != nil {
+		// TODO: Improve logic
+		return -1, errContainerNotFound
+	}
+	response := runtimespec.State{}
+	decoder := json.NewDecoder(outBuf)
+	if err := decoder.Decode(&response); err != nil {
+		return -1, fmt.Errorf("unable to decode json response: %+v", err)
+	}
+	p.pid = response.Pid
+	return p.pid, nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/runtime.go b/vendor/github.com/containerd/containerd/runtime/runtime.go
new file mode 100644
index 0000000..eaba452
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/runtime.go
@@ -0,0 +1,132 @@
+package runtime
+
+import (
+	"errors"
+	"time"
+
+	"github.com/containerd/containerd/specs"
+)
+
+var (
+	// ErrContainerExited is returned when access to an exited
+	// container is attempted
+	ErrContainerExited = errors.New("containerd: container has exited")
+	// ErrProcessNotExited is returned when trying to retrieve the exit
+	// status of an alive process
+	ErrProcessNotExited = errors.New("containerd: process has not exited")
+	// ErrContainerNotStarted is returned when a container fails to
+	// start without error from the shim or the OCI runtime
+	ErrContainerNotStarted = errors.New("containerd: container not started")
+	// ErrContainerStartTimeout is returned if a container takes too
+	// long to start
+	ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
+	// ErrShimExited is returned if the shim or the container's init process
+	// exits before completing
+	ErrShimExited = errors.New("containerd: shim exited before container process was started")
+
+	errNoPidFile         = errors.New("containerd: no process pid file found")
+	errInvalidPidInt     = errors.New("containerd: process pid is invalid")
+	errContainerNotFound = errors.New("containerd: container not found")
+	errNotImplemented    = errors.New("containerd: not implemented")
+)
+
+const (
+	// ExitFile holds the name of the pipe used to monitor process
+	// exit
+	ExitFile = "exit"
+	// ExitStatusFile holds the name of the file where the container
+	// exit code is to be written
+	ExitStatusFile = "exitStatus"
+	// StateFile holds the name of the file where the container state
+	// is written
+	StateFile = "state.json"
+	// ControlFile holds the name of the pipe used to control the shim
+	ControlFile = "control"
+	// InitProcessID holds the special ID used for the very first
+	// container's process
+	InitProcessID = "init"
+	// StartTimeFile holds the name of the file in which the process
+	// start time is saved
+	StartTimeFile = "starttime"
+
+	// UnknownStatus is the value returned when a process exit
+	// status cannot be determined
+	UnknownStatus = 255
+)
+
+// Checkpoint holds information regarding a container checkpoint
+type Checkpoint struct {
+	// Created is the time the checkpoint happened
+	Created time.Time `json:"created"`
+	// Name is the name of the checkpoint
+	Name string `json:"name"`
+	// TCP checkpoints open tcp connections
+	TCP bool `json:"tcp"`
+	// UnixSockets persists unix sockets in the checkpoint
+	UnixSockets bool `json:"unixSockets"`
+	// Shell persists tty sessions in the checkpoint
+	Shell bool `json:"shell"`
+	// Exit exits the container after the checkpoint is finished
+	Exit bool `json:"exit"`
+	// EmptyNS tells CRIU to omit a specified namespace
+	EmptyNS []string `json:"emptyNS,omitempty"`
+}
+
+// PlatformProcessState contains platform-specific fields in the ProcessState structure
+type PlatformProcessState struct {
+	Checkpoint string `json:"checkpoint"`
+	RootUID    int    `json:"rootUID"`
+	RootGID    int    `json:"rootGID"`
+}
+
+// State represents a container state
+type State string
+
+// Resource regroups the various container limits that can be updated
+type Resource struct {
+	CPUShares         int64
+	BlkioWeight       uint16
+	CPUPeriod         int64
+	CPUQuota          int64
+	CpusetCpus        string
+	CpusetMems        string
+	KernelMemory      int64
+	KernelTCPMemory   int64
+	Memory            int64
+	MemoryReservation int64
+	MemorySwap        int64
+	PidsLimit         int64
+}
+
+// Possible container states
+const (
+	Paused  = State("paused")
+	Stopped = State("stopped")
+	Running = State("running")
+)
+
+type state struct {
+	Bundle      string   `json:"bundle"`
+	Labels      []string `json:"labels"`
+	Stdin       string   `json:"stdin"`
+	Stdout      string   `json:"stdout"`
+	Stderr      string   `json:"stderr"`
+	Runtime     string   `json:"runtime"`
+	RuntimeArgs []string `json:"runtimeArgs"`
+	Shim        string   `json:"shim"`
+	NoPivotRoot bool     `json:"noPivotRoot"`
+}
+
+// ProcessState holds the process OCI specs along with various fields
+// required by containerd
+type ProcessState struct {
+	specs.ProcessSpec
+	Exec        bool     `json:"exec"`
+	Stdin       string   `json:"containerdStdin"`
+	Stdout      string   `json:"containerdStdout"`
+	Stderr      string   `json:"containerdStderr"`
+	RuntimeArgs []string `json:"runtimeArgs"`
+	NoPivotRoot bool     `json:"noPivotRoot"`
+
+	PlatformProcessState
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/stats.go b/vendor/github.com/containerd/containerd/runtime/stats.go
new file mode 100644
index 0000000..4cfa9e9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/stats.go
@@ -0,0 +1,87 @@
+package runtime
+
+import "time"
+
+// Stat holds a container's statistics
+type Stat struct {
+	// Timestamp is the time that the statistics were collected
+	Timestamp time.Time
+	CPU       CPU                `json:"cpu"`
+	Memory    Memory             `json:"memory"`
+	Pids      Pids               `json:"pids"`
+	Blkio     Blkio              `json:"blkio"`
+	Hugetlb   map[string]Hugetlb `json:"hugetlb"`
+}
+
+// Hugetlb holds information regarding a container huge tlb usage
+type Hugetlb struct {
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+// BlkioEntry represents a single record for a Blkio stat
+type BlkioEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+// Blkio regroups all the Blkio related stats
+type Blkio struct {
+	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
+	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
+	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
+	IoMergedRecursive       []BlkioEntry `json:"ioMergedRecursive,omitempty"`
+	IoTimeRecursive         []BlkioEntry `json:"ioTimeRecursive,omitempty"`
+	SectorsRecursive        []BlkioEntry `json:"sectorsRecursive,omitempty"`
+}
+
+// Pids holds the stat of the pid usage of the machine
+type Pids struct {
+	Current uint64 `json:"current,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+}
+
+// Throttling holds CPU throttling information
+type Throttling struct {
+	Periods          uint64 `json:"periods,omitempty"`
+	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
+	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
+}
+
+// CPUUsage holds information regarding cpu usage
+type CPUUsage struct {
+	// Units: nanoseconds.
+	Total  uint64   `json:"total,omitempty"`
+	Percpu []uint64 `json:"percpu,omitempty"`
+	Kernel uint64   `json:"kernel"`
+	User   uint64   `json:"user"`
+}
+
+// CPU regroups both a CPU usage and throttling information
+type CPU struct {
+	Usage      CPUUsage   `json:"usage,omitempty"`
+	Throttling Throttling `json:"throttling,omitempty"`
+}
+
+// MemoryEntry regroups statistics about a given type of memory
+type MemoryEntry struct {
+	Limit   uint64 `json:"limit"`
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+// Memory holds information regarding the different types of memory available
+type Memory struct {
+	Cache     uint64            `json:"cache,omitempty"`
+	Usage     MemoryEntry       `json:"usage,omitempty"`
+	Swap      MemoryEntry       `json:"swap,omitempty"`
+	Kernel    MemoryEntry       `json:"kernel,omitempty"`
+	KernelTCP MemoryEntry       `json:"kernelTCP,omitempty"`
+	Raw       map[string]uint64 `json:"raw,omitempty"`
+}
diff --git a/vendor/github.com/containerd/containerd/specs/spec_linux.go b/vendor/github.com/containerd/containerd/specs/spec_linux.go
new file mode 100644
index 0000000..0b31604
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/specs/spec_linux.go
@@ -0,0 +1,12 @@
+package specs
+
+import oci "github.com/opencontainers/runtime-spec/specs-go"
+
+type (
+	// ProcessSpec aliases the platform process specs
+	ProcessSpec oci.Process
+	// Spec aliases the platform oci spec
+	Spec oci.Spec
+	// Rlimit aliases the platform resource limit
+	Rlimit oci.LinuxRlimit
+)
diff --git a/vendor/github.com/containerd/containerd/specs/spec_solaris.go b/vendor/github.com/containerd/containerd/specs/spec_solaris.go
new file mode 100644
index 0000000..1b60d3d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/specs/spec_solaris.go
@@ -0,0 +1,10 @@
+package specs
+
+import ocs "github.com/opencontainers/runtime-spec/specs-go"
+
+type (
+	// ProcessSpec aliases the platform process specs
+	ProcessSpec ocs.Process
+	// Spec aliases the platform oci spec
+	Spec ocs.Spec
+)
diff --git a/vendor/github.com/containerd/containerd/supervisor/add_process.go b/vendor/github.com/containerd/containerd/supervisor/add_process.go
new file mode 100644
index 0000000..6c56b82
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/add_process.go
@@ -0,0 +1,53 @@
+package supervisor
+
+import (
+	"os"
+	"time"
+
+	"github.com/containerd/containerd/runtime"
+	"github.com/containerd/containerd/specs"
+	"golang.org/x/net/context"
+)
+
+// AddProcessTask holds everything necessary to add a process to a
+// container
+type AddProcessTask struct {
+	baseTask
+	ID            string
+	PID           string
+	Stdout        string
+	Stderr        string
+	Stdin         string
+	ProcessSpec   *specs.ProcessSpec
+	StartResponse chan StartResponse
+	Ctx           context.Context
+}
+
+func (s *Supervisor) addProcess(t *AddProcessTask) error {
+	start := time.Now()
+	ci, ok := s.containers[t.ID]
+	if !ok {
+		return ErrContainerNotFound
+	}
+	process, err := ci.container.Exec(t.Ctx, t.PID, *t.ProcessSpec, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr))
+	if err != nil {
+		return err
+	}
+	s.newExecSyncChannel(t.ID, t.PID)
+	if err := s.monitorProcess(process); err != nil {
+		s.deleteExecSyncChannel(t.ID, t.PID)
+		// Kill process
+		process.Signal(os.Kill)
+
ci.container.RemoveProcess(t.PID) + return err + } + ExecProcessTimer.UpdateSince(start) + t.StartResponse <- StartResponse{ExecPid: process.SystemPid()} + s.notifySubscribers(Event{ + Timestamp: time.Now(), + Type: StateStartProcess, + PID: t.PID, + ID: t.ID, + }) + return nil +} diff --git a/vendor/github.com/containerd/containerd/supervisor/checkpoint.go b/vendor/github.com/containerd/containerd/supervisor/checkpoint.go new file mode 100644 index 0000000..ce05aea --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/checkpoint.go @@ -0,0 +1,37 @@ +// +build !windows + +package supervisor + +import "github.com/containerd/containerd/runtime" + +// CreateCheckpointTask holds needed parameters to create a new checkpoint +type CreateCheckpointTask struct { + baseTask + ID string + CheckpointDir string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.Checkpoint(*t.Checkpoint, t.CheckpointDir) +} + +// DeleteCheckpointTask holds needed parameters to delete a checkpoint +type DeleteCheckpointTask struct { + baseTask + ID string + CheckpointDir string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) deleteCheckpoint(t *DeleteCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.DeleteCheckpoint(t.Checkpoint.Name, t.CheckpointDir) +} diff --git a/vendor/github.com/containerd/containerd/supervisor/create.go b/vendor/github.com/containerd/containerd/supervisor/create.go new file mode 100644 index 0000000..3e782c6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/create.go @@ -0,0 +1,71 @@ +package supervisor + +import ( + "path/filepath" + "time" + + "github.com/containerd/containerd/runtime" + "golang.org/x/net/context" +) + +// StartTask holds needed parameters to create a new container +type StartTask struct { + baseTask + ID string + BundlePath string + Stdout string + Stderr string + Stdin string + StartResponse chan StartResponse + Labels []string + NoPivotRoot bool + Checkpoint *runtime.Checkpoint + CheckpointDir string + Runtime string + RuntimeArgs []string + Ctx context.Context +} + +func (s *Supervisor) start(t *StartTask) error { + start := time.Now() + rt := s.runtime + rtArgs := s.runtimeArgs + if t.Runtime != "" { + rt = t.Runtime + rtArgs = t.RuntimeArgs + } + container, err := runtime.New(runtime.ContainerOpts{ + Root: s.stateDir, + ID: t.ID, + Bundle: t.BundlePath, + Runtime: rt, + RuntimeArgs: rtArgs, + Shim: s.shim, + Labels: t.Labels, + NoPivotRoot: t.NoPivotRoot, + Timeout: s.timeout, + }) + if err != nil { + return err + } + s.containers[t.ID] = &containerInfo{ + container: container, + } + ContainersCounter.Inc(1) + task := &startTask{ + Err: t.ErrorCh(), + Container: container, + StartResponse: t.StartResponse, + Stdin: t.Stdin, + Stdout: t.Stdout, + Stderr: t.Stderr, + Ctx: t.Ctx, + } + if t.Checkpoint != nil { + task.CheckpointPath = filepath.Join(t.CheckpointDir, t.Checkpoint.Name) + } + + s.startTasks <- task + ContainerCreateTimer.UpdateSince(start) + return errDeferredResponse +} diff --git a/vendor/github.com/containerd/containerd/supervisor/create_solaris.go b/vendor/github.com/containerd/containerd/supervisor/create_solaris.go new file mode 100644 index 0000000..444e55b --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/create_solaris.go @@ -0,0 +1,8 @@ +package supervisor + 
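+// platformStartTask carries the Solaris-specific pieces of a start task;
+// there are none today, and checkpoint restore (below) is a no-op.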
+type platformStartTask struct {
+}
+
+// Checkpoint not supported on Solaris
+func (task *startTask) setTaskCheckpoint(t *StartTask) {
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/delete.go b/vendor/github.com/containerd/containerd/supervisor/delete.go
new file mode 100644
index 0000000..3f5f6d8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/delete.go
@@ -0,0 +1,56 @@
+package supervisor
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containerd/containerd/runtime"
+)
+
+// DeleteTask holds needed parameters to remove a container
+type DeleteTask struct {
+	baseTask
+	ID      string
+	Status  uint32
+	PID     string
+	NoEvent bool
+	Process runtime.Process
+}
+
+func (s *Supervisor) delete(t *DeleteTask) error {
+	if i, ok := s.containers[t.ID]; ok {
+		start := time.Now()
+		if err := s.deleteContainer(i.container); err != nil {
+			logrus.WithField("error", err).Error("containerd: deleting container")
+		}
+		if t.Process != nil {
+			t.Process.Wait()
+		}
+		if !t.NoEvent {
+			execMap := s.getExecSyncMap(t.ID)
+			go func() {
+				// Wait for all exec process events to be sent (we seem
+				// to sometimes receive them after the init event)
+				for _, ch := range execMap {
+					<-ch
+				}
+				s.deleteExecSyncMap(t.ID)
+				s.notifySubscribers(Event{
+					Type:      StateExit,
+					Timestamp: time.Now(),
+					ID:        t.ID,
+					Status:    t.Status,
+					PID:       t.PID,
+				})
+			}()
+		}
+		ContainersCounter.Dec(1)
+		ContainerDeleteTimer.UpdateSince(start)
+	}
+	return nil
+}
+
+func (s *Supervisor) deleteContainer(container runtime.Container) error {
+	delete(s.containers, container.ID())
+	return container.Delete()
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/errors.go b/vendor/github.com/containerd/containerd/supervisor/errors.go
new file mode 100644
index 0000000..930807d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/errors.go
@@ -0,0 +1,28 @@
+package supervisor
+
+import "errors"
+
+var (
+	// ErrContainerNotFound is returned when the container ID passed
+	// for a given operation is invalid
+	ErrContainerNotFound = errors.New("containerd: container not found")
+	// ErrProcessNotFound is returned when the process ID passed for
+	// a given operation is invalid
+	ErrProcessNotFound = errors.New("containerd: process not found for container")
+	// ErrUnknownContainerStatus is returned when the container status
+	// cannot be determined
+	ErrUnknownContainerStatus = errors.New("containerd: unknown container status")
+	// ErrUnknownTask is returned when an unknown Task type is
+	// scheduled (should never happen).
+	ErrUnknownTask = errors.New("containerd: unknown task type")
+
+	// Internal errors
+	errShutdown          = errors.New("containerd: supervisor is shutdown")
+	errRootNotAbs        = errors.New("containerd: rootfs path is not an absolute path")
+	errNoContainerForPid = errors.New("containerd: pid not registered for any container")
+	// internal error where the handler will defer to another for the final response
+	//
+	// TODO: we could probably do a typed error with another error channel for this to make it
+	// less like magic
+	errDeferredResponse = errors.New("containerd: deferred response")
+)
diff --git a/vendor/github.com/containerd/containerd/supervisor/exit.go b/vendor/github.com/containerd/containerd/supervisor/exit.go
new file mode 100644
index 0000000..9977c18
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/exit.go
@@ -0,0 +1,93 @@
+package supervisor
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containerd/containerd/runtime"
+)
+
+// ExitTask holds needed parameters to execute the exit task
+type ExitTask struct {
+	baseTask
+	Process runtime.Process
+}
+
+func (s *Supervisor) exit(t *ExitTask) error {
+	start := time.Now()
+	proc := t.Process
+	status, err := proc.ExitStatus()
+	if err != nil {
+		logrus.WithFields(logrus.Fields{
+			"error":     err,
+			"pid":       proc.ID(),
+			"id":        proc.Container().ID(),
+			"systemPid": proc.SystemPid(),
+		}).Error("containerd: get exit status")
+	}
+	logrus.WithFields(logrus.Fields{
+		"pid":       proc.ID(),
+		"status":    status,
+		"id":        proc.Container().ID(),
+		"systemPid": proc.SystemPid(),
+	}).Debug("containerd: process exited")
+
+	// if the process is not the init process of the container then
+	// fire a separate event for this process
+	if proc.ID() != runtime.InitProcessID {
+		ne := &ExecExitTask{
+			ID:      proc.Container().ID(),
+			PID:     proc.ID(),
+			Status:  status,
+			Process: proc,
+		}
+		s.execExit(ne)
+		return nil
+	}
+	container := proc.Container()
+	ne := &DeleteTask{
+		ID:      container.ID(),
+		Status:  status,
+		PID:     proc.ID(),
+		Process: proc,
+	}
+	s.delete(ne)
+
+	ExitProcessTimer.UpdateSince(start)
+
+	return nil
+}
+
+// ExecExitTask holds needed parameters to execute the exec exit task
+type ExecExitTask struct {
+	baseTask
+	ID      string
+	PID     string
+	Status  uint32
+	Process runtime.Process
+}
+
+func (s *Supervisor) execExit(t *ExecExitTask) error {
+	container := t.Process.Container()
+	// exec process: we remove this process without notifying the main event loop
+	if err := container.RemoveProcess(t.PID); err != nil {
+		logrus.WithField("error", err).Error("containerd: find container for pid")
+	}
+	synCh := s.getExecSyncChannel(t.ID, t.PID)
+	// If the exec spawned children which are still using its IO,
+	// waiting here will block until they die or close their IO
+	// descriptors.
+	// Hence, we use a goroutine to avoid blocking all other operations
+	go func() {
+		t.Process.Wait()
+		s.notifySubscribers(Event{
+			Timestamp: time.Now(),
+			ID:        t.ID,
+			Type:      StateExit,
+			PID:       t.PID,
+			Status:    t.Status,
+		})
+		close(synCh)
+	}()
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/get_containers.go b/vendor/github.com/containerd/containerd/supervisor/get_containers.go
new file mode 100644
index 0000000..528c02f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/get_containers.go
@@ -0,0 +1,47 @@
+package supervisor
+
+import "github.com/containerd/containerd/runtime"
+
+// GetContainersTask holds needed parameters to retrieve a list of
+// containers
+type GetContainersTask struct {
+	baseTask
+	ID       string
+	GetState func(c runtime.Container) (interface{}, error)
+
+	Containers []runtime.Container
+	States     []interface{}
+}
+
+func (s *Supervisor) getContainers(t *GetContainersTask) error {
+
+	if t.ID != "" {
+		ci, ok := s.containers[t.ID]
+		if !ok {
+			return ErrContainerNotFound
+		}
+		t.Containers = append(t.Containers, ci.container)
+		if t.GetState != nil {
+			st, err := t.GetState(ci.container)
+			if err != nil {
+				return err
+			}
+			t.States = append(t.States, st)
+		}
+
+		return nil
+	}
+
+	for _, ci := range s.containers {
+		t.Containers = append(t.Containers, ci.container)
+		if t.GetState != nil {
+			st, err := t.GetState(ci.container)
+			if err != nil {
+				return err
+			}
+			t.States = append(t.States, st)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/machine.go b/vendor/github.com/containerd/containerd/supervisor/machine.go
new file mode 100644
index 0000000..387c73a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/machine.go
@@ -0,0 +1,28 @@
+// +build !solaris
+
+package supervisor
+
+import "github.com/cloudfoundry/gosigar"
+
+// Machine holds the current machine cpu count and ram size
+type Machine struct {
+	Cpus   int
+	Memory int64
+}
+
+// CollectMachineInformation returns information regarding the current
+// machine (e.g. CPU count, RAM amount)
+func CollectMachineInformation() (Machine, error) {
+	m := Machine{}
+	cpu := sigar.CpuList{}
+	if err := cpu.Get(); err != nil {
+		return m, err
+	}
+	m.Cpus = len(cpu.List)
+	mem := sigar.Mem{}
+	if err := mem.Get(); err != nil {
+		return m, err
+	}
+	m.Memory = int64(mem.Total / 1024 / 1024)
+	return m, nil
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/machine_solaris.go b/vendor/github.com/containerd/containerd/supervisor/machine_solaris.go
new file mode 100644
index 0000000..3320394
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/machine_solaris.go
@@ -0,0 +1,41 @@
+package supervisor
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+import (
+	"errors"
+)
+
+// Machine holds the current machine cpu count and ram size
+type Machine struct {
+	Cpus   int
+	Memory int64
+}
+
+// CollectMachineInformation returns information regarding the current
+// machine (e.g. CPU count, RAM amount)
+func CollectMachineInformation() (Machine, error) {
+	m := Machine{}
+	ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN)
+	if ncpus <= 0 {
+		return m, errors.New("unable to get number of cpus")
+	}
+	m.Cpus = int(ncpus)
+
+	memTotal := getTotalMem()
+	if memTotal < 0 {
+		return m, errors.New("unable to get total memory")
+	}
+	m.Memory = int64(memTotal / 1024 / 1024)
+	return m, nil
+}
+
+// Get the system memory info using sysconf, same as prtconf
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/metrics.go b/vendor/github.com/containerd/containerd/supervisor/metrics.go
new file mode 100644
index 0000000..e49170d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/metrics.go
@@ -0,0 +1,42 @@
+package supervisor
+
+import "github.com/rcrowley/go-metrics"
+
+var (
+	// ContainerCreateTimer holds the metrics timer associated with container creation
+	ContainerCreateTimer = metrics.NewTimer()
+	// ContainerDeleteTimer holds the metrics timer associated with container deletion
+	ContainerDeleteTimer = metrics.NewTimer()
+	// ContainerStartTimer holds the metrics timer associated with container start duration
+	ContainerStartTimer = metrics.NewTimer()
+	// ContainerStatsTimer holds the metrics timer associated with container stats generation
+	ContainerStatsTimer = metrics.NewTimer()
+	// ContainersCounter keeps track of the number of active containers
+	ContainersCounter = metrics.NewCounter()
+	// EventSubscriberCounter keeps track of the number of active event subscribers
+	EventSubscriberCounter = metrics.NewCounter()
+	// TasksCounter keeps track of the number of active supervisor tasks
+	TasksCounter = metrics.NewCounter()
+	// ExecProcessTimer holds the metrics timer associated with container exec
+	ExecProcessTimer = metrics.NewTimer()
+	// ExitProcessTimer holds the metrics timer associated with reporting container exit status
+	ExitProcessTimer = metrics.NewTimer()
+	// EpollFdCounter keeps track of how many processes are being monitored
+	EpollFdCounter = metrics.NewCounter()
+)
+
+// Metrics returns the list of all available metrics
+func Metrics() map[string]interface{} {
+	return map[string]interface{}{
+		"container-create-time": ContainerCreateTimer,
+		"container-delete-time": ContainerDeleteTimer,
+		"container-start-time":  ContainerStartTimer,
+		"container-stats-time":  ContainerStatsTimer,
+		"containers":            ContainersCounter,
+		"event-subscribers":     EventSubscriberCounter,
+		"tasks":                 TasksCounter,
+		"exec-process-time":     ExecProcessTimer,
+		"exit-process-time":     ExitProcessTimer,
+		"epoll-fds":             EpollFdCounter,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/supervisor/monitor_linux.go b/vendor/github.com/containerd/containerd/supervisor/monitor_linux.go
new file mode 100644
index 0000000..b643b88
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/supervisor/monitor_linux.go
@@ -0,0 +1,147 @@
+package supervisor
+
+import (
+	"sync"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containerd/containerd/archutils"
+	"github.com/containerd/containerd/runtime"
+)
+
+// NewMonitor starts a new process monitor and returns it
+func NewMonitor() (*Monitor, error) {
+	m := &Monitor{
+		receivers: make(map[int]interface{}),
+		exits:     make(chan runtime.Process, 1024),
+		ooms:      make(chan string, 1024),
+	}
+	fd, err := archutils.EpollCreate1(syscall.EPOLL_CLOEXEC)
+	if err != nil {
diff --git a/vendor/github.com/containerd/containerd/supervisor/monitor_linux.go b/vendor/github.com/containerd/containerd/supervisor/monitor_linux.go new file mode 100644 index 0000000..b643b88 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/monitor_linux.go @@ -0,0 +1,147 @@ +package supervisor + +import ( + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/archutils" + "github.com/containerd/containerd/runtime" +) + +// NewMonitor starts a new process monitor and returns it +func NewMonitor() (*Monitor, error) { + m := &Monitor{ + receivers: make(map[int]interface{}), + exits: make(chan runtime.Process, 1024), + ooms: make(chan string, 1024), + } + fd, err := archutils.EpollCreate1(syscall.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + m.epollFd = fd + go m.start() + return m, nil +} + +// Monitor represents a runtime.Process monitor +type Monitor struct { + m sync.Mutex + receivers map[int]interface{} + exits chan runtime.Process + ooms chan string + epollFd int +} + +// Exits returns the channel used to notify of a process exit +func (m *Monitor) Exits() chan runtime.Process { + return m.exits +} + +// OOMs returns the channel used to notify of a container exit due to OOM +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +// Monitor adds a process to the list of the ones being monitored +func (m *Monitor) Monitor(p runtime.Process) error { + m.m.Lock() + defer m.m.Unlock() + fd := p.ExitFD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = p + return nil +} + +// MonitorOOM adds a container to the list of the ones monitored for OOM +func (m *Monitor) MonitorOOM(c runtime.Container) error { + m.m.Lock() + defer m.m.Unlock() + o, err := c.OOM() + if err != nil { + return err + } + fd := o.FD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP | syscall.EPOLLIN, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = o + return nil +} + +// Close cleans up resources allocated by NewMonitor() +func (m *Monitor) Close() error { + return syscall.Close(m.epollFd) +} + +func (m *Monitor) processEvent(fd int, event uint32) { + m.m.Lock() + r := m.receivers[fd] + switch t := r.(type) { + case runtime.Process: + if event == syscall.EPOLLHUP { + delete(m.receivers, fd) + if err := syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_DEL, fd, &syscall.EpollEvent{ + Events: syscall.EPOLLHUP, + Fd: int32(fd), + }); err != nil { + logrus.WithField("error", err).Error("containerd: epoll remove fd") + } + if err := t.Close(); err != nil { + logrus.WithField("error", err).Error("containerd: close process IO") + } + EpollFdCounter.Dec(1) + // defer until lock is released + defer func() { + m.exits <- t + }() + } + case runtime.OOM: + // always flush the event fd + t.Flush() + if t.Removed() { + delete(m.receivers, fd) + // epoll will remove the fd from its set after it has been closed + t.Close() + EpollFdCounter.Dec(1) + } else { + // defer until lock is released + defer func() { + m.ooms <- t.ContainerID() + }() + } + } + // This cannot be a defer to avoid a deadlock in case the channels + // above get full + m.m.Unlock() +} + +func (m *Monitor) start() { + var events [128]syscall.EpollEvent + for { + n, err := archutils.EpollWait(m.epollFd, events[:], -1) + if err != nil { + if err == syscall.EINTR { + continue + } + logrus.WithField("error", err).Fatal("containerd: epoll wait") + } + // process events + for i := 0; i < n; i++ { + m.processEvent(int(events[i].Fd), events[i].Events) + } + } +} diff --git a/vendor/github.com/containerd/containerd/supervisor/monitor_solaris.go b/vendor/github.com/containerd/containerd/supervisor/monitor_solaris.go new file mode 100644 index 0000000..5b6b18e --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/monitor_solaris.go @@ -0,0 +1,144 @@ +// +build solaris,cgo + +package supervisor + +/* +#include <port.h> +#include <poll.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +int portAssociate(int port, int fd) { + if (port_associate(port, PORT_SOURCE_FD, fd, POLLIN | POLLHUP, NULL) < 0)
{ + return 1; + } + return 0; +} + +port_event_t* getEvent(int e_fd) { + port_event_t *ev; + ev = (port_event_t *)malloc(sizeof(port_event_t)); + if (port_get(e_fd, ev, NULL) < 0) { + return NULL; + } + return ev; +} + +int getFd(uintptr_t x) { + return *(int *)x; +} + +void freeEvent(port_event_t *ev) { + free(ev); +} +*/ +import "C" +import ( + "sync" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/runtime" +) + +// NewMonitor starts a new process monitor and returns it +func NewMonitor() (*Monitor, error) { + m := &Monitor{ + receivers: make(map[int]interface{}), + exits: make(chan runtime.Process, 1024), + ooms: make(chan string, 1024), + } + fd, err := C.port_create() + if err != nil { + return nil, err + } + + m.epollFd = int(fd) + go m.start() + return m, nil +} + +// Monitor represents a runtime.Process monitor +type Monitor struct { + m sync.Mutex + receivers map[int]interface{} + exits chan runtime.Process + ooms chan string + epollFd int +} + +// Exits returns the channel used to notify of a process exit +func (m *Monitor) Exits() chan runtime.Process { + return m.exits +} + +// OOMs returns the channel used to notify of a container exit due to OOM +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +// Monitor adds a process to the list of the ones being monitored +func (m *Monitor) Monitor(p runtime.Process) error { + m.m.Lock() + defer m.m.Unlock() + fd := p.ExitFD() + if _, err := C.port_associate(C.int(m.epollFd), C.PORT_SOURCE_FD, C.uintptr_t(fd), C.POLLIN|C.POLLHUP, unsafe.Pointer(&fd)); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = p + return nil +} + +// MonitorOOM adds a container to the list of the ones monitored for OOM +// There is no OOM killer on Solaris, so there is nothing to set up +func (m *Monitor) MonitorOOM(c runtime.Container) error { + return nil +} + +// Close cleans up resources allocated by NewMonitor() +func (m *Monitor) Close() error { + _, err := C.close(C.int(m.epollFd)) + return err +} + +func (m *Monitor) start() { + for { + ev := C.getEvent(C.int(m.epollFd)) + if ev == nil { + continue + } + fd := int(C.getFd(C.uintptr_t(uintptr((ev.portev_user))))) + + if fd < 0 { + logrus.Warnf("containerd: epoll wait") + } + + m.m.Lock() + r := m.receivers[fd] + switch t := r.(type) { + case runtime.Process: + if ev.portev_events == C.POLLHUP { + delete(m.receivers, fd) + if err := t.Close(); err != nil { + logrus.Warnf("containerd: close process IO") + } + EpollFdCounter.Dec(1) + m.exits <- t + } + case runtime.OOM: + // always flush the event fd + t.Flush() + if t.Removed() { + delete(m.receivers, fd) + // the event port will remove the fd from its set after it has been closed + t.Close() + EpollFdCounter.Dec(1) + } else { + m.ooms <- t.ContainerID() + } + } + m.m.Unlock() + C.freeEvent(ev) + } +} diff --git a/vendor/github.com/containerd/containerd/supervisor/oom.go b/vendor/github.com/containerd/containerd/supervisor/oom.go new file mode 100644 index 0000000..9d8c23d --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/oom.go @@ -0,0 +1,23 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +// OOMTask holds needed parameters to report a container OOM +type OOMTask struct { + baseTask + ID string +} + +func (s *Supervisor) oom(t *OOMTask) error { + logrus.WithField("id", t.ID).Debug("containerd: container oom") + s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.ID, + Type: StateOOM, + }) + return nil +}
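// A minimal sketch of reacting to the OOM events published by oom.go above
// (assumes s is a *Supervisor built with New; Events and Unsubscribe are
// part of this package's Supervisor API):
package example

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/supervisor"
)

func watchOOM(s *supervisor.Supervisor) {
	events := s.Events(time.Time{}, false, "")
	defer s.Unsubscribe(events)
	for e := range events {
		if e.Type == supervisor.StateOOM {
			fmt.Println("container OOM:", e.ID)
		}
	}
}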
diff --git a/vendor/github.com/containerd/containerd/supervisor/signal.go b/vendor/github.com/containerd/containerd/supervisor/signal.go new file mode 100644 index 0000000..55dfa79 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/signal.go @@ -0,0 +1,30 @@ +package supervisor + +import ( + "os" +) + +// SignalTask holds needed parameters to signal a container +type SignalTask struct { + baseTask + ID string + PID string + Signal os.Signal +} + +func (s *Supervisor) signal(t *SignalTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + for _, p := range processes { + if p.ID() == t.PID { + return p.Signal(t.Signal) + } + } + return ErrProcessNotFound +} diff --git a/vendor/github.com/containerd/containerd/supervisor/sort.go b/vendor/github.com/containerd/containerd/supervisor/sort.go new file mode 100644 index 0000000..35ccce1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/sort.go @@ -0,0 +1,27 @@ +package supervisor + +import ( + "sort" + + "github.com/containerd/containerd/runtime" +) + +func sortProcesses(p []runtime.Process) { + sort.Sort(&processSorter{p}) +} + +type processSorter struct { + processes []runtime.Process +} + +func (s *processSorter) Len() int { + return len(s.processes) +} + +func (s *processSorter) Swap(i, j int) { + s.processes[i], s.processes[j] = s.processes[j], s.processes[i] +} + +// Less sorts the init process last so that its exit is handled after the +// exits of any exec processes +func (s *processSorter) Less(i, j int) bool { + return s.processes[j].ID() == runtime.InitProcessID +} diff --git a/vendor/github.com/containerd/containerd/supervisor/stats.go b/vendor/github.com/containerd/containerd/supervisor/stats.go new file mode 100644 index 0000000..baeae25 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/stats.go @@ -0,0 +1,34 @@ +package supervisor + +import ( + "time" + + "github.com/containerd/containerd/runtime" +) + +// StatsTask holds needed parameters to retrieve a container's statistics +type StatsTask struct { + baseTask + ID string + Stat chan *runtime.Stat +} + +func (s *Supervisor) stats(t *StatsTask) error { + start := time.Now() + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + // TODO: use workers for this + go func() { + s, err := i.container.Stats() + if err != nil { + t.ErrorCh() <- err + return + } + t.ErrorCh() <- nil + t.Stat <- s + ContainerStatsTimer.UpdateSince(start) + }() + return errDeferredResponse +}
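// A minimal sketch of the deferred-response pattern stats.go uses: send
// the task, wait on ErrorCh, then read the Stat channel (assumes s is a
// *supervisor.Supervisor and id names a running container):
package example

import (
	"github.com/containerd/containerd/runtime"
	"github.com/containerd/containerd/supervisor"
)

func containerStats(s *supervisor.Supervisor, id string) (*runtime.Stat, error) {
	t := &supervisor.StatsTask{
		ID:   id,
		Stat: make(chan *runtime.Stat, 1),
	}
	s.SendTask(t)
	if err := <-t.ErrorCh(); err != nil {
		return nil, err
	}
	return <-t.Stat, nil
}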
diff --git a/vendor/github.com/containerd/containerd/supervisor/supervisor.go b/vendor/github.com/containerd/containerd/supervisor/supervisor.go new file mode 100644 index 0000000..41539b1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/supervisor.go @@ -0,0 +1,452 @@ +package supervisor + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/runtime" +) + +const ( + defaultBufferSize = 2048 // size of queue in eventloop +) + +// New returns an initialized Process supervisor. +func New(stateDir string, runtimeName, shimName string, runtimeArgs []string, timeout time.Duration, retainCount int) (*Supervisor, error) { + startTasks := make(chan *startTask, 10) + machine, err := CollectMachineInformation() + if err != nil { + return nil, err + } + monitor, err := NewMonitor() + if err != nil { + return nil, err + } + s := &Supervisor{ + stateDir: stateDir, + containers: make(map[string]*containerInfo), + startTasks: startTasks, + machine: machine, + subscribers: make(map[chan Event]struct{}), + tasks: make(chan Task, defaultBufferSize), + monitor: monitor, + runtime: runtimeName, + runtimeArgs: runtimeArgs, + shim: shimName, + timeout: timeout, + containerExecSync: make(map[string]map[string]chan struct{}), + } + if err := setupEventLog(s, retainCount); err != nil { + return nil, err + } + go s.exitHandler() + go s.oomHandler() + if err := s.restore(); err != nil { + return nil, err + } + return s, nil +} + +type containerInfo struct { + container runtime.Container +} + +func setupEventLog(s *Supervisor, retainCount int) error { + if err := readEventLog(s); err != nil { + return err + } + logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events") + events := s.Events(time.Time{}, false, "") + return eventLogger(s, filepath.Join(s.stateDir, "events.log"), events, retainCount) +} + +func eventLogger(s *Supervisor, path string, events chan Event, retainCount int) error { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755) + if err != nil { + return err + } + go func() { + var ( + count = len(s.eventLog) + enc = json.NewEncoder(f) + ) + for e := range events { + // if we have a specified retain count make sure to truncate the event + // log if it grows past the specified number of events to keep. + if retainCount > 0 { + if count > retainCount { + logrus.Debug("truncating event log") + // close the log file + if f != nil { + f.Close() + } + slice := retainCount - 1 + l := len(s.eventLog) + if slice >= l { + slice = l + } + s.eventLock.Lock() + s.eventLog = s.eventLog[len(s.eventLog)-slice:] + s.eventLock.Unlock() + if f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755); err != nil { + logrus.WithField("error", err).Error("containerd: open event to journal") + continue + } + enc = json.NewEncoder(f) + count = 0 + for _, le := range s.eventLog { + if err := enc.Encode(le); err != nil { + logrus.WithField("error", err).Error("containerd: write event to journal") + } + } + } + } + s.eventLock.Lock() + s.eventLog = append(s.eventLog, e) + s.eventLock.Unlock() + count++ + if err := enc.Encode(e); err != nil { + logrus.WithField("error", err).Error("containerd: write event to journal") + } + } + }() + return nil +} + +func readEventLog(s *Supervisor) error { + f, err := os.Open(filepath.Join(s.stateDir, "events.log")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var e eventV1 + if err := dec.Decode(&e); err != nil { + if err == io.EOF { + break + } + return err + } + + // We need to take care of -1 Status for backward compatibility + ev := e.Event + ev.Status = uint32(e.Status) + if ev.Status > runtime.UnknownStatus { + ev.Status = runtime.UnknownStatus + } + s.eventLog = append(s.eventLog, ev) + } + return nil +} + +// Supervisor represents a container supervisor +type Supervisor struct { + // stateDir is the directory on the system to store container runtime state information.
+ stateDir string + // name of the OCI compatible runtime used to execute containers + runtime string + runtimeArgs []string + shim string + containers map[string]*containerInfo + startTasks chan *startTask + // we need a lock around the subscribers map only because additions and deletions from + // the map are via the API so we cannot really control the concurrency + subscriberLock sync.RWMutex + subscribers map[chan Event]struct{} + machine Machine + tasks chan Task + monitor *Monitor + eventLog []Event + eventLock sync.Mutex + timeout time.Duration + // This is used to ensure that exec process death events are sent + // before the init process death + containerExecSyncLock sync.Mutex + containerExecSync map[string]map[string]chan struct{} +} + +// Stop closes all startTasks and sends a SIGTERM to each container's pid1 then waits for them to +// terminate. After it has handled all the SIGCHLD events it will close the signals chan +// and exit. Stop is a non-blocking call and will return after the containers have been signaled +func (s *Supervisor) Stop() { + // Close the startTasks channel so that no new containers get started + close(s.startTasks) +} + +// Close closes any open files in the supervisor but expects that Stop has been +// called so that no more containers are started. +func (s *Supervisor) Close() error { + return nil +} + +// Event represents a container event +type Event struct { + ID string `json:"id"` + Type string `json:"type"` + Timestamp time.Time `json:"timestamp"` + PID string `json:"pid,omitempty"` + Status uint32 `json:"status,omitempty"` +} + +type eventV1 struct { + Event + Status int `json:"status,omitempty"` +} + +// Events returns an event channel that external consumers can use to receive updates +// on container events +func (s *Supervisor) Events(from time.Time, storedOnly bool, id string) chan Event { + c := make(chan Event, defaultBufferSize) + if storedOnly { + defer s.Unsubscribe(c) + } + s.subscriberLock.Lock() + defer s.subscriberLock.Unlock() + if !from.IsZero() { + // replay old events + s.eventLock.Lock() + past := s.eventLog[:] + s.eventLock.Unlock() + for _, e := range past { + if e.Timestamp.After(from) { + if id == "" || e.ID == id { + c <- e + } + } + } + } + if storedOnly { + close(c) + } else { + EventSubscriberCounter.Inc(1) + s.subscribers[c] = struct{}{} + } + return c +} + +// Unsubscribe removes the provided channel from receiving any more events +func (s *Supervisor) Unsubscribe(sub chan Event) { + s.subscriberLock.Lock() + defer s.subscriberLock.Unlock() + if _, ok := s.subscribers[sub]; ok { + delete(s.subscribers, sub) + close(sub) + EventSubscriberCounter.Dec(1) + } +} + +// notifySubscribers will send the provided event to the external subscribers +// of the events channel +func (s *Supervisor) notifySubscribers(e Event) { + s.subscriberLock.RLock() + defer s.subscriberLock.RUnlock() + for sub := range s.subscribers { + // do a non-blocking send for the channel + select { + case sub <- e: + default: + logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber") + } + } +} + +// Start is a non-blocking call that runs the supervisor for monitoring container processes and +// executing new containers.
+// +// This event loop is the only thing that is allowed to modify state of containers and processes +// therefore it is safe to do operations in the handlers that modify state of the system or +// state of the Supervisor +func (s *Supervisor) Start() error { + logrus.WithFields(logrus.Fields{ + "stateDir": s.stateDir, + "runtime": s.runtime, + "runtimeArgs": s.runtimeArgs, + "memory": s.machine.Memory, + "cpus": s.machine.Cpus, + }).Debug("containerd: supervisor running") + go func() { + for i := range s.tasks { + s.handleTask(i) + } + }() + return nil +} + +// Machine returns the machine information of the host on which the +// supervisor is executing. +func (s *Supervisor) Machine() Machine { + return s.machine +} + +// SendTask sends the provided task to the supervisor's main event loop +func (s *Supervisor) SendTask(evt Task) { + TasksCounter.Inc(1) + s.tasks <- evt +} + +func (s *Supervisor) exitHandler() { + for p := range s.monitor.Exits() { + e := &ExitTask{ + Process: p, + } + s.SendTask(e) + } +} + +func (s *Supervisor) oomHandler() { + for id := range s.monitor.OOMs() { + e := &OOMTask{ + ID: id, + } + s.SendTask(e) + } +} + +func (s *Supervisor) monitorProcess(p runtime.Process) error { + return s.monitor.Monitor(p) +} + +func (s *Supervisor) restore() error { + dirs, err := ioutil.ReadDir(s.stateDir) + if err != nil { + return err + } + for _, d := range dirs { + if !d.IsDir() { + continue + } + id := d.Name() + container, err := runtime.Load(s.stateDir, id, s.shim, s.timeout) + if err != nil { + logrus.WithFields(logrus.Fields{"error": err, "id": id}).Warnf("containerd: failed to load container, removing state directory.") + os.RemoveAll(filepath.Join(s.stateDir, id)) + continue + } + processes, err := container.Processes() + if err != nil || len(processes) == 0 { + logrus.WithFields(logrus.Fields{"error": err, "id": id}).Warnf("containerd: container has no process running, removing state directory.") + os.RemoveAll(filepath.Join(s.stateDir, id)) + continue + } + + ContainersCounter.Inc(1) + s.containers[id] = &containerInfo{ + container: container, + } + if err := s.monitor.MonitorOOM(container); err != nil && err != runtime.ErrContainerExited { + logrus.WithField("error", err).Error("containerd: notify OOM events") + } + + s.newExecSyncMap(container.ID()) + + logrus.WithField("id", id).Debug("containerd: container restored") + var exitedProcesses []runtime.Process + for _, p := range processes { + if p.State() == runtime.Running { + if err := s.monitorProcess(p); err != nil { + return err + } + } else { + exitedProcesses = append(exitedProcesses, p) + } + if p.ID() != runtime.InitProcessID { + s.newExecSyncChannel(container.ID(), p.ID()) + } + } + if len(exitedProcesses) > 0 { + // sort processes so that init is fired last because that is how the kernel sends the + // exit events + sortProcesses(exitedProcesses) + for _, p := range exitedProcesses { + e := &ExitTask{ + Process: p, + } + s.SendTask(e) + } + } + } + return nil +} + +func (s *Supervisor) handleTask(i Task) { + var err error + switch t := i.(type) { + case *AddProcessTask: + err = s.addProcess(t) + case *CreateCheckpointTask: + err = s.createCheckpoint(t) + case *DeleteCheckpointTask: + err = s.deleteCheckpoint(t) + case *StartTask: + err = s.start(t) + case *DeleteTask: + err = s.delete(t) + case *ExitTask: + err = s.exit(t) + case *GetContainersTask: + err = s.getContainers(t) + case *SignalTask: + err = s.signal(t) + case *StatsTask: + err = s.stats(t) + case *UpdateTask: + err = s.updateContainer(t) + case
*UpdateProcessTask: + err = s.updateProcess(t) + case *OOMTask: + err = s.oom(t) + default: + err = ErrUnknownTask + } + if err != errDeferredResponse { + i.ErrorCh() <- err + close(i.ErrorCh()) + } +} + +func (s *Supervisor) newExecSyncMap(containerID string) { + s.containerExecSyncLock.Lock() + s.containerExecSync[containerID] = make(map[string]chan struct{}) + s.containerExecSyncLock.Unlock() +} + +func (s *Supervisor) newExecSyncChannel(containerID, pid string) { + s.containerExecSyncLock.Lock() + s.containerExecSync[containerID][pid] = make(chan struct{}) + s.containerExecSyncLock.Unlock() +} + +func (s *Supervisor) deleteExecSyncChannel(containerID, pid string) { + s.containerExecSyncLock.Lock() + delete(s.containerExecSync[containerID], pid) + s.containerExecSyncLock.Unlock() +} + +func (s *Supervisor) getExecSyncChannel(containerID, pid string) chan struct{} { + s.containerExecSyncLock.Lock() + ch := s.containerExecSync[containerID][pid] + s.containerExecSyncLock.Unlock() + return ch +} + +func (s *Supervisor) getExecSyncMap(containerID string) map[string]chan struct{} { + s.containerExecSyncLock.Lock() + defer s.containerExecSyncLock.Unlock() + return s.containerExecSync[containerID] +} + +func (s *Supervisor) deleteExecSyncMap(containerID string) { + s.containerExecSyncLock.Lock() + defer s.containerExecSyncLock.Unlock() + delete(s.containerExecSync, containerID) +} diff --git a/vendor/github.com/containerd/containerd/supervisor/task.go b/vendor/github.com/containerd/containerd/supervisor/task.go new file mode 100644 index 0000000..4980a35 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/task.go @@ -0,0 +1,34 @@ +package supervisor + +import ( + "sync" + + "github.com/containerd/containerd/runtime" +) + +// StartResponse is the response containing a started container +type StartResponse struct { + ExecPid int + Container runtime.Container +} + +// Task executes an action returning an error chan with either nil or +// the error from executing the task +type Task interface { + // ErrorCh returns a channel used to report an error from an async task + ErrorCh() chan error +} + +type baseTask struct { + errCh chan error + mu sync.Mutex +} + +func (t *baseTask) ErrorCh() chan error { + t.mu.Lock() + defer t.mu.Unlock() + if t.errCh == nil { + t.errCh = make(chan error, 1) + } + return t.errCh +}
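// A minimal sketch of a synchronous task round-trip through ErrorCh, using
// SignalTask from signal.go (assumes s is a *supervisor.Supervisor):
package example

import (
	"syscall"

	"github.com/containerd/containerd/supervisor"
)

func signalProcess(s *supervisor.Supervisor, id, pid string) error {
	t := &supervisor.SignalTask{
		ID:     id,
		PID:    pid,
		Signal: syscall.SIGTERM,
	}
	s.SendTask(t)
	// handleTask sends the handler's result on ErrorCh and closes it
	return <-t.ErrorCh()
}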
diff --git a/vendor/github.com/containerd/containerd/supervisor/types.go b/vendor/github.com/containerd/containerd/supervisor/types.go new file mode 100644 index 0000000..2e36fce --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/types.go @@ -0,0 +1,12 @@ +package supervisor + +// State constants used in Event types +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateStartProcess = "start-process" + StateOOM = "oom" + StateLive = "live" +) diff --git a/vendor/github.com/containerd/containerd/supervisor/update.go b/vendor/github.com/containerd/containerd/supervisor/update.go new file mode 100644 index 0000000..6ee4ff7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/update.go @@ -0,0 +1,95 @@ +package supervisor + +import ( + "time" + + "github.com/containerd/containerd/runtime" +) + +// UpdateTask holds needed parameters to update a container's resource constraints +type UpdateTask struct { + baseTask + ID string + State runtime.State + Resources *runtime.Resource +} + +func (s *Supervisor) updateContainer(t *UpdateTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + container := i.container + if t.State != "" { + switch t.State { + case runtime.Running: + if err := container.Resume(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StateResume, + Timestamp: time.Now(), + }) + case runtime.Paused: + if err := container.Pause(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StatePause, + Timestamp: time.Now(), + }) + default: + return ErrUnknownContainerStatus + } + return nil + } + if t.Resources != nil { + return container.UpdateResources(t.Resources) + } + return nil +} + +// UpdateProcessTask holds needed parameters to update a container +// process terminal size or close its stdin +type UpdateProcessTask struct { + baseTask + ID string + PID string + CloseStdin bool + Width int + Height int +} + +func (s *Supervisor) updateProcess(t *UpdateProcessTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + var process runtime.Process + for _, p := range processes { + if p.ID() == t.PID { + process = p + break + } + } + if process == nil { + return ErrProcessNotFound + } + if t.CloseStdin { + if err := process.CloseStdin(); err != nil { + return err + } + } + if t.Width > 0 || t.Height > 0 { + if err := process.Resize(t.Width, t.Height); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/supervisor/worker.go b/vendor/github.com/containerd/containerd/supervisor/worker.go new file mode 100644 index 0000000..652cd62 --- /dev/null +++ b/vendor/github.com/containerd/containerd/supervisor/worker.go @@ -0,0 +1,104 @@ +package supervisor + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/runtime" + "golang.org/x/net/context" +) + +// Worker interface +type Worker interface { + Start() +} + +type startTask struct { + Container runtime.Container + CheckpointPath string + Stdin string + Stdout string + Stderr string + Err chan error + StartResponse chan StartResponse + Ctx context.Context +} + +// NewWorker returns a new initialized worker +func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker { + return &worker{ + s: s, + wg: wg, + } +} + +type worker struct { + wg *sync.WaitGroup + s *Supervisor +} + +// Start runs a loop in charge of starting new containers +func (w *worker) Start() { + defer w.wg.Done() + for t := range w.s.startTasks { + started := time.Now() + process, err := t.Container.Start(t.Ctx, t.CheckpointPath, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + "id": t.Container.ID(), + }).Error("containerd: start container") + t.Err <- err + evt := &DeleteTask{ + ID: t.Container.ID(), + NoEvent: true, + Process: process, + } + w.s.SendTask(evt) + continue + } + if err := w.s.monitor.MonitorOOM(t.Container); err != nil && err != runtime.ErrContainerExited { + if process.State() != runtime.Stopped { + logrus.WithField("error", err).Error("containerd: notify OOM events") + } + } + if err := w.s.monitorProcess(process); err != nil { + logrus.WithField("error", err).Error("containerd: add process to monitor") + t.Err <- err + evt := &DeleteTask{ + ID: t.Container.ID(), + NoEvent: true, + Process: process, + } + w.s.SendTask(evt) + continue + } + // only call process start if we aren't restoring from a checkpoint + // if we have restored from a checkpoint then the process is
already started + if t.CheckpointPath == "" { + if err := process.Start(); err != nil { + logrus.WithField("error", err).Error("containerd: start init process") + t.Err <- err + evt := &DeleteTask{ + ID: t.Container.ID(), + NoEvent: true, + Process: process, + } + w.s.SendTask(evt) + continue + } + } + ContainerStartTimer.UpdateSince(started) + w.s.newExecSyncMap(t.Container.ID()) + t.Err <- nil + t.StartResponse <- StartResponse{ + Container: t.Container, + } + w.s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.Container.ID(), + Type: StateStart, + }) + } +} diff --git a/vendor/github.com/containerd/containerd/version.go b/vendor/github.com/containerd/containerd/version.go new file mode 100644 index 0000000..7a34f84 --- /dev/null +++ b/vendor/github.com/containerd/containerd/version.go @@ -0,0 +1,20 @@ +package containerd + +import "fmt" + +// VersionMajor holds the release major number +const VersionMajor = 0 + +// VersionMinor holds the release minor number +const VersionMinor = 2 + +// VersionPatch holds the release patch number +const VersionPatch = 3 + +// Version holds the combination of major minor and patch as a string +// of format Major.Minor.Patch +var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) + +// GitCommit is filled with the Git revision being used to build the +// program at linking time +var GitCommit = "" diff --git a/vendor/github.com/containerd/go-runc/LICENSE b/vendor/github.com/containerd/go-runc/LICENSE new file mode 100644 index 0000000..2abe416 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Michael Crosby. crosbymichael@gmail.com + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH +THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md new file mode 100644 index 0000000..2629773 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/README.md @@ -0,0 +1,42 @@ +# go-runc + +[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc) + + +This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications. +It tries to expose all the settings and features of the runc CLI. If there is something missing, then add it; it's open source! + +This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb) +or greater.
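// A minimal sketch of the client API the README describes (assumes the
// runc binary is on PATH; the import alias is needed because the package
// name differs from the path basename):
package example

import (
	"context"
	"fmt"

	runc "github.com/containerd/go-runc"
)

func listContainers() error {
	r := &runc.Runc{} // Command falls back to DefaultCommand ("runc")
	containers, err := r.List(context.Background())
	if err != nil {
		return err
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Status, c.Pid)
	}
	return nil
}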
+ +## Docs + +Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc). + + +## LICENSE - MIT + +Copyright (c) 2016 Michael Crosby. crosbymichael@gmail.com + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH +THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/containerd/go-runc/console.go b/vendor/github.com/containerd/go-runc/console.go new file mode 100644 index 0000000..d84bd9f --- /dev/null +++ b/vendor/github.com/containerd/go-runc/console.go @@ -0,0 +1,98 @@ +// +build cgo + +package runc + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + + "github.com/containerd/console" + "github.com/opencontainers/runc/libcontainer/utils" +) + +// NewConsoleSocket creates a new unix socket at the provided path to accept a +// pty master created by runc for use by the container +func NewConsoleSocket(path string) (*Socket, error) { + abs, err := filepath.Abs(path) + if err != nil { + return nil, err + } + l, err := net.Listen("unix", abs) + if err != nil { + return nil, err + } + return &Socket{ + l: l, + path: abs, + }, nil +} + +// NewTempConsoleSocket returns a temp console socket for use with a container +// On Close(), the socket is deleted +func NewTempConsoleSocket() (*Socket, error) { + dir, err := ioutil.TempDir("", "pty") + if err != nil { + return nil, err + } + abs, err := filepath.Abs(filepath.Join(dir, "pty.sock")) + if err != nil { + return nil, err + } + l, err := net.Listen("unix", abs) + if err != nil { + return nil, err + } + return &Socket{ + l: l, + rmdir: true, + path: abs, + }, nil +} + +// Socket is a unix socket that accepts the pty master created by runc +type Socket struct { + path string + rmdir bool + l net.Listener +} + +// Path returns the path to the unix socket on disk +func (c *Socket) Path() string { + return c.path +} + +// ReceiveMaster blocks until the socket receives the pty master +func (c *Socket) ReceiveMaster() (console.Console, error) { + conn, err := c.l.Accept() + if err != nil { + return nil, err + } + defer conn.Close() + unix, ok := conn.(*net.UnixConn) + if !ok { + return nil, fmt.Errorf("received connection which was not a unix socket") + } + sock, err := unix.File() + if err != nil { + return nil, err + } + f, err := utils.RecvFd(sock) + if err != nil { + return nil, err + } + return console.ConsoleFromFile(f) +} + +// Close closes the unix socket +func (c *Socket) Close() error { + err := c.l.Close() + if c.rmdir { + if rerr := os.RemoveAll(filepath.Dir(c.path)); err == nil { + err = rerr + } + } + return err +} diff --git 
a/vendor/github.com/containerd/go-runc/container.go b/vendor/github.com/containerd/go-runc/container.go new file mode 100644 index 0000000..7981aac --- /dev/null +++ b/vendor/github.com/containerd/go-runc/container.go @@ -0,0 +1,14 @@ +package runc + +import "time" + +// Container holds information for a runc container +type Container struct { + ID string `json:"id"` + Pid int `json:"pid"` + Status string `json:"status"` + Bundle string `json:"bundle"` + Rootfs string `json:"rootfs"` + Created time.Time `json:"created"` + Annotations map[string]string `json:"annotations"` +} diff --git a/vendor/github.com/containerd/go-runc/events.go b/vendor/github.com/containerd/go-runc/events.go new file mode 100644 index 0000000..ca0e048 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/events.go @@ -0,0 +1,84 @@ +package runc + +type Event struct { + // Type is the event type generated by runc + // If the type is "error" then check the Err field on the event for + // the actual error + Type string `json:"type"` + ID string `json:"id"` + Stats *Stats `json:"data,omitempty"` + // Err has a read error if we were unable to decode the event from runc + Err error `json:"-"` +} + +type Stats struct { + Cpu Cpu `json:"cpu"` + Memory Memory `json:"memory"` + Pids Pids `json:"pids"` + Blkio Blkio `json:"blkio"` + Hugetlb map[string]Hugetlb `json:"hugetlb"` +} + +type Hugetlb struct { + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type BlkioEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type Blkio struct { + IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"` + IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"` + IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"` + IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` + IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` + IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` + IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` + SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` +} + +type Pids struct { + Current uint64 `json:"current,omitempty"` + Limit uint64 `json:"limit,omitempty"` +} + +type Throttling struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` + ThrottledTime uint64 `json:"throttledTime,omitempty"` +} + +type CpuUsage struct { + // Units: nanoseconds.
+ Total uint64 `json:"total,omitempty"` + Percpu []uint64 `json:"percpu,omitempty"` + Kernel uint64 `json:"kernel"` + User uint64 `json:"user"` +} + +type Cpu struct { + Usage CpuUsage `json:"usage,omitempty"` + Throttling Throttling `json:"throttling,omitempty"` +} + +type MemoryEntry struct { + Limit uint64 `json:"limit"` + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type Memory struct { + Cache uint64 `json:"cache,omitempty"` + Usage MemoryEntry `json:"usage,omitempty"` + Swap MemoryEntry `json:"swap,omitempty"` + Kernel MemoryEntry `json:"kernel,omitempty"` + KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` + Raw map[string]uint64 `json:"raw,omitempty"` +} diff --git a/vendor/github.com/containerd/go-runc/io.go b/vendor/github.com/containerd/go-runc/io.go new file mode 100644 index 0000000..f6f7d2d --- /dev/null +++ b/vendor/github.com/containerd/go-runc/io.go @@ -0,0 +1,165 @@ +package runc + +import ( + "io" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +type IO interface { + io.Closer + Stdin() io.WriteCloser + Stdout() io.ReadCloser + Stderr() io.ReadCloser + Set(*exec.Cmd) +} + +type StartCloser interface { + CloseAfterStart() error +} + +// NewPipeIO creates pipe pairs to be used with runc +func NewPipeIO(uid, gid int) (i IO, err error) { + var pipes []*pipe + // cleanup in case of an error + defer func() { + if err != nil { + for _, p := range pipes { + p.Close() + } + } + }() + stdin, err := newPipe(uid, gid) + if err != nil { + return nil, err + } + pipes = append(pipes, stdin) + + stdout, err := newPipe(uid, gid) + if err != nil { + return nil, err + } + pipes = append(pipes, stdout) + + stderr, err := newPipe(uid, gid) + if err != nil { + return nil, err + } + pipes = append(pipes, stderr) + + return &pipeIO{ + in: stdin, + out: stdout, + err: stderr, + }, nil +} + +func newPipe(uid, gid int) (*pipe, error) { + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + if err := unix.Fchown(int(r.Fd()), uid, gid); err != nil { + return nil, err + } + if err := unix.Fchown(int(w.Fd()), uid, gid); err != nil { + return nil, err + } + return &pipe{ + r: r, + w: w, + }, nil +} + +type pipe struct { + r *os.File + w *os.File +} + +func (p *pipe) Close() error { + err := p.r.Close() + if werr := p.w.Close(); err == nil { + err = werr + } + return err +} + +type pipeIO struct { + in *pipe + out *pipe + err *pipe +} + +func (i *pipeIO) Stdin() io.WriteCloser { + return i.in.w +} + +func (i *pipeIO) Stdout() io.ReadCloser { + return i.out.r +} + +func (i *pipeIO) Stderr() io.ReadCloser { + return i.err.r +} + +func (i *pipeIO) Close() error { + var err error + for _, v := range []*pipe{ + i.in, + i.out, + i.err, + } { + if cerr := v.Close(); err == nil { + err = cerr + } + } + return err +} + +func (i *pipeIO) CloseAfterStart() error { + for _, f := range []*os.File{ + i.out.w, + i.err.w, + } { + f.Close() + } + return nil +} + +// Set sets the io to the exec.Cmd +func (i *pipeIO) Set(cmd *exec.Cmd) { + cmd.Stdin = i.in.r + cmd.Stdout = i.out.w + cmd.Stderr = i.err.w +} + +// NewSTDIO returns an IO implementation that is wired to the standard +// input, output, and error streams of the current process +func NewSTDIO() (IO, error) { + return &stdio{}, nil +} + +type stdio struct { +} + +func (s *stdio) Close() error { + return nil +} + +func (s *stdio) Set(cmd *exec.Cmd) { + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr +} + +func (s *stdio) Stdin() io.WriteCloser { + return os.Stdin +} + +func (s *stdio) Stdout() io.ReadCloser { + return os.Stdout +} + +func (s *stdio) Stderr() io.ReadCloser { + return os.Stderr +}
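// A minimal sketch of wiring NewPipeIO into a command: Set installs the
// pipe ends on the *exec.Cmd and CloseAfterStart drops the parent's copies
// of the child's write ends ("cat" stands in for runc here; the current
// uid/gid keeps the chown in newPipe unprivileged):
package example

import (
	"io"
	"os"
	"os/exec"

	runc "github.com/containerd/go-runc"
)

func pipeDemo() error {
	pio, err := runc.NewPipeIO(os.Getuid(), os.Getgid())
	if err != nil {
		return err
	}
	defer pio.Close()
	cmd := exec.Command("cat")
	pio.Set(cmd)
	if err := runc.Monitor.Start(cmd); err != nil {
		return err
	}
	if c, ok := pio.(runc.StartCloser); ok {
		c.CloseAfterStart()
	}
	pio.Stdin().Write([]byte("hello\n"))
	pio.Stdin().Close()
	io.Copy(os.Stdout, pio.Stdout()) // drains until the child exits
	_, err = runc.Monitor.Wait(cmd)
	return err
}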
diff --git a/vendor/github.com/containerd/go-runc/monitor.go b/vendor/github.com/containerd/go-runc/monitor.go new file mode 100644 index 0000000..83eebb0 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/monitor.go @@ -0,0 +1,50 @@ +package runc + +import ( + "os/exec" + "syscall" +) + +// Monitor is the default ProcessMonitor used by this package +var Monitor ProcessMonitor = &defaultMonitor{} + +// ProcessMonitor is an interface for process monitoring +// +// It allows daemons using go-runc to have a SIGCHLD handler +// to handle exits without introducing races between the handler +// and go's exec.Cmd +// These methods should match the methods exposed by exec.Cmd to provide +// a consistent experience for the caller +type ProcessMonitor interface { + Output(*exec.Cmd) ([]byte, error) + CombinedOutput(*exec.Cmd) ([]byte, error) + Run(*exec.Cmd) error + Start(*exec.Cmd) error + Wait(*exec.Cmd) (int, error) +} + +type defaultMonitor struct { +} + +func (m *defaultMonitor) Output(c *exec.Cmd) ([]byte, error) { + return c.Output() +} + +func (m *defaultMonitor) CombinedOutput(c *exec.Cmd) ([]byte, error) { + return c.CombinedOutput() +} + +func (m *defaultMonitor) Run(c *exec.Cmd) error { + return c.Run() +} + +func (m *defaultMonitor) Start(c *exec.Cmd) error { + return c.Start() +} + +func (m *defaultMonitor) Wait(c *exec.Cmd) (int, error) { + status, err := c.Process.Wait() + if err != nil { + return -1, err + } + return status.Sys().(syscall.WaitStatus).ExitStatus(), nil +} diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go new file mode 100644 index 0000000..9515deb --- /dev/null +++ b/vendor/github.com/containerd/go-runc/runc.go @@ -0,0 +1,553 @@ +package runc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + "time" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Format is the type of log formatting options available +type Format string + +const ( + none Format = "" + JSON Format = "json" + Text Format = "text" + // DefaultCommand is the default command for Runc + DefaultCommand = "runc" +) + +// Runc is the client to the runc cli +type Runc struct { + // If Command is empty, DefaultCommand is used + Command string + Root string + Debug bool + Log string + LogFormat Format + PdeathSignal syscall.Signal + Criu string +} + +// List returns all containers created inside the provided runc root directory +func (r *Runc) List(context context.Context) ([]*Container, error) { + data, err := Monitor.Output(r.command(context, "list", "--format=json")) + if err != nil { + return nil, err + } + var out []*Container + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +// State returns the state for the container provided by id +func (r *Runc) State(context context.Context, id string) (*Container, error) { + data, err := Monitor.CombinedOutput(r.command(context, "state", id)) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var c Container + if err := json.Unmarshal(data, &c); err != nil { + return nil, err + } + return &c, nil +} + +type ConsoleSocket interface { + Path() string +} + +type CreateOpts struct { + IO + // PidFile is a path to where a pid file should be created + PidFile string + ConsoleSocket ConsoleSocket + Detach bool + NoPivot bool + NoNewKeyring bool + ExtraFiles []*os.File +} + +func (o *CreateOpts) args() (out []string, err error) { + if o.PidFile != "" { + abs, err :=
filepath.Abs(o.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + if o.ConsoleSocket != nil { + out = append(out, "--console-socket", o.ConsoleSocket.Path()) + } + if o.NoPivot { + out = append(out, "--no-pivot") + } + if o.NoNewKeyring { + out = append(out, "--no-new-keyring") + } + if o.Detach { + out = append(out, "--detach") + } + if o.ExtraFiles != nil { + out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles))) + } + return out, nil +} + +// Create creates a new container and returns an error if the create fails +func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error { + args := []string{"create", "--bundle", bundle} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) + if opts != nil && opts.IO != nil { + opts.Set(cmd) + } + // guard against a nil opts before reading ExtraFiles + if opts != nil { + cmd.ExtraFiles = opts.ExtraFiles + } + + if cmd.Stdout == nil && cmd.Stderr == nil { + data, err := Monitor.CombinedOutput(cmd) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil + } + if err := Monitor.Start(cmd); err != nil { + return err + } + if opts != nil && opts.IO != nil { + if c, ok := opts.IO.(StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return err + } + } + } + _, err := Monitor.Wait(cmd) + return err +} + +// Start will start an already created container +func (r *Runc) Start(context context.Context, id string) error { + return r.runOrError(r.command(context, "start", id)) +} + +type ExecOpts struct { + IO + PidFile string + ConsoleSocket ConsoleSocket + Detach bool +} + +func (o *ExecOpts) args() (out []string, err error) { + if o.ConsoleSocket != nil { + out = append(out, "--console-socket", o.ConsoleSocket.Path()) + } + if o.Detach { + out = append(out, "--detach") + } + if o.PidFile != "" { + abs, err := filepath.Abs(o.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + return out, nil +} + +// Exec executes an additional process inside the container based on a full +// OCI Process specification +func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error { + f, err := ioutil.TempFile("", "runc-process") + if err != nil { + return err + } + defer os.Remove(f.Name()) + err = json.NewEncoder(f).Encode(spec) + f.Close() + if err != nil { + return err + } + args := []string{"exec", "--process", f.Name()} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) + if opts != nil && opts.IO != nil { + opts.Set(cmd) + } + if cmd.Stdout == nil && cmd.Stderr == nil { + data, err := Monitor.CombinedOutput(cmd) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil + } + if err := Monitor.Start(cmd); err != nil { + return err + } + if opts != nil && opts.IO != nil { + if c, ok := opts.IO.(StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return err + } + } + } + _, err = Monitor.Wait(cmd) + return err +}
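// A minimal sketch of the create/start flow defined above (id and bundle
// are illustrative; the bundle directory must contain an OCI config.json):
package example

import (
	"context"

	runc "github.com/containerd/go-runc"
)

func createAndStart(ctx context.Context, r *runc.Runc, id, bundle string) error {
	opts := &runc.CreateOpts{PidFile: "/tmp/demo.pid"} // hypothetical path
	if err := r.Create(ctx, id, bundle, opts); err != nil {
		return err
	}
	return r.Start(ctx, id)
}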
+ +// Run runs the create, start, delete lifecycle of the container +// and returns its exit status after it has exited +func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) { + args := []string{"run", "--bundle", bundle} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return -1, err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) + if opts != nil { + opts.Set(cmd) + } + if err := Monitor.Start(cmd); err != nil { + return -1, err + } + return Monitor.Wait(cmd) +} + +// Delete deletes the container +func (r *Runc) Delete(context context.Context, id string) error { + return r.runOrError(r.command(context, "delete", id)) +} + +// KillOpts specifies options for killing a container and its processes +type KillOpts struct { + All bool +} + +func (o *KillOpts) args() (out []string) { + if o.All { + out = append(out, "--all") + } + return out +} + +// Kill sends the specified signal to the container +func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error { + args := []string{ + "kill", + } + if opts != nil { + args = append(args, opts.args()...) + } + return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...)) +} + +// Stats returns the stats for a container like cpu, memory, and io +func (r *Runc) Stats(context context.Context, id string) (*Stats, error) { + cmd := r.command(context, "events", "--stats", id) + rd, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + defer func() { + rd.Close() + Monitor.Wait(cmd) + }() + if err := Monitor.Start(cmd); err != nil { + return nil, err + } + var e Event + if err := json.NewDecoder(rd).Decode(&e); err != nil { + return nil, err + } + return e.Stats, nil +} + +// Events returns an event stream from runc for a container with stats and OOM notifications +func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) { + cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id) + rd, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + if err := Monitor.Start(cmd); err != nil { + rd.Close() + return nil, err + } + var ( + dec = json.NewDecoder(rd) + c = make(chan *Event, 128) + ) + go func() { + defer func() { + close(c) + rd.Close() + Monitor.Wait(cmd) + }() + for { + var e Event + if err := dec.Decode(&e); err != nil { + if err == io.EOF { + return + } + e = Event{ + Type: "error", + Err: err, + } + } + c <- &e + } + }() + return c, nil +} + +// Pause the container with the provided id +func (r *Runc) Pause(context context.Context, id string) error { + return r.runOrError(r.command(context, "pause", id)) +} + +// Resume the container with the provided id +func (r *Runc) Resume(context context.Context, id string) error { + return r.runOrError(r.command(context, "resume", id)) +} + +// Ps lists all the processes inside the container returning their pids +func (r *Runc) Ps(context context.Context, id string) ([]int, error) { + data, err := Monitor.CombinedOutput(r.command(context, "ps", "--format", "json", id)) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var pids []int + if err := json.Unmarshal(data, &pids); err != nil { + return nil, err + } + return pids, nil +}
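// A minimal sketch of consuming the Events stream defined above (assumes
// id names a running container; decode failures arrive as events with
// Type "error" and Err set):
package example

import (
	"context"
	"fmt"
	"log"
	"time"

	runc "github.com/containerd/go-runc"
)

func watchEvents(ctx context.Context, r *runc.Runc, id string) error {
	ch, err := r.Events(ctx, id, 5*time.Second)
	if err != nil {
		return err
	}
	for e := range ch {
		if e.Type == "error" {
			log.Println("event decode:", e.Err)
			continue
		}
		fmt.Println(e.Type, e.ID, e.Stats != nil)
	}
	return nil
}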
+ +type CheckpointOpts struct { + // ImagePath is the path for saving the criu image file + ImagePath string + // WorkDir is the working directory for criu + WorkDir string + // ParentPath is the path for previous image files from a pre-dump + ParentPath string + // AllowOpenTCP allows open tcp connections to be checkpointed + AllowOpenTCP bool + // AllowExternalUnixSockets allows external unix sockets to be checkpointed + AllowExternalUnixSockets bool + // AllowTerminal allows the terminal (pty) to be checkpointed with a container + AllowTerminal bool + // CriuPageServer is the address:port for the criu page server + CriuPageServer string + // FileLocks handles file locks held by the container + FileLocks bool + // Cgroups is the cgroup mode for how to handle the checkpoint of a container's cgroups + Cgroups CgroupMode + // EmptyNamespaces creates a namespace for the container but does not save its properties + // Provide the namespaces you wish to be checkpointed without their settings on restore + EmptyNamespaces []string +} + +type CgroupMode string + +const ( + Soft CgroupMode = "soft" + Full CgroupMode = "full" + Strict CgroupMode = "strict" +) + +func (o *CheckpointOpts) args() (out []string) { + if o.ImagePath != "" { + out = append(out, "--image-path", o.ImagePath) + } + if o.WorkDir != "" { + out = append(out, "--work-path", o.WorkDir) + } + if o.ParentPath != "" { + out = append(out, "--parent-path", o.ParentPath) + } + if o.AllowOpenTCP { + out = append(out, "--tcp-established") + } + if o.AllowExternalUnixSockets { + out = append(out, "--ext-unix-sk") + } + if o.AllowTerminal { + out = append(out, "--shell-job") + } + if o.CriuPageServer != "" { + out = append(out, "--page-server", o.CriuPageServer) + } + if o.FileLocks { + out = append(out, "--file-locks") + } + if string(o.Cgroups) != "" { + out = append(out, "--manage-cgroups-mode", string(o.Cgroups)) + } + for _, ns := range o.EmptyNamespaces { + out = append(out, "--empty-ns", ns) + } + return out +} + +type CheckpointAction func([]string) []string + +// LeaveRunning keeps the container running after the checkpoint has been completed +func LeaveRunning(args []string) []string { + return append(args, "--leave-running") +} + +// PreDump allows a pre-dump of the checkpoint to be made and completed later +func PreDump(args []string) []string { + return append(args, "--pre-dump") +} + +// Checkpoint allows you to checkpoint a container using criu +func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error { + args := []string{"checkpoint"} + if opts != nil { + args = append(args, opts.args()...) + } + for _, a := range actions { + args = a(args) + } + return r.runOrError(r.command(context, append(args, id)...)) +}
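// A minimal sketch of a criu checkpoint that leaves the container running
// (paths are illustrative; criu must be installed for runc checkpoint):
package example

import (
	"context"

	runc "github.com/containerd/go-runc"
)

func checkpoint(ctx context.Context, r *runc.Runc, id string) error {
	opts := &runc.CheckpointOpts{
		ImagePath: "/tmp/ckpt",
		WorkDir:   "/tmp/ckpt-work",
	}
	return r.Checkpoint(ctx, id, opts, runc.LeaveRunning)
}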
+ +type RestoreOpts struct { + CheckpointOpts + IO + + Detach bool + PidFile string + NoSubreaper bool + NoPivot bool +} + +func (o *RestoreOpts) args() ([]string, error) { + out := o.CheckpointOpts.args() + if o.Detach { + out = append(out, "--detach") + } + if o.PidFile != "" { + abs, err := filepath.Abs(o.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + if o.NoPivot { + out = append(out, "--no-pivot") + } + if o.NoSubreaper { + out = append(out, "-no-subreaper") + } + return out, nil +} + +// Restore restores a container with the provided id from an existing checkpoint +func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) { + args := []string{"restore"} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return -1, err + } + args = append(args, oargs...) + } + args = append(args, "--bundle", bundle) + cmd := r.command(context, append(args, id)...) + if opts != nil { + opts.Set(cmd) + } + if err := Monitor.Start(cmd); err != nil { + return -1, err + } + return Monitor.Wait(cmd) +} + +// Update updates the current container with the provided resource spec +func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error { + buf := bytes.NewBuffer(nil) + if err := json.NewEncoder(buf).Encode(resources); err != nil { + return err + } + args := []string{"update", "--resources", "-", id} + cmd := r.command(context, args...) + cmd.Stdin = buf + return r.runOrError(cmd) +} + +func (r *Runc) args() (out []string) { + if r.Root != "" { + out = append(out, "--root", r.Root) + } + if r.Debug { + out = append(out, "--debug") + } + if r.Log != "" { + out = append(out, "--log", r.Log) + } + if r.LogFormat != none { + out = append(out, "--log-format", string(r.LogFormat)) + } + if r.Criu != "" { + out = append(out, "--criu", r.Criu) + } + return out +} + +func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { + command := r.Command + if command == "" { + command = DefaultCommand + } + cmd := exec.CommandContext(context, command, append(r.args(), args...)...) + if r.PdeathSignal != 0 { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: r.PdeathSignal, + } + } + return cmd +} + +// runOrError will run the provided command. If an error is +// encountered and neither Stdout nor Stderr was set, the error and the +// stderr of the command will be returned in the format of +// <error>: <stderr> +func (r *Runc) runOrError(cmd *exec.Cmd) error { + if cmd.Stdout != nil || cmd.Stderr != nil { + return Monitor.Run(cmd) + } + data, err := Monitor.CombinedOutput(cmd) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil +} diff --git a/vendor/github.com/containerd/go-runc/utils.go b/vendor/github.com/containerd/go-runc/utils.go new file mode 100644 index 0000000..81fcd3f --- /dev/null +++ b/vendor/github.com/containerd/go-runc/utils.go @@ -0,0 +1,28 @@ +package runc + +import ( + "io/ioutil" + "strconv" + "syscall" +) + +// ReadPidFile reads the pid file at the provided path and returns +// the pid or an error if the read or conversion is unsuccessful +func ReadPidFile(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, err + } + return strconv.Atoi(string(data)) +} + +const exitSignalOffset = 128 + +// exitStatus returns the correct exit status for a process based on if it +// was signaled or exited cleanly +func exitStatus(status syscall.WaitStatus) int { + if status.Signaled() { + return exitSignalOffset + int(status.Signal()) + } + return status.ExitStatus() +} diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
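As a usage note for the go-runc checkpoint API vendored above, a minimal sketch follows. It is illustrative only: the container id `mycontainer` and the image path are hypothetical, and it assumes a criu-enabled runc binary on `$PATH` (the zero-value `Runc` falls back to `DefaultCommand`).

```go
package main

import (
	"context"
	"log"

	runc "github.com/containerd/go-runc"
)

func main() {
	// Zero-value Runc uses DefaultCommand ("runc") in command().
	r := &runc.Runc{}

	opts := &runc.CheckpointOpts{
		ImagePath:    "/tmp/ckpt-mycontainer", // hypothetical dump location (--image-path)
		AllowOpenTCP: true,                    // --tcp-established
		Cgroups:      runc.Soft,               // --manage-cgroups-mode soft
	}

	// LeaveRunning appends --leave-running so the container keeps running
	// after the dump completes.
	if err := r.Checkpoint(context.Background(), "mycontainer", opts, runc.LeaveRunning); err != nil {
		log.Fatalf("checkpoint failed: %v", err)
	}
}
```

CheckpointAction values compose in the same way; PreDump, for example, can be passed alongside or instead of LeaveRunning when a later final checkpoint is planned.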
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 0000000..b39ddfa --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/README.md b/vendor/github.com/coreos/etcd/README.md new file mode 100644 index 0000000..e7d4e23 --- /dev/null +++ b/vendor/github.com/coreos/etcd/README.md @@ -0,0 +1,139 @@ +# etcd + +[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd)](https://goreportcard.com/report/github.com/coreos/etcd) +[![Build Status](https://travis-ci.org/coreos/etcd.svg?branch=master)](https://travis-ci.org/coreos/etcd) +[![Build Status](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd) +[![Docker Repository on Quay.io](https://quay.io/repository/coreos/etcd-git/status "Docker Repository on Quay.io")](https://quay.io/repository/coreos/etcd-git) + +**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries. + +*the etcd v2 [documentation](Documentation/v2/README.md) has moved* + +![etcd Logo](logos/etcd-horizontal-color.png) + +etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being: + +* *Simple*: well-defined, user-facing API (gRPC) +* *Secure*: automatic TLS with optional client cert authentication +* *Fast*: benchmarked 10,000 writes/sec +* *Reliable*: properly distributed using Raft + +etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log. + +etcd is used [in production by many companies](./Documentation/production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], [Doorman][doorman], and many others. Reliability is further ensured by rigorous [testing][etcd-tests]. + +See [etcdctl][etcdctl] for a simple command line client. + +[raft]: https://raft.github.io/ +[k8s]: http://kubernetes.io/ +[doorman]: https://github.com/youtube/doorman +[fleet]: https://github.com/coreos/fleet +[locksmith]: https://github.com/coreos/locksmith +[vulcand]: https://github.com/vulcand/vulcand +[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl +[etcd-tests]: http://dash.etcd.io + +## Getting started + +### Getting etcd + +The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release]. + +For those wanting to try the very latest version, you can [build the latest version of etcd][dl-build] from the `master` branch. +You will first need [*Go*](https://golang.org/) installed on your machine (version 1.6+ is required). +All development occurs on `master`, including new features and bug fixes. +Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide. 
+
+[github-release]: https://github.com/coreos/etcd/releases/
+[branch-management]: ./Documentation/branch_management.md
+[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
+
+### Running etcd
+
+First start a single-member cluster of etcd:
+
+```sh
+./bin/etcd
+```
+
+This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
+
+Next, let's set a single key, and then retrieve it:
+
+```
+ETCDCTL_API=3 etcdctl put mykey "this is awesome"
+ETCDCTL_API=3 etcdctl get mykey
+```
+
+That's it! etcd is now running and serving client requests. For more:
+
+- [Animated quick demo][demo-gif]
+- [Interactive etcd playground][etcd-play]
+
+[demo-gif]: ./Documentation/demo.md
+[etcd-play]: http://play.etcd.io/
+
+### etcd TCP ports
+
+The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
+
+[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
+
+### Running a local etcd cluster
+
+First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
+
+Our [Procfile script](./Procfile) will set up a local example cluster. Start it with:
+
+```sh
+goreman start
+```
+
+This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd proxy `proxy`, which runs locally and composes a cluster.
+
+Every cluster member and proxy accepts key value reads and key value writes.
+
+### Running etcd on Kubernetes
+
+If you want to run an etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
+
+### Next steps
+
+Now it's time to dig into the full etcd API and other guides.
+
+- Read the full [documentation][fulldoc].
+- Explore the full gRPC [API][api].
+- Set up a [multi-machine cluster][clustering].
+- Learn the [config format, env variables and flags][configuration].
+- Find [language bindings and tools][libraries-and-tools].
+- Use TLS to [secure an etcd cluster][security].
+- [Tune etcd][tuning].
+
+[fulldoc]: ./Documentation/docs.md
+[api]: ./Documentation/dev-guide/api_reference_v3.md
+[clustering]: ./Documentation/op-guide/clustering.md
+[configuration]: ./Documentation/op-guide/configuration.md
+[libraries-and-tools]: ./Documentation/libraries-and-tools.md
+[security]: ./Documentation/op-guide/security.md
+[tuning]: ./Documentation/tuning.md
+
+## Contact
+
+- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
+- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
+- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](./ROADMAP.md)
+- Bugs: [issues](https://github.com/coreos/etcd/issues)
+
+## Contributing
+
+See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
+
+## Reporting bugs
+
+See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issue you may encounter.
+
+### License
+
+etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
+
+
diff --git a/vendor/github.com/coreos/etcd/pkg/README.md b/vendor/github.com/coreos/etcd/pkg/README.md
new file mode 100644
index 0000000..d7de4d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/README.md
@@ -0,0 +1,2 @@
+pkg/ is a collection of utility packages used by etcd without being specific to etcd itself. A package belongs here
+only if it could possibly be moved out into its own repository in the future.
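As a programmatic counterpart to the etcdctl quick start in the README above, a minimal sketch using etcd's Go client (the clientv3 package from the etcd repository, which is not part of the vendored subset in this patch) might look like the following; the endpoint and key are illustrative, and a member must already be listening on 2379:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Connect to a locally running etcd member (see "Running etcd" above).
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Equivalent of: etcdctl put mykey "this is awesome"
	if _, err := cli.Put(ctx, "mykey", "this is awesome"); err != nil {
		log.Fatal(err)
	}

	// Equivalent of: etcdctl get mykey
	resp, err := cli.Get(ctx, "mykey")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s: %s\n", kv.Key, kv.Value)
	}
}
```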
diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/github.com/coreos/etcd/pkg/crc/crc.go
new file mode 100644
index 0000000..4b998a4
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/crc/crc.go
@@ -0,0 +1,43 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc provides utility functions for cyclic redundancy check
+// algorithms.
+package crc
+
+import (
+	"hash"
+	"hash/crc32"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+type digest struct {
+	crc uint32
+	tab *crc32.Table
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+// Modified by xiangli to take a prevcrc.
+func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} }
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Reset() { d.crc = 0 }
+
+func (d *digest) Write(p []byte) (n int, err error) {
+	d.crc = crc32.Update(d.crc, d.tab, p)
+	return len(p), nil
+}
+
+func (d *digest) Sum32() uint32 { return d.crc }
+
+func (d *digest) Sum(in []byte) []byte {
+	s := d.Sum32()
+	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
new file mode 100644
index 0000000..58a77df
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package fileutil
+
+import "os"
+
+// OpenDir opens a directory for syncing.
+func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
new file mode 100644
index 0000000..c123395
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// OpenDir opens a directory in windows with write access for syncing.
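+// (FILE_FLAG_BACKUP_SEMANTICS is required to obtain a directory handle at all,
+// and GENERIC_WRITE lets that handle be flushed to disk.)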
+func OpenDir(path string) (*os.File, error) {
+	fd, err := openDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDir(path string) (fd syscall.Handle, err error) {
+	if len(path) == 0 {
+		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return syscall.InvalidHandle, err
+	}
+	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+	createmode := uint32(syscall.OPEN_EXISTING)
+	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
new file mode 100644
index 0000000..9585ed5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	// PrivateFileMode grants the owner permission to read/write a file.
+	PrivateFileMode = 0600
+	// PrivateDirMode grants the owner permission to make/remove files inside the directory.
+	PrivateDirMode = 0700
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
+)
+
+// IsDirWriteable checks if dir is writable by writing and removing a file
+// in dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+	f := path.Join(dir, ".touch")
+	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+		return err
+	}
+	return os.Remove(f)
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(dirpath string) ([]string, error) {
+	dir, err := os.Open(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+	// If path is already a directory, MkdirAll does nothing
+	// and returns nil.
+	err := os.MkdirAll(dir, PrivateDirMode)
+	if err != nil {
+		// if mkdirAll("a/text") and "text" is not
+		// a directory, this will return syscall.ENOTDIR
+		return err
+	}
+	return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory was not empty.
+func CreateDirAll(dir string) error { + err := TouchDirAll(dir) + if err == nil { + var ns []string + ns, err = ReadDir(dir) + if err != nil { + return err + } + if len(ns) != 0 { + err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) + } + } + return err +} + +func Exist(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily +// shorten the length of the file. +func ZeroToEnd(f *os.File) error { + // TODO: support FALLOC_FL_ZERO_RANGE + off, err := f.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + lenf, lerr := f.Seek(0, os.SEEK_END) + if lerr != nil { + return lerr + } + if err = f.Truncate(off); err != nil { + return err + } + // make sure blocks remain allocated + if err = Preallocate(f, lenf, true); err != nil { + return err + } + _, err = f.Seek(off, os.SEEK_SET) + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go new file mode 100644 index 0000000..338627f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "errors" + "os" +) + +var ( + ErrLocked = errors.New("fileutil: file already locked") +) + +type LockedFile struct{ *os.File } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go new file mode 100644 index 0000000..542550b --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!plan9,!solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go new file mode 100644 index 0000000..dec25a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -0,0 +1,96 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +// This used to call syscall.Flock() but that call fails with EBADF on NFS. +// An alternative is lockf() which works on NFS but that call lets a process lock +// the same file twice. Instead, use Linux's non-standard open file descriptor +// locks which will block if the process already holds the file lock. 
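+// (The F_OFD_* constants are hard-coded below because Go's syscall package
+// does not define them.)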
+//
+// constants from /usr/include/bits/fcntl-linux.h
+const (
+	F_OFD_GETLK  = 36
+	F_OFD_SETLK  = 37
+	F_OFD_SETLKW = 38
+)
+
+var (
+	wrlck = syscall.Flock_t{
+		Type:   syscall.F_WRLCK,
+		Whence: int16(os.SEEK_SET),
+		Start:  0,
+		Len:    0,
+	}
+
+	linuxTryLockFile = flockTryLockFile
+	linuxLockFile    = flockLockFile
+)
+
+func init() {
+	// use open file descriptor locks if the system supports it
+	getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
+	if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
+		linuxTryLockFile = ofdTryLockFile
+		linuxLockFile = ofdLockFile
+	}
+}
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return linuxTryLockFile(path, flag, perm)
+}
+
+func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	flock := wrlck
+	if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
+		f.Close()
+		if err == syscall.EWOULDBLOCK {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return linuxLockFile(path, flag, perm)
+}
+
+func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	flock := wrlck
+	err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
+
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
new file mode 100644
index 0000000..fee6a7c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, ErrLocked
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	for {
+		f, err := os.OpenFile(path, flag, perm)
+		if err == nil {
+			return &LockedFile{f}, nil
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
new file mode 100644
index 0000000..352ca55
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + lock.Pid = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil { + f.Close() + if err == syscall.EAGAIN { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go new file mode 100644 index 0000000..ed01164 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!plan9,!solaris,!linux + +package fileutil + +import ( + "os" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockTryLockFile(path, flag, perm) +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockLockFile(path, flag, perm) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go new file mode 100644 index 0000000..8698f4a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err.Error() == errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go new file mode 100644 index 0000000..bb7f028 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -0,0 +1,47 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import "os"
+
+// Preallocate tries to allocate the space for the given
+// file. This operation is only supported on linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+	if extendFile {
+		return preallocExtend(f, sizeInBytes)
+	}
+	return preallocFixed(f, sizeInBytes)
+}
+
+func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
+	curOff, err := f.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return err
+	}
+	size, err := f.Seek(sizeInBytes, os.SEEK_END)
+	if err != nil {
+		return err
+	}
+	if _, err = f.Seek(curOff, os.SEEK_SET); err != nil {
+		return err
+	}
+	if sizeInBytes > size {
+		return nil
+	}
+	return f.Truncate(sizeInBytes)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
new file mode 100644
index 0000000..1ed09c5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	if err := preallocFixed(f, sizeInBytes); err != nil {
+		return err
+	}
+	return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+	fstore := &syscall.Fstore_t{
+		Flags:   syscall.F_ALLOCATEALL,
+		Posmode: syscall.F_PEOFPOSMODE,
+		Length:  sizeInBytes}
+	p := unsafe.Pointer(fstore)
+	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
+	if errno == 0 || errno == syscall.ENOTSUP {
+		return nil
+	}
+	return errno
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
new file mode 100644
index 0000000..50bd84f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + // use mode = 0 to change size + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go new file mode 100644 index 0000000..162fbc5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +func preallocExtend(f *os.File, sizeInBytes int64) error { + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go new file mode 100644 index 0000000..53bda0c --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go @@ -0,0 +1,78 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileutil + +import ( + "os" + "path" + "sort" + "strings" + "time" +) + +func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(dirname, suffix, max, interval, stop, nil) +} + +// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. +func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { + errC := make(chan error, 1) + go func() { + for { + fnames, err := ReadDir(dirname) + if err != nil { + errC <- err + return + } + newfnames := make([]string, 0) + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + newfnames = append(newfnames, fname) + } + } + sort.Strings(newfnames) + fnames = newfnames + for len(newfnames) > int(max) { + f := path.Join(dirname, newfnames[0]) + l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + break + } + if err = os.Remove(f); err != nil { + errC <- err + return + } + if err = l.Close(); err != nil { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + errC <- err + return + } + plog.Infof("purged file %s successfully", f) + newfnames = newfnames[1:] + } + if purgec != nil { + for i := 0; i < len(fnames)-len(newfnames); i++ { + purgec <- fnames[i] + } + } + select { + case <-time.After(interval): + case <-stop: + return + } + } + }() + return errC +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go new file mode 100644 index 0000000..54dd41f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go @@ -0,0 +1,29 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform. +func Fdatasync(f *os.File) error { + return f.Sync() +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go new file mode 100644 index 0000000..c2f39bf --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync on HFS/OSX flushes the data onto the physical drive but the drive
+// may not write it to the persistent media for quite some time and it may be
+// written in an out-of-order sequence. Using F_FULLFSYNC ensures that the
+// physical drive's buffer will also get flushed to the media.
+func Fsync(f *os.File) error {
+	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
+	if errno == 0 {
+		return nil
+	}
+	return errno
+}
+
+// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
+// on physical drive media.
+func Fdatasync(f *os.File) error {
+	return Fsync(f)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
new file mode 100644
index 0000000..1bbced9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+	return syscall.Fdatasync(int(f.Fd()))
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go
new file mode 100644
index 0000000..931beb2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go
@@ -0,0 +1,78 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idutil implements utility functions for generating unique,
+// randomized ids.
+package idutil
+
+import (
+	"math"
+	"sync"
+	"time"
+)
+
+const (
+	tsLen     = 5 * 8
+	cntLen    = 8
+	suffixLen = tsLen + cntLen
+)
+
+// Generator generates unique identifiers based on counters, timestamps, and
+// a node member ID.
+//
+// The initial id is in this format:
+// High order 2 bytes are from memberID, next 5 bytes are from the timestamp,
+// and the low order byte is a counter starting at 0.
+// | prefix   | suffix              |
+// | 2 bytes  | 5 bytes   | 1 byte  |
+// | memberID | timestamp | cnt     |
+//
+// The 5-byte timestamp differs across restarts as long as the machine
+// restarts at least 1 ms apart and within 35 years (2^40 ms).
+//
+// It increases the suffix to generate the next id.
+// The count field may overflow into the timestamp field, which is intentional.
+// It helps to extend the event window to 2^56. This doesn't break the
+// guarantee that ids generated after a restart are unique, because etcd
+// throughput is << 256 req/ms (250k reqs/second).
+type Generator struct {
+	mu sync.Mutex
+	// high order 2 bytes
+	prefix uint64
+	// low order 6 bytes
+	suffix uint64
+}
+
+func NewGenerator(memberID uint16, now time.Time) *Generator {
+	prefix := uint64(memberID) << suffixLen
+	unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
+	suffix := lowbit(unixMilli, tsLen) << cntLen
+	return &Generator{
+		prefix: prefix,
+		suffix: suffix,
+	}
+}
+
+// Next generates an id that is unique.
+func (g *Generator) Next() uint64 {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+	g.suffix++
+	id := g.prefix | lowbit(g.suffix, suffixLen)
+	return id
+}
+
+func lowbit(x uint64, n uint) uint64 {
+	return x & (math.MaxUint64 >> (64 - n))
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
new file mode 100644
index 0000000..72de159
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+	"io"
+)
+
+var defaultBufferBytes = 128 * 1024
+
+// PageWriter implements the io.Writer interface so that writes go out
+// either in full-page chunks or via an explicit Flush.
+type PageWriter struct {
+	w io.Writer
+	// pageOffset tracks the page offset of the base of the buffer
+	pageOffset int
+	// pageBytes is the number of bytes per page
+	pageBytes int
+	// bufferedBytes counts the number of bytes pending for write in the buffer
+	bufferedBytes int
+	// buf holds the write buffer
+	buf []byte
+	// bufWatermarkBytes is the number of bytes the buffer can hold before it needs
+	// to be flushed. It is less than len(buf) so there is space for slack writes
+	// to bring the writer to page alignment.
+	bufWatermarkBytes int
+}
+
+// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
+// to write per page. pageOffset is the starting offset of io.Writer.
+func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter { + return &PageWriter{ + w: w, + pageOffset: pageOffset, + pageBytes: pageBytes, + buf: make([]byte, defaultBufferBytes+pageBytes), + bufWatermarkBytes: defaultBufferBytes, + } +} + +func (pw *PageWriter) Write(p []byte) (n int, err error) { + if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes { + // no overflow + copy(pw.buf[pw.bufferedBytes:], p) + pw.bufferedBytes += len(p) + return len(p), nil + } + // complete the slack page in the buffer if unaligned + slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes) + if slack != pw.pageBytes { + partial := slack > len(p) + if partial { + // not enough data to complete the slack page + slack = len(p) + } + // special case: writing to slack page in buffer + copy(pw.buf[pw.bufferedBytes:], p[:slack]) + pw.bufferedBytes += slack + n = slack + p = p[slack:] + if partial { + // avoid forcing an unaligned flush + return n, nil + } + } + // buffer contents are now page-aligned; clear out + if err = pw.Flush(); err != nil { + return n, err + } + // directly write all complete pages without copying + if len(p) > pw.pageBytes { + pages := len(p) / pw.pageBytes + c, werr := pw.w.Write(p[:pages*pw.pageBytes]) + n += c + if werr != nil { + return n, werr + } + p = p[pages*pw.pageBytes:] + } + // write remaining tail to buffer + c, werr := pw.Write(p) + n += c + return n, werr +} + +func (pw *PageWriter) Flush() error { + if pw.bufferedBytes == 0 { + return nil + } + _, err := pw.w.Write(pw.buf[:pw.bufferedBytes]) + pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes + pw.bufferedBytes = 0 + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go new file mode 100644 index 0000000..d3efcfe --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go @@ -0,0 +1,66 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ioutil + +import ( + "fmt" + "io" +) + +// ReaderAndCloser implements io.ReadCloser interface by combining +// reader and closer together. +type ReaderAndCloser struct { + io.Reader + io.Closer +} + +var ( + ErrShortRead = fmt.Errorf("ioutil: short read") + ErrExpectEOF = fmt.Errorf("ioutil: expect EOF") +) + +// NewExactReadCloser returns a ReadCloser that returns errors if the underlying +// reader does not read back exactly the requested number of bytes. 
+func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser { + return &exactReadCloser{rc: rc, totalBytes: totalBytes} +} + +type exactReadCloser struct { + rc io.ReadCloser + br int64 + totalBytes int64 +} + +func (e *exactReadCloser) Read(p []byte) (int, error) { + n, err := e.rc.Read(p) + e.br += int64(n) + if e.br > e.totalBytes { + return 0, ErrExpectEOF + } + if e.br < e.totalBytes && n == 0 { + return 0, ErrShortRead + } + return n, err +} + +func (e *exactReadCloser) Close() error { + if err := e.rc.Close(); err != nil { + return err + } + if e.br < e.totalBytes { + return ErrShortRead + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go new file mode 100644 index 0000000..0703ed4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ioutil implements I/O utility functions. +package ioutil + +import "io" + +// NewLimitedBufferReader returns a reader that reads from the given reader +// but limits the amount of data returned to at most n bytes. +func NewLimitedBufferReader(r io.Reader, n int) io.Reader { + return &limitedBufferReader{ + r: r, + n: n, + } +} + +type limitedBufferReader struct { + r io.Reader + n int +} + +func (r *limitedBufferReader) Read(p []byte) (n int, err error) { + np := p + if len(np) > r.n { + np = np[:r.n] + } + return r.r.Read(np) +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go new file mode 100644 index 0000000..192ad88 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go @@ -0,0 +1,43 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ioutil + +import ( + "io" + "os" + + "github.com/coreos/etcd/pkg/fileutil" +) + +// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library, +// but calls Sync before closing the file. WriteAndSyncFile guarantees the data +// is synced if there is no error returned. 
+func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
+    f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+    if err != nil {
+        return err
+    }
+    n, err := f.Write(data)
+    if err == nil && n < len(data) {
+        err = io.ErrShortWrite
+    }
+    if err == nil {
+        err = fileutil.Fsync(f)
+    }
+    if err1 := f.Close(); err == nil {
+        err = err1
+    }
+    return err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
new file mode 100644
index 0000000..d70f98d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil defines interfaces for handling Protocol Buffer objects.
+package pbutil
+
+import "github.com/coreos/pkg/capnslog"
+
+var (
+    plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")
+)
+
+type Marshaler interface {
+    Marshal() (data []byte, err error)
+}
+
+type Unmarshaler interface {
+    Unmarshal(data []byte) error
+}
+
+func MustMarshal(m Marshaler) []byte {
+    d, err := m.Marshal()
+    if err != nil {
+        plog.Panicf("marshal should never fail (%v)", err)
+    }
+    return d
+}
+
+func MustUnmarshal(um Unmarshaler, data []byte) {
+    if err := um.Unmarshal(data); err != nil {
+        plog.Panicf("unmarshal should never fail (%v)", err)
+    }
+}
+
+func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+    if err := um.Unmarshal(data); err != nil {
+        return false
+    }
+    return true
+}
+
+func GetBool(v *bool) (vv bool, set bool) {
+    if v == nil {
+        return false, false
+    }
+    return *v, true
+}
+
+func Boolp(b bool) *bool { return &b }
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
new file mode 100644
index 0000000..a724b95
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/README.md
@@ -0,0 +1,247 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature-complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
+
+Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader for a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader for a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce synchronized disk I/O
+- Writing to the leader's disk in parallel
+- Internal proposal redirection from followers to the leader
+- Automatic stepping down when the leader loses quorum
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+
+## Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  // Set peer list to the other nodes in the cluster.
+  // Note that they need to be started separately as well.
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+```
+
+You can start a single-node cluster, like so:
+```go
+  // Create storage and config as shown above.
+  // Set peer list to itself, so this node can become the leader of this single-node cluster.
+  peers := []raft.Peer{{ID: 0x01}}
+  n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, you need to add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, you can start the node with an empty peer list, like so:
+```go
+  // Create storage and config as shown above.
+  n := raft.StartNode(c, nil)
+```
+
+To restart a node from a previous state:
+```go
+  storage := raft.NewMemoryStorage()
+
+  // Recover the in-memory storage from persistent snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // Restart raft without peer information.
+  // Peer information is already included in the storage.
+  n := raft.RestartNode(c)
+```
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make the leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in the Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large). Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
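+
+One minimal sketch of the `saveToStorage` helper used in the handling loop
+below, backed by the `MemoryStorage` created in the examples above (the
+helper itself is application code, not part of this library; error handling
+is omitted for brevity, and a real application would also write the same
+data durably to disk, since MemoryStorage alone does not survive a restart):
+```go
+  func saveToStorage(hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot) {
+    // Mirror the Ready update into storage before any messages are sent,
+    // preserving the ordering required by steps 1 and 2 above.
+    if !raft.IsEmptyHardState(hardState) {
+      storage.SetHardState(hardState)
+    }
+    storage.Append(entries)
+    if !raft.IsEmptySnap(snapshot) {
+      storage.ApplySnapshot(snapshot)
+    }
+  }
+```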
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+```go
+  func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+    n.Step(ctx, m)
+  }
+```
+
+Finally, you need to call `Node.Tick()` at regular intervals (probably
+via a `time.Ticker`). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+```
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice, and call:
+
+```go
+  n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' (see the
+sketch at the end of this document) and call:
+
+```go
+  n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, a committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+```go
+  var cc raftpb.ConfChange
+  cc.Unmarshal(data)
+  n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
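+
+For concreteness, building the 'cc' struct for adding a node might look like
+the following sketch (the node ID 0x04 and the address stored in Context are
+example values; Context is opaque to raft and is only interpreted by the
+application):
+
+```go
+  cc := raftpb.ConfChange{
+    Type:    raftpb.ConfChangeAddNode,
+    NodeID:  0x04,
+    Context: []byte("http://10.0.0.4:2380"),
+  }
+  n.ProposeConfChange(ctx, cc)
+```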
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go new file mode 100644 index 0000000..b55c591 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/doc.go @@ -0,0 +1,300 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package raft sends and receives messages in the Protocol Buffer format +defined in the raftpb package. + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. + +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/coreos/etcd/tree/master/contrib/raftexample + +Usage + +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a node from scratch: + + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) + +To restart a node from previous state: + + storage := raft.NewMemoryStorage() + + // recover the in-memory storage from persistent + // snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // restart raft without peer information. + // peer information is already included in the storage. + n := raft.RestartNode(c) + +Now that you are holding onto a Node you have a few responsibilities: + +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be +large). 
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+    func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+        n.Step(ctx, m)
+    }
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+    for {
+        select {
+        case <-s.Ticker:
+            n.Tick()
+        case rd := <-s.Node.Ready():
+            saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+            send(rd.Messages)
+            if !raft.IsEmptySnap(rd.Snapshot) {
+                processSnapshot(rd.Snapshot)
+            }
+            for _, entry := range rd.CommittedEntries {
+                process(entry)
+                if entry.Type == raftpb.EntryConfChange {
+                    var cc raftpb.ConfChange
+                    cc.Unmarshal(entry.Data)
+                    s.Node.ApplyConfChange(cc)
+                }
+            }
+            s.Node.Advance()
+        case <-s.done:
+            return
+        }
+    }
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice, and call:
+
+    n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+    n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, a committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+    var cc raftpb.ConfChange
+    cc.Unmarshal(data)
+    n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives messages in the Protocol Buffer format
+(defined in the raftpb package). Each state (follower, candidate, leader)
+implements its own 'step' method ('stepFollower', 'stepCandidate',
+'stepLeader') when advancing with the given raftpb.Message. Each step is
+determined by its raftpb.MessageType. Note that every step is checked by one
+common method 'Step' that safety-checks the terms of node and incoming
+message to prevent stale log entries:
+
+    'MsgHup' is used for election. If a node is a follower or candidate, the
+    'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+    candidate has not received any heartbeat before the election timeout, it
+    passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+    start a new election.
+
+    'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+    the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+    the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+    send periodic 'MsgHeartbeat' messages to its followers.
+
+    'MsgProp' proposes to append data to its log entries. This is a special
+    type to redirect proposals to the leader. Therefore, the send method
+    overwrites raftpb.Message's term with its HardState's term to avoid
+    attaching its local term to 'MsgProp'. When 'MsgProp' is passed to the
+    leader's 'Step' method, the leader first calls the 'appendEntry' method to
+    append entries to its log, and then calls the 'bcastAppend' method to send
+    those entries to its peers. When passed to a candidate, 'MsgProp' is
+    dropped. When passed to a follower, 'MsgProp' is stored in the follower's
+    mailbox (msgs) by the send method. It is stored with the sender's ID and
+    later forwarded to the leader by the rafthttp package.
+
+    'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+    which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+    type. When 'MsgApp' is passed to a candidate's Step method, the candidate
+    reverts back to follower, because it indicates that there is a valid leader
+    sending 'MsgApp' messages. Candidates and followers respond to this message
+    in 'MsgAppResp' type.
+
+    'MsgAppResp' is the response to a log replication request ('MsgApp'). When
+    'MsgApp' is passed to a candidate or follower's Step method, it responds by
+    calling the 'handleAppendEntries' method, which sends 'MsgAppResp' to the
+    raft mailbox.
+
+    'MsgVote' requests votes for election. When a node is a follower or
+    candidate and 'MsgHup' is passed to its Step method, the node calls the
+    'campaign' method to campaign to become the leader. Once the 'campaign'
+    method is called, the node becomes a candidate and sends 'MsgVote' to peers
+    in the cluster to request votes. When passed to the leader or candidate's
+    Step method and the message's Term is lower than the leader's or
+    candidate's, 'MsgVote' will be rejected ('MsgVoteResp' is returned with
+    Reject true). If the leader or candidate receives 'MsgVote' with a higher
+    term, it will revert back to follower. When 'MsgVote' is passed to a
+    follower, it votes for the sender only when the sender's last log term is
+    greater than the follower's, or when their last log terms are equal and the
+    sender's last log index is greater than or equal to the follower's.
+
+    'MsgVoteResp' contains responses from the voting request. When
+    'MsgVoteResp' is passed to a candidate, the candidate calculates how many
+    votes it has won. If it's more than majority (quorum), it becomes leader
+    and calls 'bcastAppend'. If a candidate receives a majority of denial
+    votes, it reverts back to follower.
+
+    'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+    protocol. When Config.PreVote is true, a pre-election is carried out first
+    (using the same rules as a regular election), and no node increases its term
+    number unless the pre-election indicates that the campaigning node would win.
+    This minimizes disruption when a partitioned node rejoins the cluster.
+
+    'MsgSnap' requests to install a snapshot. When a node has just become a
+    leader or the leader receives a 'MsgProp' message, it calls the
+    'bcastAppend' method, which then calls the 'sendAppend' method to each
+    follower. In 'sendAppend', if a leader fails to get the term or entries,
+    the leader requests a snapshot by sending a 'MsgSnap' type message.
+
+    'MsgSnapStatus' tells the result of a snapshot install message. When a
+    follower rejects 'MsgSnap', it indicates that the snapshot request via
+    'MsgSnap' failed, typically because of network issues that prevented the
+    network layer from delivering the snapshot to the follower. The leader then
+    considers the follower's progress as probe. When 'MsgSnap' is not rejected,
+    it indicates that the snapshot succeeded, and the leader sets the
+    follower's progress to probe and resumes its log replication.
+
+    'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
+    to a candidate and the message's term is higher than the candidate's, the
+    candidate reverts back to follower, updates its committed index from the
+    one in this heartbeat, and sends the message to its mailbox. When
+    'MsgHeartbeat' is passed to a follower's Step method and the message's term
+    is higher than the follower's, the follower updates its leaderID with the
+    ID from the message.
+
+    'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+    is passed to the leader's Step method, the leader knows which follower
+    responded. Only when the leader's last committed index is greater than the
+    follower's Match index does the leader run the 'sendAppend' method.
+
+    'MsgUnreachable' reports that a request (message) wasn't delivered. When
+    'MsgUnreachable' is passed to the leader's Step method, the leader
+    discovers that the follower that sent this 'MsgUnreachable' is not
+    reachable, often indicating that 'MsgApp' is lost. When the follower's
+    progress state is replicate, the leader sets it back to probe.
+ +*/ +package raft diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go new file mode 100644 index 0000000..c3036d3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/log.go @@ -0,0 +1,358 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "log" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type raftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // unstable contains all unstable entries and snapshot. + // they will be saved into storage. + unstable unstable + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + // applied is the highest log position that the application has + // been instructed to apply to its state machine. + // Invariant: applied <= committed + applied uint64 + + logger Logger +} + +// newLog returns log using the given storage. It recovers the log to the state +// that it just commits and applies the latest snapshot. +func newLog(storage Storage, logger Logger) *raftLog { + if storage == nil { + log.Panic("storage must not be nil") + } + log := &raftLog{ + storage: storage, + logger: logger, + } + firstIndex, err := storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + lastIndex, err := storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + log.unstable.offset = lastIndex + 1 + log.unstable.logger = logger + // Initialize our committed and applied pointers to the time of the last compaction. + log.committed = firstIndex - 1 + log.applied = firstIndex - 1 + + return log +} + +func (l *raftLog) String() string { + return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) +} + +// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, +// it returns (last index of new entries, true). +func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { + if l.matchTerm(index, logTerm) { + lastnewi = index + uint64(len(ents)) + ci := l.findConflict(ents) + switch { + case ci == 0: + case ci <= l.committed: + l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) + default: + offset := index + 1 + l.append(ents[ci-offset:]...) + } + l.commitTo(min(committed, lastnewi)) + return lastnewi, true + } + return 0, false +} + +func (l *raftLog) append(ents ...pb.Entry) uint64 { + if len(ents) == 0 { + return l.lastIndex() + } + if after := ents[0].Index - 1; after < l.committed { + l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) + } + l.unstable.truncateAndAppend(ents) + return l.lastIndex() +} + +// findConflict finds the index of the conflict. 
+// It returns the first pair of conflicting entries between the existing
+// entries and the given entries, if there are any.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The first entry MUST have an index equal to the argument 'from'.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+    for _, ne := range ents {
+        if !l.matchTerm(ne.Index, ne.Term) {
+            if ne.Index <= l.lastIndex() {
+                l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+                    ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
+            }
+            return ne.Index
+        }
+    }
+    return 0
+}
+
+func (l *raftLog) unstableEntries() []pb.Entry {
+    if len(l.unstable.entries) == 0 {
+        return nil
+    }
+    return l.unstable.entries
+}
+
+// nextEnts returns all the available entries for execution.
+// If applied is smaller than the index of the snapshot, it returns all
+// committed entries after the index of the snapshot.
+func (l *raftLog) nextEnts() (ents []pb.Entry) {
+    off := max(l.applied+1, l.firstIndex())
+    if l.committed+1 > off {
+        ents, err := l.slice(off, l.committed+1, noLimit)
+        if err != nil {
+            l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+        }
+        return ents
+    }
+    return nil
+}
+
+// hasNextEnts reports whether there are any entries available for execution.
+// This is a fast check without the heavy raftLog.slice() in raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+    off := max(l.applied+1, l.firstIndex())
+    return l.committed+1 > off
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+    if l.unstable.snapshot != nil {
+        return *l.unstable.snapshot, nil
+    }
+    return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+    if i, ok := l.unstable.maybeFirstIndex(); ok {
+        return i
+    }
+    index, err := l.storage.FirstIndex()
+    if err != nil {
+        panic(err) // TODO(bdarnell)
+    }
+    return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+    if i, ok := l.unstable.maybeLastIndex(); ok {
+        return i
+    }
+    i, err := l.storage.LastIndex()
+    if err != nil {
+        panic(err) // TODO(bdarnell)
+    }
+    return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+    // never decrease commit
+    if l.committed < tocommit {
+        if l.lastIndex() < tocommit {
+            l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+        }
+        l.committed = tocommit
+    }
+}
+
+func (l *raftLog) appliedTo(i uint64) {
+    if i == 0 {
+        return
+    }
+    if l.committed < i || i < l.applied {
+        l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+    }
+    l.applied = i
+}
+
+func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+func (l *raftLog) lastTerm() uint64 {
+    t, err := l.term(l.lastIndex())
+    if err != nil {
+        l.logger.Panicf("unexpected error when getting the last term (%v)", err)
+    }
+    return t
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+    // the valid term range is [index of dummy entry, last index]
+    dummyIndex := l.firstIndex() - 1
+    if i < dummyIndex || i > l.lastIndex() {
+        // TODO: return an error instead?
+        return 0, nil
+    }
+
+    if t, ok := l.unstable.maybeTerm(i); ok {
+        return t, nil
+    }
+
+    t, err := l.storage.Term(i)
+    if err == nil {
+        return t, nil
+    }
+    if err == ErrCompacted || err == ErrUnavailable {
+        return 0, err
+    }
+    panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
+    if i > l.lastIndex() {
+        return nil, nil
+    }
+    return l.slice(i, l.lastIndex()+1, maxsize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+    ents, err := l.entries(l.firstIndex(), noLimit)
+    if err == nil {
+        return ents
+    }
+    if err == ErrCompacted { // try again if there was a racing compaction
+        return l.allEntries()
+    }
+    // TODO (xiangli): handle error?
+    panic(err)
+}
+
+// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(lasti, term uint64) bool {
+    return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
+}
+
+func (l *raftLog) matchTerm(i, term uint64) bool {
+    t, err := l.term(i)
+    if err != nil {
+        return false
+    }
+    return t == term
+}
+
+func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
+    if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
+        l.commitTo(maxIndex)
+        return true
+    }
+    return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+    l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+    l.committed = s.Metadata.Index
+    l.unstable.restore(s)
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { + err := l.mustCheckOutOfBounds(lo, hi) + if err != nil { + return nil, err + } + if lo == hi { + return nil, nil + } + var ents []pb.Entry + if lo < l.unstable.offset { + storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) + if err == ErrCompacted { + return nil, err + } else if err == ErrUnavailable { + l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) + } else if err != nil { + panic(err) // TODO(bdarnell) + } + + // check if ents has reached the size limitation + if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { + return storedEnts, nil + } + + ents = storedEnts + } + if hi > l.unstable.offset { + unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) + if len(ents) > 0 { + ents = append([]pb.Entry{}, ents...) + ents = append(ents, unstable...) + } else { + ents = unstable + } + } + return limitSize(ents, maxSize), nil +} + +// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) +func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { + if lo > hi { + l.logger.Panicf("invalid slice %d > %d", lo, hi) + } + fi := l.firstIndex() + if lo < fi { + return ErrCompacted + } + + length := l.lastIndex() + 1 - fi + if lo < fi || hi > fi+length { + l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) + } + return nil +} + +func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { + if err == nil { + return t + } + if err == ErrCompacted { + return 0 + } + l.logger.Panicf("unexpected error (%v)", err) + return 0 +} diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go new file mode 100644 index 0000000..8ae301c --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -0,0 +1,139 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/coreos/etcd/raft/raftpb" + +// unstable.entries[i] has raft log position i+unstable.offset. +// Note that unstable.offset may be less than the highest log +// position in storage; this means that the next write to storage +// might need to truncate the log before persisting unstable.entries. +type unstable struct { + // the incoming unstable snapshot, if any. + snapshot *pb.Snapshot + // all entries that have not yet been written to storage. + entries []pb.Entry + offset uint64 + + logger Logger +} + +// maybeFirstIndex returns the index of the first possible entry in entries +// if it has a snapshot. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex returns the last index if it has at least one +// unstable entry or snapshot. 
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+    if l := len(u.entries); l != 0 {
+        return u.offset + uint64(l) - 1, true
+    }
+    if u.snapshot != nil {
+        return u.snapshot.Metadata.Index, true
+    }
+    return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+    if i < u.offset {
+        if u.snapshot == nil {
+            return 0, false
+        }
+        if u.snapshot.Metadata.Index == i {
+            return u.snapshot.Metadata.Term, true
+        }
+        return 0, false
+    }
+
+    last, ok := u.maybeLastIndex()
+    if !ok {
+        return 0, false
+    }
+    if i > last {
+        return 0, false
+    }
+    return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+    gt, ok := u.maybeTerm(i)
+    if !ok {
+        return
+    }
+    // if i < offset, term is matched with the snapshot
+    // only update the unstable entries if term is matched with
+    // an unstable entry.
+    if gt == t && i >= u.offset {
+        u.entries = u.entries[i+1-u.offset:]
+        u.offset = i + 1
+    }
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+    if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+        u.snapshot = nil
+    }
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+    u.offset = s.Metadata.Index + 1
+    u.entries = nil
+    u.snapshot = &s
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+    after := ents[0].Index
+    switch {
+    case after == u.offset+uint64(len(u.entries)):
+        // after is the next index in the u.entries
+        // directly append
+        u.entries = append(u.entries, ents...)
+    case after <= u.offset:
+        u.logger.Infof("replace the unstable entries from index %d", after)
+        // The log is being truncated to before our current offset
+        // portion, so set the offset and replace the entries
+        u.offset = after
+        u.entries = ents
+    default:
+        // truncate to after and copy to u.entries
+        // then append
+        u.logger.Infof("truncate the unstable entries before index %d", after)
+        u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
+        u.entries = append(u.entries, ents...)
+    }
+}
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+    u.mustCheckOutOfBounds(lo, hi)
+    return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+    if lo > hi {
+        u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+    }
+    upper := u.offset + uint64(len(u.entries))
+    if lo < u.offset || hi > upper {
+        u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+    }
+}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
new file mode 100644
index 0000000..92e55b3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+    "fmt"
+    "io/ioutil"
+    "log"
+    "os"
+)
+
+type Logger interface {
+    Debug(v ...interface{})
+    Debugf(format string, v ...interface{})
+
+    Error(v ...interface{})
+    Errorf(format string, v ...interface{})
+
+    Info(v ...interface{})
+    Infof(format string, v ...interface{})
+
+    Warning(v ...interface{})
+    Warningf(format string, v ...interface{})
+
+    Fatal(v ...interface{})
+    Fatalf(format string, v ...interface{})
+
+    Panic(v ...interface{})
+    Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) { raftLogger = l }
+
+var (
+    defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+    discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+    raftLogger    = Logger(defaultLogger)
+)
+
+const (
+    calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+    *log.Logger
+    debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+    l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+    l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+    if l.debug {
+        l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+    }
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+    if l.debug {
+        l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+    }
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+    l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+    l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+    l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+    l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+    l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+    l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+    l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+    os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+    l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+    os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+    // forward the variadic arguments; passing the slice itself would
+    // print it as a single []interface{} value.
+    l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+    l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+    return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
new file mode 100644
index 0000000..c8410fd
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/node.go
@@ -0,0 +1,521 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+    "errors"
+
+    pb "github.com/coreos/etcd/raft/raftpb"
+    "golang.org/x/net/context"
+)
+
+type SnapshotStatus int
+
+const (
+    SnapshotFinish  SnapshotStatus = 1
+    SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+    emptyState = pb.HardState{}
+
+    // ErrStopped is returned by methods on Nodes that have been stopped.
+    ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+    Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
+    RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+    return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+    // The current volatile state of a Node.
+    // SoftState will be nil if there is no update.
+    // It is not required to consume or store SoftState.
+    *SoftState
+
+    // The current state of a Node to be saved to stable storage BEFORE
+    // Messages are sent.
+    // HardState will be equal to empty state if there is no update.
+    pb.HardState
+
+    // ReadStates can be used for the node to serve linearizable read requests
+    // locally when its applied index is greater than the index in ReadState.
+    // Note that the readState will be returned when raft receives msgReadIndex.
+    // The returned state is only valid for the request that requested the read.
+    ReadStates []ReadState
+
+    // Entries specifies entries to be saved to stable storage BEFORE
+    // Messages are sent.
+    Entries []pb.Entry
+
+    // Snapshot specifies the snapshot to be saved to stable storage.
+    Snapshot pb.Snapshot
+
+    // CommittedEntries specifies entries to be committed to a
+    // store/state-machine. These have previously been committed to stable
+    // store.
+    CommittedEntries []pb.Entry
+
+    // Messages specifies outbound messages to be sent AFTER Entries are
+    // committed to stable storage.
+    // If it contains a MsgSnap message, the application MUST report back to raft
+    // when the snapshot has been received or has failed by calling ReportSnapshot.
+    Messages []pb.Message
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+    return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+    return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+    return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+    return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+        !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+        len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+    // Tick increments the internal logical clock for the Node by a single tick. Election
+    // timeouts and heartbeat timeouts are in units of ticks.
+    Tick()
+    // Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+    Campaign(ctx context.Context) error
+    // Propose proposes that data be appended to the log.
+    Propose(ctx context.Context, data []byte) error
+    // ProposeConfChange proposes a config change.
+    // At most one ConfChange can be in the process of going through consensus.
+    // The application needs to call ApplyConfChange when applying an
+    // EntryConfChange type entry.
+    ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
+    // Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+    Step(ctx context.Context, msg pb.Message) error
+
+    // Ready returns a channel that returns the current point-in-time state.
+    // Users of the Node must call Advance after retrieving the state returned by Ready.
+    //
+    // NOTE: No committed entries from the next Ready may be applied until all committed entries
+    // and snapshots from the previous one have finished.
+    Ready() <-chan Ready
+
+    // Advance notifies the Node that the application has saved progress up to the last Ready.
+    // It prepares the node to return the next available Ready.
+    //
+    // The application should generally call Advance after it applies the entries in the last Ready.
+    //
+    // However, as an optimization, the application may call Advance while it is applying the
+    // commands. For example, when the last Ready contains a snapshot, the application might take
+    // a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+    // progress, it can call Advance before finishing applying the last Ready.
+    Advance()
+    // ApplyConfChange applies a config change to the local node.
+    // Returns an opaque ConfState protobuf which must be recorded
+    // in snapshots. Will never return nil; it returns a pointer only
+    // to match MemoryStorage.Compact.
+    ApplyConfChange(cc pb.ConfChange) *pb.ConfState
+
+    // TransferLeadership attempts to transfer leadership to the given transferee.
+    TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+    // ReadIndex requests a read state. The read state will be set in the ready.
+    // Read state has a read index. Once the application advances further than the read
+    // index, any linearizable read requests issued before the read request can be
+    // processed safely. The read state will have the same rctx attached.
+    ReadIndex(ctx context.Context, rctx []byte) error
+
+    // Status returns the current status of the raft state machine.
+    Status() Status
+    // ReportUnreachable reports that the given node is not reachable for the last send.
+    ReportUnreachable(id uint64)
+    // ReportSnapshot reports the status of the sent snapshot.
+    ReportSnapshot(id uint64, status SnapshotStatus)
+    // Stop performs any necessary termination of the Node.
+    Stop()
+}
+
+type Peer struct {
+    ID      uint64
+    Context []byte
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+func StartNode(c *Config, peers []Peer) Node {
+    r := newRaft(c)
+    // become the follower at term 1 and apply initial configuration
+    // entries of term 1
+    r.becomeFollower(1, None)
+    for _, peer := range peers {
+        cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+        d, err := cc.Marshal()
+        if err != nil {
+            panic("unexpected marshal error")
+        }
+        e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
+        r.raftLog.append(e)
+    }
+    // Mark these initial entries as committed.
+    // TODO(bdarnell): These entries are still unstable; do we need to preserve
+    // the invariant that committed < unstable?
+    r.raftLog.committed = r.raftLog.lastIndex()
+    // Now apply them, mainly so that the application can call Campaign
+    // immediately after StartNode in tests. Note that these nodes will
+    // be added to raft twice: here and when the application's Ready
+    // loop calls ApplyConfChange. The calls to addNode must come after
+    // all calls to raftLog.append so progress.next is set after these
+    // bootstrapping entries (it is an error if we try to append these
+    // entries since they have already been committed).
+    // We do not set raftLog.applied so the application will be able
+    // to observe all conf changes via Ready.CommittedEntries.
+    for _, peer := range peers {
+        r.addNode(peer.ID)
+    }
+
+    n := newNode()
+    n.logger = c.Logger
+    go n.run(r)
+    return &n
+}
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+    r := newRaft(c)
+
+    n := newNode()
+    n.logger = c.Logger
+    go n.run(r)
+    return &n
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+    propc      chan pb.Message
+    recvc      chan pb.Message
+    confc      chan pb.ConfChange
+    confstatec chan pb.ConfState
+    readyc     chan Ready
+    advancec   chan struct{}
+    tickc      chan struct{}
+    done       chan struct{}
+    stop       chan struct{}
+    status     chan chan Status
+
+    logger Logger
+}
+
+func newNode() node {
+    return node{
+        propc:      make(chan pb.Message),
+        recvc:      make(chan pb.Message),
+        confc:      make(chan pb.ConfChange),
+        confstatec: make(chan pb.ConfState),
+        readyc:     make(chan Ready),
+        advancec:   make(chan struct{}),
+        // make tickc a buffered chan, so the raft node can buffer some ticks
+        // when it is busy processing raft messages. The raft node will resume
+        // processing buffered ticks when it becomes idle.
+        tickc:  make(chan struct{}, 128),
+        done:   make(chan struct{}),
+        stop:   make(chan struct{}),
+        status: make(chan chan Status),
+    }
+}
+
+func (n *node) Stop() {
+    select {
+    case n.stop <- struct{}{}:
+        // Not already stopped, so trigger it
+    case <-n.done:
+        // Node has already been stopped - no need to do anything
+        return
+    }
+    // Block until the stop has been acknowledged by run()
+    <-n.done
+}
+
+func (n *node) run(r *raft) {
+    var propc chan pb.Message
+    var readyc chan Ready
+    var advancec chan struct{}
+    var prevLastUnstablei, prevLastUnstablet uint64
+    var havePrevLastUnstablei bool
+    var prevSnapi uint64
+    var rd Ready
+
+    lead := None
+    prevSoftSt := r.softState()
+    prevHardSt := emptyState
+
+    for {
+        if advancec != nil {
+            readyc = nil
+        } else {
+            rd = newReady(r, prevSoftSt, prevHardSt)
+            if rd.containsUpdates() {
+                readyc = n.readyc
+            } else {
+                readyc = nil
+            }
+        }
+
+        if lead != r.lead {
+            if r.hasLeader() {
+                if lead == None {
+                    r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+                } else {
+                    r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+                }
+                propc = n.propc
+            } else {
+                r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+                propc = nil
+            }
+            lead = r.lead
+        }
+
+        select {
+        // TODO: maybe buffer the config propose if there exists one (the way
+        // described in raft dissertation)
+        // Currently it is dropped in Step silently.
+ case m := <-propc: + m.From = r.id + r.Step(m) + case m := <-n.recvc: + // filter out response message from unknown From. + if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m.Type) { + r.Step(m) // raft never returns an error + } + case cc := <-n.confc: + if cc.NodeID == None { + r.resetPendingConf() + select { + case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: + case <-n.done: + } + break + } + switch cc.Type { + case pb.ConfChangeAddNode: + r.addNode(cc.NodeID) + case pb.ConfChangeRemoveNode: + // block incoming proposal when local node is + // removed + if cc.NodeID == r.id { + propc = nil + } + r.removeNode(cc.NodeID) + case pb.ConfChangeUpdateNode: + r.resetPendingConf() + default: + panic("unexpected conf type") + } + select { + case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: + case <-n.done: + } + case <-n.tickc: + r.tick() + case readyc <- rd: + if rd.SoftState != nil { + prevSoftSt = rd.SoftState + } + if len(rd.Entries) > 0 { + prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index + prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term + havePrevLastUnstablei = true + } + if !IsEmptyHardState(rd.HardState) { + prevHardSt = rd.HardState + } + if !IsEmptySnap(rd.Snapshot) { + prevSnapi = rd.Snapshot.Metadata.Index + } + + r.msgs = nil + r.readStates = nil + advancec = n.advancec + case <-advancec: + if prevHardSt.Commit != 0 { + r.raftLog.appliedTo(prevHardSt.Commit) + } + if havePrevLastUnstablei { + r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) + havePrevLastUnstablei = false + } + r.raftLog.stableSnapTo(prevSnapi) + advancec = nil + case c := <-n.status: + c <- getStatus(r) + case <-n.stop: + close(n.done) + return + } + } +} + +// Tick increments the internal logical clock for this Node. Election timeouts +// and heartbeat timeouts are in units of ticks. +func (n *node) Tick() { + select { + case n.tickc <- struct{}{}: + case <-n.done: + default: + n.logger.Warningf("A tick missed to fire. Node blocks too long!") + } +} + +func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } + +func (n *node) Propose(ctx context.Context, data []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) +} + +func (n *node) Step(ctx context.Context, m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + // TODO: return an error? + return nil + } + return n.step(ctx, m) +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { + data, err := cc.Marshal() + if err != nil { + return err + } + return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) +} + +// Step advances the state machine using msgs. The ctx.Err() will be returned, +// if any. 
+func (n *node) step(ctx context.Context, m pb.Message) error { + ch := n.recvc + if m.Type == pb.MsgProp { + ch = n.propc + } + + select { + case ch <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } +} + +func (n *node) Ready() <-chan Ready { return n.readyc } + +func (n *node) Advance() { + select { + case n.advancec <- struct{}{}: + case <-n.done: + } +} + +func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { + var cs pb.ConfState + select { + case n.confc <- cc: + case <-n.done: + } + select { + case cs = <-n.confstatec: + case <-n.done: + } + return &cs +} + +func (n *node) Status() Status { + c := make(chan Status) + select { + case n.status <- c: + return <-c + case <-n.done: + return Status{} + } +} + +func (n *node) ReportUnreachable(id uint64) { + select { + case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: + case <-n.done: + } +} + +func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + select { + case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: + case <-n.done: + } +} + +func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) { + select { + // manually set 'from' and 'to', so that leader can voluntarily transfers its leadership + case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}: + case <-n.done: + case <-ctx.Done(): + } +} + +func (n *node) ReadIndex(ctx context.Context, rctx []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} + +func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { + rd := Ready{ + Entries: r.raftLog.unstableEntries(), + CommittedEntries: r.raftLog.nextEnts(), + Messages: r.msgs, + } + if softSt := r.softState(); !softSt.equal(prevSoftSt) { + rd.SoftState = softSt + } + if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { + rd.HardState = hardSt + } + if r.raftLog.unstable.snapshot != nil { + rd.Snapshot = *r.raftLog.unstable.snapshot + } + if len(r.readStates) != 0 { + rd.ReadStates = r.readStates + } + return rd +} diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go new file mode 100644 index 0000000..77c7b52 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/progress.go @@ -0,0 +1,279 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import "fmt" + +const ( + ProgressStateProbe ProgressStateType = iota + ProgressStateReplicate + ProgressStateSnapshot +) + +type ProgressStateType uint64 + +var prstmap = [...]string{ + "ProgressStateProbe", + "ProgressStateReplicate", + "ProgressStateSnapshot", +} + +func (st ProgressStateType) String() string { return prstmap[uint64(st)] } + +// Progress represents a follower’s progress in the view of the leader. 
Leader maintains +// progresses of all followers, and sends entries to the follower based on its progress. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in ProgressStateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in ProgressStateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in ProgressStateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State ProgressStateType + // Paused is used in ProgressStateProbe. + // When Paused is true, raft should pause sending replication message to this peer. + Paused bool + // PendingSnapshot is used in ProgressStateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. + RecentActive bool + + // inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. + // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.freeTo with the index of the last + // received entry. + ins *inflights +} + +func (pr *Progress) resetState(state ProgressStateType) { + pr.Paused = false + pr.PendingSnapshot = 0 + pr.State = state + pr.ins.reset() +} + +func (pr *Progress) becomeProbe() { + // If the original state is ProgressStateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. + if pr.State == ProgressStateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.resetState(ProgressStateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.resetState(ProgressStateProbe) + pr.Next = pr.Match + 1 + } +} + +func (pr *Progress) becomeReplicate() { + pr.resetState(ProgressStateReplicate) + pr.Next = pr.Match + 1 +} + +func (pr *Progress) becomeSnapshot(snapshoti uint64) { + pr.resetState(ProgressStateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// maybeUpdate returns false if the given n index comes from an outdated message. +// Otherwise it updates the progress and returns true. 
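+// For example (hypothetical values):
+//
+//	pr := &Progress{Match: 5, Next: 9}
+//	pr.maybeUpdate(3) // false: stale ack, Match and Next unchanged
+//	pr.maybeUpdate(7) // true: Match becomes 7, Next stays 9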
+func (pr *Progress) maybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.resume() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } + +// maybeDecrTo returns false if the given to index comes from an out of order message. +// Otherwise it decreases the progress next index to min(rejected, last) and returns true. +func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { + if pr.State == ProgressStateReplicate { + // the rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // directly decrease next to match + 1 + pr.Next = pr.Match + 1 + return true + } + + // the rejection must be stale if "rejected" does not match next - 1 + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.resume() + return true +} + +func (pr *Progress) pause() { pr.Paused = true } +func (pr *Progress) resume() { pr.Paused = false } + +// IsPaused returns whether sending log entries to this node has been +// paused. A node may be paused because it has rejected recent +// MsgApps, is currently waiting for a snapshot, or has reached the +// MaxInflightMsgs limit. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case ProgressStateProbe: + return pr.Paused + case ProgressStateReplicate: + return pr.ins.full() + case ProgressStateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } + +// needSnapshotAbort returns true if snapshot progress's Match +// is equal or higher than the pendingSnapshot. +func (pr *Progress) needSnapshotAbort() bool { + return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot +} + +func (pr *Progress) String() string { + return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot) +} + +type inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +func newInflights(size int) *inflights { + return &inflights{ + size: size, + } +} + +// add adds an inflight into inflights +func (in *inflights) add(inflight uint64) { + if in.full() { + panic("cannot add into a full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.growBuf() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *inflights) growBuf() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// freeTo frees the inflights smaller or equal to the given `to` flight. 
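+// For example (hypothetical window):
+//
+//	in := newInflights(4)
+//	in.add(3); in.add(5); in.add(8)
+//	in.freeTo(5) // frees 3 and 5; only 8 remains inflight
+//	in.freeTo(2) // no-op: 2 is below the window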
+func (in *inflights) freeTo(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + i, idx := 0, in.start + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } + +// full returns true if the inflights is full. +func (in *inflights) full() bool { + return in.count == in.size +} + +// resets frees all inflights. +func (in *inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go new file mode 100644 index 0000000..633cc14 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -0,0 +1,1252 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "errors" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "sync" + "time" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// None is a placeholder node ID used when there is no leader. +const None uint64 = 0 +const noLimit = math.MaxUint64 + +// Possible values for StateType. +const ( + StateFollower StateType = iota + StateCandidate + StateLeader + StatePreCandidate + numStates +) + +type ReadOnlyOption int + +const ( + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + ReadOnlySafe ReadOnlyOption = iota + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyLeaseBased +) + +// Possible values for CampaignType +const ( + // campaignPreElection represents the first phase of a normal election when + // Config.PreVote is true. + campaignPreElection CampaignType = "CampaignPreElection" + // campaignElection represents a normal (time-based) election (the second phase + // of the election when Config.PreVote is true). + campaignElection CampaignType = "CampaignElection" + // campaignTransfer represents the type of leader transfer + campaignTransfer CampaignType = "CampaignTransfer" +) + +// lockedRand is a small wrapper around rand.Rand to provide +// synchronization. Only the methods needed by the code are exposed +// (e.g. Intn). 
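+// A single process may host many raft instances that all share globalRand
+// (defined below) when randomizing election timeouts, hence the mutex.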
+type lockedRand struct {
+ mu sync.Mutex
+ rand *rand.Rand
+}
+
+func (r *lockedRand) Intn(n int) int {
+ r.mu.Lock()
+ v := r.rand.Intn(n)
+ r.mu.Unlock()
+ return v
+}
+
+var globalRand = &lockedRand{
+ rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
+// CampaignType represents the type of campaigning.
+// The reason we use a string type instead of uint64 is that it is simpler
+// to compare and to fill in raft entries.
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+ "StateFollower",
+ "StateCandidate",
+ "StateLeader",
+ "StatePreCandidate",
+}
+
+func (st StateType) String() string {
+ return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+ // ID is the identity of the local raft. ID cannot be 0.
+ ID uint64
+
+ // peers contains the IDs of all nodes (including self) in the raft cluster. It
+ // should only be set when starting a new raft cluster. Restarting raft from
+ // previous configuration will panic if peers is set. peer is private and only
+ // used for testing right now.
+ peers []uint64
+
+ // ElectionTick is the number of Node.Tick invocations that must pass between
+ // elections. That is, if a follower does not receive any message from the
+ // leader of the current term before ElectionTick has elapsed, it will become
+ // a candidate and start an election. ElectionTick must be greater than
+ // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+ // unnecessary leader switching.
+ ElectionTick int
+ // HeartbeatTick is the number of Node.Tick invocations that must pass between
+ // heartbeats. That is, a leader sends heartbeat messages to maintain its
+ // leadership every HeartbeatTick ticks.
+ HeartbeatTick int
+
+ // Storage is the storage for raft. raft generates entries and states to be
+ // stored in storage. raft reads the persisted entries and states out of
+ // Storage when it needs them. raft also reads the previous state and
+ // configuration out of Storage when restarting.
+ Storage Storage
+ // Applied is the last applied index. It should only be set when restarting
+ // raft. raft will not return entries to the application smaller or equal to
+ // Applied. If Applied is unset when restarting, raft might return previously
+ // applied entries. This is a very application-dependent configuration.
+ Applied uint64
+
+ // MaxSizePerMsg limits the max size of each append message. A smaller value
+ // lowers the raft recovery cost (initial probing and messages lost during
+ // normal operation). On the other side, it might affect the throughput during
+ // normal replication. Note: math.MaxUint64 for unlimited, 0 for at most one
+ // entry per message.
+ MaxSizePerMsg uint64
+ // MaxInflightMsgs limits the max number of in-flight append messages during
+ // the optimistic replication phase. The application transportation layer
+ // usually has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to
+ // avoid overflowing that sending buffer. TODO (xiangli): feedback to
+ // application to limit the proposal rate?
+ MaxInflightMsgs int
+
+ // CheckQuorum specifies whether the leader should check quorum activity. The
+ // leader steps down when quorum is not active for an electionTimeout.
+ CheckQuorum bool
+
+ // PreVote enables the Pre-Vote algorithm described in raft thesis section
+ // 9.6. This prevents disruption when a node that has been partitioned away
+ // rejoins the cluster.
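+ // With PreVote enabled, a node first asks its peers whether they would
+ // grant a vote at the next term, and only increments its term (and
+ // campaigns for real) after a quorum answers yes.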
+ PreVote bool + + // ReadOnlyOption specifies how the read only request is processed. + // + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + // + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyOption ReadOnlyOption + + // Logger is the logger used for raft log. For multinode which can host + // multiple raft group, each raft group can have its own logger + Logger Logger +} + +func (c *Config) validate() error { + if c.ID == None { + return errors.New("cannot use none as id") + } + + if c.HeartbeatTick <= 0 { + return errors.New("heartbeat tick must be greater than 0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("election tick must be greater than heartbeat tick") + } + + if c.Storage == nil { + return errors.New("storage cannot be nil") + } + + if c.MaxInflightMsgs <= 0 { + return errors.New("max inflight messages must be greater than 0") + } + + if c.Logger == nil { + c.Logger = raftLogger + } + + return nil +} + +type raft struct { + id uint64 + + Term uint64 + Vote uint64 + + readStates []ReadState + + // the log + raftLog *raftLog + + maxInflight int + maxMsgSize uint64 + prs map[uint64]*Progress + + state StateType + + votes map[uint64]bool + + msgs []pb.Message + + // the leader id + lead uint64 + // leadTransferee is id of the leader transfer target when its value is not zero. + // Follow the procedure defined in raft thesis 3.10. + leadTransferee uint64 + // New configuration is ignored if there exists unapplied configuration. + pendingConf bool + + readOnly *readOnly + + // number of ticks since it reached last electionTimeout when it is leader + // or candidate. + // number of ticks since it reached last electionTimeout or received a + // valid message from current leader when it is a follower. + electionElapsed int + + // number of ticks since it reached last heartbeatTimeout. + // only leader keeps heartbeatElapsed. + heartbeatElapsed int + + checkQuorum bool + preVote bool + + heartbeatTimeout int + electionTimeout int + // randomizedElectionTimeout is a random number between + // [electiontimeout, 2 * electiontimeout - 1]. It gets reset + // when raft changes its state to follower or candidate. + randomizedElectionTimeout int + + tick func() + step stepFunc + + logger Logger +} + +func newRaft(c *Config) *raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + raftlog := newLog(c.Storage, c.Logger) + hs, cs, err := c.Storage.InitialState() + if err != nil { + panic(err) // TODO(bdarnell) + } + peers := c.peers + if len(cs.Nodes) > 0 { + if len(peers) > 0 { + // TODO(bdarnell): the peers argument is always nil except in + // tests; the argument should be removed and these tests should be + // updated to specify their nodes through a snapshot. 
+ panic("cannot specify both newRaft(peers) and ConfState.Nodes)") + } + peers = cs.Nodes + } + r := &raft{ + id: c.ID, + lead: None, + raftLog: raftlog, + maxMsgSize: c.MaxSizePerMsg, + maxInflight: c.MaxInflightMsgs, + prs: make(map[uint64]*Progress), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + } + for _, p := range peers { + r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} + } + if !isHardStateEqual(hs, emptyState) { + r.loadState(hs) + } + if c.Applied > 0 { + raftlog.appliedTo(c.Applied) + } + r.becomeFollower(r.Term, None) + + var nodesStrs []string + for _, n := range r.nodes() { + nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) + } + + r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", + r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) + return r +} + +func (r *raft) hasLeader() bool { return r.lead != None } + +func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } + +func (r *raft) hardState() pb.HardState { + return pb.HardState{ + Term: r.Term, + Vote: r.Vote, + Commit: r.raftLog.committed, + } +} + +func (r *raft) quorum() int { return len(r.prs)/2 + 1 } + +func (r *raft) nodes() []uint64 { + nodes := make([]uint64, 0, len(r.prs)) + for id := range r.prs { + nodes = append(nodes, id) + } + sort.Sort(uint64Slice(nodes)) + return nodes +} + +// send persists state to stable storage and then sends to its mailbox. +func (r *raft) send(m pb.Message) { + m.From = r.id + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + if m.Term == 0 { + // PreVote RPCs are sent at a term other than our actual term, so the code + // that sends these messages is responsible for setting the term. + panic(fmt.Sprintf("term should be set when sending %s", m.Type)) + } + } else { + if m.Term != 0 { + panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) + } + // do not attach term to MsgProp, MsgReadIndex + // proposals are a way to forward to the leader and + // should be treated as local message. + // MsgReadIndex is also forwarded to leader. + if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { + m.Term = r.Term + } + } + r.msgs = append(r.msgs, m) +} + +// sendAppend sends RPC, with entries to the given peer. 
+func (r *raft) sendAppend(to uint64) { + pr := r.prs[to] + if pr.IsPaused() { + return + } + m := pb.Message{} + m.To = to + + term, errt := r.raftLog.term(pr.Next - 1) + ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + + if errt != nil || erre != nil { // send snapshot if we failed to get term or entries + if !pr.RecentActive { + r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) + return + } + + m.Type = pb.MsgSnap + snapshot, err := r.raftLog.snapshot() + if err != nil { + if err == ErrSnapshotTemporarilyUnavailable { + r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) + return + } + panic(err) // TODO(bdarnell) + } + if IsEmptySnap(snapshot) { + panic("need non-empty snapshot") + } + m.Snapshot = snapshot + sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term + r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", + r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) + pr.becomeSnapshot(sindex) + r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) + } else { + m.Type = pb.MsgApp + m.Index = pr.Next - 1 + m.LogTerm = term + m.Entries = ents + m.Commit = r.raftLog.committed + if n := len(m.Entries); n != 0 { + switch pr.State { + // optimistically increase the next when in ProgressStateReplicate + case ProgressStateReplicate: + last := m.Entries[n-1].Index + pr.optimisticUpdate(last) + pr.ins.add(last) + case ProgressStateProbe: + pr.pause() + default: + r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) + } + } + } + r.send(m) +} + +// sendHeartbeat sends an empty MsgApp +func (r *raft) sendHeartbeat(to uint64, ctx []byte) { + // Attach the commit as min(to.matched, r.committed). + // When the leader sends out heartbeat message, + // the receiver(follower) might not be matched with the leader + // or it might not have all the committed entries. + // The leader MUST NOT forward the follower's commit to + // an unmatched index. + commit := min(r.prs[to].Match, r.raftLog.committed) + m := pb.Message{ + To: to, + Type: pb.MsgHeartbeat, + Commit: commit, + Context: ctx, + } + + r.send(m) +} + +// bcastAppend sends RPC, with entries to all peers that are not up-to-date +// according to the progress recorded in r.prs. +func (r *raft) bcastAppend() { + for id := range r.prs { + if id == r.id { + continue + } + r.sendAppend(id) + } +} + +// bcastHeartbeat sends RPC, without entries to all the peers. +func (r *raft) bcastHeartbeat() { + lastCtx := r.readOnly.lastPendingRequestCtx() + if len(lastCtx) == 0 { + r.bcastHeartbeatWithCtx(nil) + } else { + r.bcastHeartbeatWithCtx([]byte(lastCtx)) + } +} + +func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { + for id := range r.prs { + if id == r.id { + continue + } + r.sendHeartbeat(id, ctx) + } +} + +// maybeCommit attempts to advance the commit index. Returns true if +// the commit index changed (in which case the caller should call +// r.bcastAppend). +func (r *raft) maybeCommit() bool { + // TODO(bmizerany): optimize.. 
Currently naive + mis := make(uint64Slice, 0, len(r.prs)) + for id := range r.prs { + mis = append(mis, r.prs[id].Match) + } + sort.Sort(sort.Reverse(mis)) + mci := mis[r.quorum()-1] + return r.raftLog.maybeCommit(mci, r.Term) +} + +func (r *raft) reset(term uint64) { + if r.Term != term { + r.Term = term + r.Vote = None + } + r.lead = None + + r.electionElapsed = 0 + r.heartbeatElapsed = 0 + r.resetRandomizedElectionTimeout() + + r.abortLeaderTransfer() + + r.votes = make(map[uint64]bool) + for id := range r.prs { + r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)} + if id == r.id { + r.prs[id].Match = r.raftLog.lastIndex() + } + } + r.pendingConf = false + r.readOnly = newReadOnly(r.readOnly.option) +} + +func (r *raft) appendEntry(es ...pb.Entry) { + li := r.raftLog.lastIndex() + for i := range es { + es[i].Term = r.Term + es[i].Index = li + 1 + uint64(i) + } + r.raftLog.append(es...) + r.prs[r.id].maybeUpdate(r.raftLog.lastIndex()) + // Regardless of maybeCommit's return, our caller will call bcastAppend. + r.maybeCommit() +} + +// tickElection is run by followers and candidates after r.electionTimeout. +func (r *raft) tickElection() { + r.electionElapsed++ + + if r.promotable() && r.pastElectionTimeout() { + r.electionElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) + } +} + +// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. +func (r *raft) tickHeartbeat() { + r.heartbeatElapsed++ + r.electionElapsed++ + + if r.electionElapsed >= r.electionTimeout { + r.electionElapsed = 0 + if r.checkQuorum { + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + } + // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. + if r.state == StateLeader && r.leadTransferee != None { + r.abortLeaderTransfer() + } + } + + if r.state != StateLeader { + return + } + + if r.heartbeatElapsed >= r.heartbeatTimeout { + r.heartbeatElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) + } +} + +func (r *raft) becomeFollower(term uint64, lead uint64) { + r.step = stepFollower + r.reset(term) + r.tick = r.tickElection + r.lead = lead + r.state = StateFollower + r.logger.Infof("%x became follower at term %d", r.id, r.Term) +} + +func (r *raft) becomeCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> candidate]") + } + r.step = stepCandidate + r.reset(r.Term + 1) + r.tick = r.tickElection + r.Vote = r.id + r.state = StateCandidate + r.logger.Infof("%x became candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomePreCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> pre-candidate]") + } + // Becoming a pre-candidate changes our step functions and state, + // but doesn't change anything else. In particular it does not increase + // r.Term or change r.Vote. 
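+ // If the pre-vote is rejected by a quorum, the node simply falls back to
+ // follower at its current term; only a granted quorum leads to a real,
+ // term-incrementing campaign (see campaign and stepCandidate).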
+ r.step = stepCandidate + r.tick = r.tickElection + r.state = StatePreCandidate + r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomeLeader() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateFollower { + panic("invalid transition [follower -> leader]") + } + r.step = stepLeader + r.reset(r.Term) + r.tick = r.tickHeartbeat + r.lead = r.id + r.state = StateLeader + ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) + } + + nconf := numOfPendingConf(ents) + if nconf > 1 { + panic("unexpected multiple uncommitted config entry") + } + if nconf == 1 { + r.pendingConf = true + } + + r.appendEntry(pb.Entry{Data: nil}) + r.logger.Infof("%x became leader at term %d", r.id, r.Term) +} + +func (r *raft) campaign(t CampaignType) { + var term uint64 + var voteMsg pb.MessageType + if t == campaignPreElection { + r.becomePreCandidate() + voteMsg = pb.MsgPreVote + // PreVote RPCs are sent for the next term before we've incremented r.Term. + term = r.Term + 1 + } else { + r.becomeCandidate() + voteMsg = pb.MsgVote + term = r.Term + } + if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) { + // We won the election after voting for ourselves (which must mean that + // this is a single-node cluster). Advance to the next state. + if t == campaignPreElection { + r.campaign(campaignElection) + } else { + r.becomeLeader() + } + return + } + for id := range r.prs { + if id == r.id { + continue + } + r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term) + + var ctx []byte + if t == campaignTransfer { + ctx = []byte(t) + } + r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) + } +} + +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) { + if v { + r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) + } else { + r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) + } + if _, ok := r.votes[id]; !ok { + r.votes[id] = v + } + for _, vv := range r.votes { + if vv { + granted++ + } + } + return granted +} + +func (r *raft) Step(m pb.Message) error { + // Handle the message term, which may result in our stepping down to a follower. + switch { + case m.Term == 0: + // local message + case m.Term > r.Term: + lead := m.From + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + force := bytes.Equal(m.Context, []byte(campaignTransfer)) + inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout + if !force && inLease { + // If a server receives a RequestVote request within the minimum election timeout + // of hearing from a current leader, it does not update its term or grant its vote + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) + return nil + } + lead = None + } + switch { + case m.Type == pb.MsgPreVote: + // Never change our term in response to a PreVote + case m.Type == pb.MsgPreVoteResp && !m.Reject: + // We send pre-vote requests with a term in our future. 
If the + // pre-vote is granted, we will increment our term when we get a + // quorum. If it is not, the term comes from the node that + // rejected our vote so we should become a follower at the new + // term. + default: + r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + r.becomeFollower(m.Term, lead) + } + + case m.Term < r.Term: + if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + // We have received messages from a leader at a lower term. It is possible + // that these messages were simply delayed in the network, but this could + // also mean that this node has advanced its term number during a network + // partition, and it is now unable to either win an election or to rejoin + // the majority on the old term. If checkQuorum is false, this will be + // handled by incrementing term numbers in response to MsgVote with a + // higher term, but if checkQuorum is true we may not advance the term on + // MsgVote and must generate other messages to advance the term. The net + // result of these two features is to minimize the disruption caused by + // nodes that have been removed from the cluster's configuration: a + // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, + // but it will not receive MsgApp or MsgHeartbeat, so it will not create + // disruptive term increases + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else { + // ignore other cases + r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + } + return nil + } + + switch m.Type { + case pb.MsgHup: + if r.state != StateLeader { + ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) + } + if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { + r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n) + return nil + } + + r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) + if r.preVote { + r.campaign(campaignPreElection) + } else { + r.campaign(campaignElection) + } + } else { + r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) + } + + case pb.MsgVote, pb.MsgPreVote: + // The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should + // always equal r.Term. + if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type)}) + if m.Type == pb.MsgVote { + // Only record real votes. + r.electionElapsed = 0 + r.Vote = m.From + } + } else { + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type), Reject: true}) + } + + default: + r.step(r, m) + } + return nil +} + +type stepFunc func(r *raft, m pb.Message) + +func stepLeader(r *raft, m pb.Message) { + // These message types do not require any progress for m.From. 
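+ // (MsgBeat, MsgCheckQuorum, MsgProp and MsgReadIndex are handled here and
+ // return early; every case after this switch needs a Progress for m.From.)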
+ switch m.Type { + case pb.MsgBeat: + r.bcastHeartbeat() + return + case pb.MsgCheckQuorum: + if !r.checkQuorumActive() { + r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) + r.becomeFollower(r.Term, None) + } + return + case pb.MsgProp: + if len(m.Entries) == 0 { + r.logger.Panicf("%x stepped empty MsgProp", r.id) + } + if _, ok := r.prs[r.id]; !ok { + // If we are not currently a member of the range (i.e. this node + // was removed from the configuration while serving as leader), + // drop any new proposals. + return + } + if r.leadTransferee != None { + r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) + return + } + + for i, e := range m.Entries { + if e.Type == pb.EntryConfChange { + if r.pendingConf { + r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String()) + m.Entries[i] = pb.Entry{Type: pb.EntryNormal} + } + r.pendingConf = true + } + } + r.appendEntry(m.Entries...) + r.bcastAppend() + return + case pb.MsgReadIndex: + if r.quorum() > 1 { + // thinking: use an interally defined context instead of the user given context. + // We can express this in terms of the term and index instead of a user-supplied value. + // This would allow multiple reads to piggyback on the same message. + switch r.readOnly.option { + case ReadOnlySafe: + r.readOnly.addRequest(r.raftLog.committed, m) + r.bcastHeartbeatWithCtx(m.Entries[0].Data) + case ReadOnlyLeaseBased: + var ri uint64 + if r.checkQuorum { + ri = r.raftLog.committed + } + if m.From == None || m.From == r.id { // from local member + r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries}) + } + } + } else { + r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } + + return + } + + // All other message types require a progress for m.From (pr). + pr, prOk := r.prs[m.From] + if !prOk { + r.logger.Debugf("%x no progress available for %x", r.id, m.From) + return + } + switch m.Type { + case pb.MsgAppResp: + pr.RecentActive = true + + if m.Reject { + r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d", + r.id, m.RejectHint, m.From, m.Index) + if pr.maybeDecrTo(m.Index, m.RejectHint) { + r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) + if pr.State == ProgressStateReplicate { + pr.becomeProbe() + } + r.sendAppend(m.From) + } + } else { + oldPaused := pr.IsPaused() + if pr.maybeUpdate(m.Index) { + switch { + case pr.State == ProgressStateProbe: + pr.becomeReplicate() + case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort(): + r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + pr.becomeProbe() + case pr.State == ProgressStateReplicate: + pr.ins.freeTo(m.Index) + } + + if r.maybeCommit() { + r.bcastAppend() + } else if oldPaused { + // update() reset the wait state on this node. If we had delayed sending + // an update before, send it now. + r.sendAppend(m.From) + } + // Transfer leadership is in progress. 
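+ // Once the transfer target's log has caught up to the leader's last
+ // index, tell it to campaign immediately rather than wait for a timeout.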
+ if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { + r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) + r.sendTimeoutNow(m.From) + } + } + } + case pb.MsgHeartbeatResp: + pr.RecentActive = true + pr.resume() + + // free one slot for the full inflights window to allow progress. + if pr.State == ProgressStateReplicate && pr.ins.full() { + pr.ins.freeFirstOne() + } + if pr.Match < r.raftLog.lastIndex() { + r.sendAppend(m.From) + } + + if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { + return + } + + ackCount := r.readOnly.recvAck(m) + if ackCount < r.quorum() { + return + } + + rss := r.readOnly.advance(m) + for _, rs := range rss { + req := rs.req + if req.From == None || req.From == r.id { // from local member + r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data}) + } else { + r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries}) + } + } + case pb.MsgSnapStatus: + if pr.State != ProgressStateSnapshot { + return + } + if !m.Reject { + pr.becomeProbe() + r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } else { + pr.snapshotFailure() + pr.becomeProbe() + r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } + // If snapshot finish, wait for the msgAppResp from the remote node before sending + // out the next msgApp. + // If snapshot failure, wait for a heartbeat interval before next try + pr.pause() + case pb.MsgUnreachable: + // During optimistic replication, if the remote becomes unreachable, + // there is huge probability that a MsgApp is lost. + if pr.State == ProgressStateReplicate { + pr.becomeProbe() + } + r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) + case pb.MsgTransferLeader: + leadTransferee := m.From + lastLeadTransferee := r.leadTransferee + if lastLeadTransferee != None { + if lastLeadTransferee == leadTransferee { + r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x", + r.id, r.Term, leadTransferee, leadTransferee) + return + } + r.abortLeaderTransfer() + r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee) + } + if leadTransferee == r.id { + r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id) + return + } + // Transfer leadership to third party. + r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) + // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed. + r.electionElapsed = 0 + r.leadTransferee = leadTransferee + if pr.Match == r.raftLog.lastIndex() { + r.sendTimeoutNow(leadTransferee) + r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee) + } else { + r.sendAppend(leadTransferee) + } + } +} + +// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is +// whether they respond to MsgVoteResp or MsgPreVoteResp. +func stepCandidate(r *raft, m pb.Message) { + // Only handle vote responses corresponding to our candidacy (while in + // StateCandidate, we may get stale MsgPreVoteResp messages in this term from + // our pre-candidate state). 
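+ // The myVoteRespType match below therefore ignores the mismatched
+ // response type instead of double-counting it.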
+ var myVoteRespType pb.MessageType + if r.state == StatePreCandidate { + myVoteRespType = pb.MsgPreVoteResp + } else { + myVoteRespType = pb.MsgVoteResp + } + switch m.Type { + case pb.MsgProp: + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return + case pb.MsgApp: + r.becomeFollower(r.Term, m.From) + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.becomeFollower(r.Term, m.From) + r.handleHeartbeat(m) + case pb.MsgSnap: + r.becomeFollower(m.Term, m.From) + r.handleSnapshot(m) + case myVoteRespType: + gr := r.poll(m.From, m.Type, !m.Reject) + r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr) + switch r.quorum() { + case gr: + if r.state == StatePreCandidate { + r.campaign(campaignElection) + } else { + r.becomeLeader() + r.bcastAppend() + } + case len(r.votes) - gr: + r.becomeFollower(r.Term, None) + } + case pb.MsgTimeoutNow: + r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) + } +} + +func stepFollower(r *raft, m pb.Message) { + switch m.Type { + case pb.MsgProp: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return + } + m.To = r.lead + r.send(m) + case pb.MsgApp: + r.electionElapsed = 0 + r.lead = m.From + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.electionElapsed = 0 + r.lead = m.From + r.handleHeartbeat(m) + case pb.MsgSnap: + r.electionElapsed = 0 + r.lead = m.From + r.handleSnapshot(m) + case pb.MsgTransferLeader: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) + return + } + m.To = r.lead + r.send(m) + case pb.MsgTimeoutNow: + if r.promotable() { + r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From) + // Leadership transfers never use pre-vote even if r.preVote is true; we + // know we are not recovering from a partition so there is no need for the + // extra round trip. 
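+ // campaignTransfer also marks the resulting vote requests as forced, so
+ // peers grant them even inside the leader-lease window (see Step).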
+ r.campaign(campaignTransfer) + } else { + r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From) + } + case pb.MsgReadIndex: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term) + return + } + m.To = r.lead + r.send(m) + case pb.MsgReadIndexResp: + if len(m.Entries) != 1 { + r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries)) + return + } + r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) + } +} + +func (r *raft) handleAppendEntries(m pb.Message) { + if m.Index < r.raftLog.committed { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + return + } + + if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) + } else { + r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x", + r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) + } +} + +func (r *raft) handleHeartbeat(m pb.Message) { + r.raftLog.commitTo(m.Commit) + r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context}) +} + +func (r *raft) handleSnapshot(m pb.Message) { + sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term + if r.restore(m.Snapshot) { + r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()}) + } else { + r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + } +} + +// restore recovers the state machine from a snapshot. It restores the log and the +// configuration of state machine. +func (r *raft) restore(s pb.Snapshot) bool { + if s.Metadata.Index <= r.raftLog.committed { + return false + } + if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + r.raftLog.commitTo(s.Metadata.Index) + return false + } + + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + + r.raftLog.restore(s) + r.prs = make(map[uint64]*Progress) + for _, n := range s.Metadata.ConfState.Nodes { + match, next := uint64(0), r.raftLog.lastIndex()+1 + if n == r.id { + match = next - 1 + } + r.setProgress(n, match, next) + r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n]) + } + return true +} + +// promotable indicates whether state machine can be promoted to leader, +// which is true when its own id is in progress list. 
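+// A node that has been removed from the configuration, or that restored a
+// snapshot whose ConfState does not include it, has no Progress for itself
+// and therefore must not campaign.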
+func (r *raft) promotable() bool { + _, ok := r.prs[r.id] + return ok +} + +func (r *raft) addNode(id uint64) { + r.pendingConf = false + if _, ok := r.prs[id]; ok { + // Ignore any redundant addNode calls (which can happen because the + // initial bootstrapping entries are applied twice). + return + } + + r.setProgress(id, 0, r.raftLog.lastIndex()+1) + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has a chance to communicate with us. + r.prs[id].RecentActive = true +} + +func (r *raft) removeNode(id uint64) { + r.delProgress(id) + r.pendingConf = false + + // do not try to commit or abort transferring if there is no nodes in the cluster. + if len(r.prs) == 0 { + return + } + + // The quorum size is now smaller, so see if any pending entries can + // be committed. + if r.maybeCommit() { + r.bcastAppend() + } + // If the removed node is the leadTransferee, then abort the leadership transferring. + if r.state == StateLeader && r.leadTransferee == id { + r.abortLeaderTransfer() + } +} + +func (r *raft) resetPendingConf() { r.pendingConf = false } + +func (r *raft) setProgress(id, match, next uint64) { + r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)} +} + +func (r *raft) delProgress(id uint64) { + delete(r.prs, id) +} + +func (r *raft) loadState(state pb.HardState) { + if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { + r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) + } + r.raftLog.committed = state.Commit + r.Term = state.Term + r.Vote = state.Vote +} + +// pastElectionTimeout returns true iff r.electionElapsed is greater +// than or equal to the randomized election timeout in +// [electiontimeout, 2 * electiontimeout - 1]. +func (r *raft) pastElectionTimeout() bool { + return r.electionElapsed >= r.randomizedElectionTimeout +} + +func (r *raft) resetRandomizedElectionTimeout() { + r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) +} + +// checkQuorumActive returns true if the quorum is active from +// the view of the local raft state machine. Otherwise, it returns +// false. +// checkQuorumActive also resets all RecentActive to false. +func (r *raft) checkQuorumActive() bool { + var act int + + for id := range r.prs { + if id == r.id { // self is always active + act++ + continue + } + + if r.prs[id].RecentActive { + act++ + } + + r.prs[id].RecentActive = false + } + + return act >= r.quorum() +} + +func (r *raft) sendTimeoutNow(to uint64) { + r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) +} + +func (r *raft) abortLeaderTransfer() { + r.leadTransferee = None +} + +func numOfPendingConf(ents []pb.Entry) int { + n := 0 + for i := range ents { + if ents[i].Type == pb.EntryConfChange { + n++ + } + } + return n +} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 0000000..86ad312 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,1900 @@ +// Code generated by protoc-gen-gogo. +// source: raft.proto +// DO NOT EDIT! + +/* + Package raftpb is a generated protocol buffer package. 
+ + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type ConfChangeType int32 + +const ( + 
ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return 
proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` + NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` + Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if 
err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, num := 
range m.Nodes { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Raft(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() 
(n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + n += 1 + sovRaft(uint64(e)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 790 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, + 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, + 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, + 0x80, 0x3c, 0x40, 0x2e, 0x79, 0x1f, 0x1f, 0x0d, 0xe4, 0x1e, 0xc4, 0xce, 0x8b, 0x04, 0xbb, 0x5c, + 0x4a, 0x94, 0x74, 0xdb, 0xf9, 0xbe, 0xe1, 0xcc, 0x37, 0xdf, 0xce, 0x12, 0x40, 0xd0, 0xa9, 0x3c, + 0x8e, 0x04, 0x97, 0x1c, 0x17, 0xd5, 0x39, 0xba, 0xde, 0x6f, 0xf8, 0xdc, 0xe7, 0x1a, 0xfa, 
0x4d, + 0x9d, 0x12, 0xb6, 0xfd, 0x0e, 0x0a, 0x7f, 0x87, 0x52, 0xdc, 0xe3, 0x5f, 0xc1, 0x19, 0xdf, 0x47, + 0x8c, 0x58, 0x2d, 0xab, 0x53, 0xeb, 0xd6, 0x8f, 0x93, 0xaf, 0x8e, 0x35, 0xa9, 0x88, 0x53, 0xe7, + 0xe1, 0xcb, 0x4f, 0xb9, 0x91, 0x4e, 0xc2, 0x04, 0x9c, 0x31, 0x13, 0x01, 0xb1, 0x5b, 0x56, 0xc7, + 0x59, 0x32, 0x4c, 0x04, 0x78, 0x1f, 0x0a, 0x83, 0xd0, 0x63, 0x77, 0x24, 0x9f, 0xa1, 0x12, 0x08, + 0x63, 0x70, 0xfa, 0x54, 0x52, 0xe2, 0xb4, 0xac, 0x4e, 0x75, 0xa4, 0xcf, 0xed, 0xf7, 0x16, 0xa0, + 0xcb, 0x90, 0x46, 0xf1, 0x8c, 0xcb, 0x21, 0x93, 0xd4, 0xa3, 0x92, 0xe2, 0x3f, 0x01, 0x26, 0x3c, + 0x9c, 0xbe, 0x8a, 0x25, 0x95, 0x89, 0x22, 0x77, 0xa5, 0xa8, 0xc7, 0xc3, 0xe9, 0xa5, 0x22, 0x4c, + 0xf1, 0xca, 0x24, 0x05, 0x54, 0xf3, 0xb9, 0x6e, 0x9e, 0xd5, 0x95, 0x40, 0x4a, 0xb2, 0x54, 0x92, + 0xb3, 0xba, 0x34, 0xd2, 0xfe, 0x1f, 0xca, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x3d, 0xab, 0x23, + 0x7d, 0xc6, 0x7f, 0x41, 0x39, 0x30, 0xca, 0x74, 0x61, 0xb7, 0x4b, 0x52, 0x2d, 0x9b, 0xca, 0x4d, + 0xdd, 0x65, 0x7e, 0xfb, 0x53, 0x1e, 0x4a, 0x43, 0x16, 0xc7, 0xd4, 0x67, 0xf8, 0x08, 0x1c, 0xb9, + 0x72, 0x78, 0x2f, 0xad, 0x61, 0xe8, 0xac, 0xc7, 0x2a, 0x0d, 0x37, 0xc0, 0x96, 0x7c, 0x6d, 0x12, + 0x5b, 0x72, 0x35, 0xc6, 0x54, 0xf0, 0x8d, 0x31, 0x14, 0xb2, 0x1c, 0xd0, 0xd9, 0x1c, 0x10, 0x37, + 0xa1, 0x74, 0xc3, 0x7d, 0x7d, 0x61, 0x85, 0x0c, 0x99, 0x82, 0x2b, 0xdb, 0x8a, 0xdb, 0xb6, 0x1d, + 0x41, 0x89, 0x85, 0x52, 0xcc, 0x59, 0x4c, 0x4a, 0xad, 0x7c, 0xc7, 0xed, 0xee, 0xac, 0x6d, 0x46, + 0x5a, 0xca, 0xe4, 0xe0, 0x03, 0x28, 0x4e, 0x78, 0x10, 0xcc, 0x25, 0x29, 0x67, 0x6a, 0x19, 0x0c, + 0x77, 0xa1, 0x1c, 0x1b, 0xc7, 0x48, 0x45, 0x3b, 0x89, 0x36, 0x9d, 0x4c, 0x1d, 0x4c, 0xf3, 0x54, + 0x45, 0xc1, 0x5e, 0xb3, 0x89, 0x24, 0xd0, 0xb2, 0x3a, 0xe5, 0xb4, 0x62, 0x82, 0xe1, 0x5f, 0x00, + 0x92, 0xd3, 0xd9, 0x3c, 0x94, 0xc4, 0xcd, 0xf4, 0xcc, 0xe0, 0x98, 0x40, 0x69, 0xc2, 0x43, 0xc9, + 0xee, 0x24, 0xa9, 0xea, 0x8b, 0x4d, 0xc3, 0xf6, 0x4b, 0xa8, 0x9c, 0x51, 0xe1, 0x25, 0xeb, 0x93, + 0x3a, 0x68, 0x6d, 0x39, 0x48, 0xc0, 0xb9, 0xe5, 0x92, 0xad, 0xef, 0xbb, 0x42, 0x32, 0x03, 0xe7, + 0xb7, 0x07, 0x6e, 0xff, 0x0c, 0x95, 0xe5, 0xba, 0xe2, 0x06, 0x14, 0x42, 0xee, 0xb1, 0x98, 0x58, + 0xad, 0x7c, 0xc7, 0x19, 0x25, 0x41, 0xfb, 0x83, 0x05, 0xa0, 0x72, 0x7a, 0x33, 0x1a, 0xfa, 0xfa, + 0xd6, 0x07, 0xfd, 0x35, 0x05, 0xf6, 0xa0, 0x8f, 0x7f, 0x37, 0x8f, 0xd3, 0xd6, 0xab, 0xf3, 0x63, + 0xf6, 0x29, 0x24, 0xdf, 0x6d, 0xbd, 0xd0, 0x03, 0x28, 0x9e, 0x73, 0x8f, 0x0d, 0xfa, 0xeb, 0xba, + 0x12, 0x4c, 0x19, 0xd2, 0x33, 0x86, 0x24, 0x8f, 0x31, 0x0d, 0x0f, 0xff, 0x80, 0xca, 0xf2, 0xc9, + 0xe3, 0x5d, 0x70, 0x75, 0x70, 0xce, 0x45, 0x40, 0x6f, 0x50, 0x0e, 0xef, 0xc1, 0xae, 0x06, 0x56, + 0x8d, 0x91, 0x75, 0xf8, 0xd9, 0x06, 0x37, 0xb3, 0xc4, 0x18, 0xa0, 0x38, 0x8c, 0xfd, 0xb3, 0x45, + 0x84, 0x72, 0xd8, 0x85, 0xd2, 0x30, 0xf6, 0x4f, 0x19, 0x95, 0xc8, 0x32, 0xc1, 0x85, 0xe0, 0x11, + 0xb2, 0x4d, 0xd6, 0x49, 0x14, 0xa1, 0x3c, 0xae, 0x01, 0x24, 0xe7, 0x11, 0x8b, 0x23, 0xe4, 0x98, + 0xc4, 0xff, 0xb8, 0x64, 0xa8, 0xa0, 0x44, 0x98, 0x40, 0xb3, 0x45, 0xc3, 0xaa, 0x85, 0x41, 0x25, + 0x8c, 0xa0, 0xaa, 0x9a, 0x31, 0x2a, 0xe4, 0xb5, 0xea, 0x52, 0xc6, 0x0d, 0x40, 0x59, 0x44, 0x7f, + 0x54, 0xc1, 0x18, 0x6a, 0xc3, 0xd8, 0xbf, 0x0a, 0x05, 0xa3, 0x93, 0x19, 0xbd, 0xbe, 0x61, 0x08, + 0x70, 0x1d, 0x76, 0x4c, 0x21, 0x75, 0x41, 0x8b, 0x18, 0xb9, 0x26, 0xad, 0x37, 0x63, 0x93, 0x37, + 0xff, 0x2e, 0xb8, 0x58, 0x04, 0xa8, 0x8a, 0x7f, 0x80, 0xfa, 0x30, 0xf6, 0xc7, 0x82, 0x86, 0xf1, + 0x94, 0x89, 0x7f, 0x18, 0xf5, 0x98, 0x40, 0x3b, 0xe6, 0xeb, 0xf1, 0x3c, 0x60, 0x7c, 0x21, 0xcf, + 0xf9, 0x5b, 0x54, 
0x33, 0x62, 0x46, 0x8c, 0x7a, 0xfa, 0x87, 0x87, 0x76, 0x8d, 0x98, 0x25, 0xa2, + 0xc5, 0x20, 0x33, 0xef, 0x85, 0x60, 0x7a, 0xc4, 0xba, 0xe9, 0x6a, 0x62, 0x9d, 0x83, 0x0f, 0x5f, + 0x40, 0x6d, 0xfd, 0x7a, 0x95, 0x8e, 0x15, 0x72, 0xe2, 0x79, 0xea, 0x2e, 0x51, 0x0e, 0x13, 0x68, + 0xac, 0xe0, 0x11, 0x0b, 0xf8, 0x2d, 0xd3, 0x8c, 0xb5, 0xce, 0x5c, 0x45, 0x1e, 0x95, 0x09, 0x63, + 0x9f, 0x92, 0x87, 0xa7, 0x66, 0xee, 0xf1, 0xa9, 0x99, 0x7b, 0x78, 0x6e, 0x5a, 0x8f, 0xcf, 0x4d, + 0xeb, 0xeb, 0x73, 0xd3, 0xfa, 0xf8, 0xad, 0x99, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x30, + 0x01, 0x41, 0x3a, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto new file mode 100644 index 0000000..806a436 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto @@ -0,0 +1,93 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +message ConfState { + repeated uint64 nodes = 1; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; +} + +message ConfChange { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; + optional 
uint64 NodeID = 3 [(gogoproto.nullable) = false];
+	optional bytes          Context = 4;
+}
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
new file mode 100644
index 0000000..b950d51
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/rawnode.go
@@ -0,0 +1,264 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but no peer is found in raft.prs for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft       *raft
+	prevSoftSt *SoftState
+	prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+func (rn *RawNode) commitReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if rn.prevHardSt.Commit != 0 {
+		// In most cases, prevHardSt and rd.HardState will be the same
+		// because when there are new entries to apply we just sent a
+		// HardState with an updated Commit value. However, on initial
+		// startup the two are different because we don't send a HardState
+		// until something changes, but we do send any un-applied but
+		// committed entries (and previously-committed entries may be
+		// incorporated into the snapshot, even if rd.CommittedEntries is
+		// empty). Therefore we mark all committed entries as applied
+		// whether they were included in rd.HardState or not.
+		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
+	}
+	if len(rd.Entries) > 0 {
+		e := rd.Entries[len(rd.Entries)-1]
+		rn.raft.raftLog.stableTo(e.Index, e.Term)
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
+}
+
+// NewRawNode returns a new RawNode given configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+	if config.ID == 0 {
+		panic("config.ID must not be zero")
+	}
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	lastIndex, err := config.Storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+	// restoring an existing RawNode (like RestartNode).
+	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
+	// to be able to tell us when it expects the RawNode to exist. 
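+	// Bootstrap path: an empty log means this node is starting a brand-new
+	// cluster, so seed the log with one committed ConfChangeAddNode entry per
+	// initial peer (these are the bootstrapping entries that addNode notes
+	// can be applied twice).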
+	if lastIndex == 0 {
+		r.becomeFollower(1, None)
+		ents := make([]pb.Entry, len(peers))
+		for i, peer := range peers {
+			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+			data, err := cc.Marshal()
+			if err != nil {
+				panic("unexpected marshal error")
+			}
+
+			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+		}
+		r.raftLog.append(ents...)
+		r.raftLog.committed = uint64(len(ents))
+		for _, peer := range peers {
+			r.addNode(peer.ID)
+		}
+	}
+
+	// Set the initial hard and soft states after performing all initialization.
+	rn.prevSoftSt = r.softState()
+	if lastIndex == 0 {
+		rn.prevHardSt = emptyState
+	} else {
+		rn.prevHardSt = r.hardState()
+	}
+
+	return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes that data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		Entries: []pb.Entry{
+			{Type: pb.EntryConfChange, Data: data},
+		},
+	})
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	if cc.NodeID == None {
+		rn.raft.resetPendingConf()
+		return &pb.ConfState{Nodes: rn.raft.nodes()}
+	}
+	switch cc.Type {
+	case pb.ConfChangeAddNode:
+		rn.raft.addNode(cc.NodeID)
+	case pb.ConfChangeRemoveNode:
+		rn.raft.removeNode(cc.NodeID)
+	case pb.ConfChangeUpdateNode:
+		rn.raft.resetPendingConf()
+	default:
+		panic("unexpected conf type")
+	}
+	return &pb.ConfState{Nodes: rn.raft.nodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		return ErrStepLocalMsg
+	}
+	if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m.Type) {
+		return rn.raft.Step(m)
+	}
+	return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+	rd := rn.newReady()
+	rn.raft.msgs = nil
+	return rd
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready is pending.
+// Checking logic in this method should be consistent with Ready.containsUpdates(). 
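+//
+// A minimal illustrative driver loop (an editorial sketch, not upstream etcd
+// documentation; persistence and transport steps are elided as comments):
+//
+//	for {
+//		if rn.HasReady() {
+//			rd := rn.Ready()
+//			// persist rd.HardState and rd.Entries, apply rd.Snapshot if any,
+//			// send rd.Messages to peers, apply rd.CommittedEntries, then:
+//			rn.Advance(rd)
+//		}
+//	}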
+func (rn *RawNode) HasReady() bool { + r := rn.raft + if !r.softState().equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { + return true + } + if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +func (rn *RawNode) Advance(rd Ready) { + rn.commitReady(rd) +} + +// Status returns the current status of the given group. +func (rn *RawNode) Status() *Status { + status := getStatus(rn.raft) + return &status +} + +// ReportUnreachable reports the given node is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. +func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go new file mode 100644 index 0000000..05a21da --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/read_only.go @@ -0,0 +1,118 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/coreos/etcd/raft/raftpb" + +// ReadState provides state for read only query. +// It's caller's responsibility to call ReadIndex first before getting +// this state from ready, It's also caller's duty to differentiate if this +// state is what it requests through RequestCtx, eg. 
given a unique id as
+// RequestCtx
+type ReadState struct {
+	Index      uint64
+	RequestCtx []byte
+}
+
+type readIndexStatus struct {
+	req   pb.Message
+	index uint64
+	acks  map[uint64]struct{}
+}
+
+type readOnly struct {
+	option           ReadOnlyOption
+	pendingReadIndex map[string]*readIndexStatus
+	readIndexQueue   []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+	return &readOnly{
+		option:           option,
+		pendingReadIndex: make(map[string]*readIndexStatus),
+	}
+}
+
+// addRequest adds a read-only request into the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+	ctx := string(m.Entries[0].Data)
+	if _, ok := ro.pendingReadIndex[ctx]; ok {
+		return
+	}
+	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
+	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat attached to the read-only request
+// context.
+func (ro *readOnly) recvAck(m pb.Message) int {
+	rs, ok := ro.pendingReadIndex[string(m.Context)]
+	if !ok {
+		return 0
+	}
+
+	rs.acks[m.From] = struct{}{}
+	// add one to include an ack from the local node
+	return len(rs.acks) + 1
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+	var (
+		i     int
+		found bool
+	)
+
+	ctx := string(m.Context)
+	rss := []*readIndexStatus{}
+
+	for _, okctx := range ro.readIndexQueue {
+		i++
+		rs, ok := ro.pendingReadIndex[okctx]
+		if !ok {
+			panic("cannot find corresponding read state from pending map")
+		}
+		rss = append(rss, rs)
+		if okctx == ctx {
+			found = true
+			break
+		}
+	}
+
+	if found {
+		ro.readIndexQueue = ro.readIndexQueue[i:]
+		for _, rs := range rss {
+			delete(ro.pendingReadIndex, string(rs.req.Context))
+		}
+		return rss
+	}
+
+	return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+	if len(ro.readIndexQueue) == 0 {
+		return ""
+	}
+	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
new file mode 100644
index 0000000..b690fa5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/status.go
@@ -0,0 +1,76 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
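+
+// NOTE: an illustrative, non-authoritative usage sketch (editorial addition):
+// an application holding a RawNode `rn` can inspect and log its status, which
+// is the same snapshot that MarshalJSON below serializes.
+//
+//	st := rn.Status()
+//	fmt.Printf("raft %x: term=%d commit=%d state=%s\n", st.ID, st.Term, st.Commit, st.RaftState)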
+ +package raft + +import ( + "fmt" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type Status struct { + ID uint64 + + pb.HardState + SoftState + + Applied uint64 + Progress map[uint64]Progress +} + +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + s := Status{ID: r.id} + s.HardState = r.hardState() + s.SoftState = *r.softState() + + s.Applied = r.raftLog.applied + + if s.RaftState == StateLeader { + s.Progress = make(map[uint64]Progress) + for id, p := range r.prs { + s.Progress[id] = *p + } + } + + return s +} + +// MarshalJSON translates the raft status into JSON. +// TODO: try to simplify this by introducing ID type into raft +func (s Status) MarshalJSON() ([]byte, error) { + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState) + + if len(s.Progress) == 0 { + j += "}}" + } else { + for k, v := range s.Progress { + subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) + j += subj + } + // remove the trailing "," + j = j[:len(j)-1] + "}}" + } + return []byte(j), nil +} + +func (s Status) String() string { + b, err := s.MarshalJSON() + if err != nil { + raftLogger.Panicf("unexpected error: %v", err) + } + return string(b) +} diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go new file mode 100644 index 0000000..69c3a7d --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/storage.go @@ -0,0 +1,271 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "sync" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// ErrCompacted is returned by Storage.Entries/Compact when a requested +// index is unavailable because it predates the last snapshot. +var ErrCompacted = errors.New("requested index is unavailable due to compaction") + +// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested +// index is older than the existing snapshot. +var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") + +// ErrUnavailable is returned by Storage interface when the requested log entries +// are unavailable. +var ErrUnavailable = errors.New("requested entry at index is unavailable") + +// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required +// snapshot is temporarily unavailable. +var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") + +// Storage is an interface that may be implemented by the application +// to retrieve log entries from storage. +// +// If any Storage method returns an error, the raft instance will +// become inoperable and refuse to participate in elections; the +// application is responsible for cleanup and recovery in this case. +type Storage interface { + // InitialState returns the saved HardState and ConfState information. 
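+	// (MemoryStorage below, for instance, serves this from the HardState
+	// set via SetHardState and the ConfState of its latest snapshot.)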
+ InitialState() (pb.HardState, pb.ConfState, error) + // Entries returns a slice of log entries in the range [lo,hi). + // MaxSize limits the total size of the log entries returned, but + // Entries returns at least one entry if any. + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range + // [FirstIndex()-1, LastIndex()]. The term of the entry before + // FirstIndex is retained for matching purposes even though the + // rest of that entry may not be available. + Term(i uint64) (uint64, error) + // LastIndex returns the index of the last entry in the log. + LastIndex() (uint64, error) + // FirstIndex returns the index of the first log entry that is + // possibly available via Entries (older entries have been incorporated + // into the latest Snapshot; if storage only contains the dummy entry the + // first log entry is not available). + FirstIndex() (uint64, error) + // Snapshot returns the most recent snapshot. + // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, + // so raft state machine could know that Storage needs some time to prepare + // snapshot and call Snapshot later. + Snapshot() (pb.Snapshot, error) +} + +// MemoryStorage implements the Storage interface backed by an +// in-memory array. +type MemoryStorage struct { + // Protects access to all fields. Most methods of MemoryStorage are + // run on the raft goroutine, but Append() is run on an application + // goroutine. + sync.Mutex + + hardState pb.HardState + snapshot pb.Snapshot + // ents[i] has raft log position i+snapshot.Metadata.Index + ents []pb.Entry +} + +// NewMemoryStorage creates an empty MemoryStorage. +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // When starting from scratch populate the list with a dummy entry at term zero. + ents: make([]pb.Entry, 1), + } +} + +// InitialState implements the Storage interface. +func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState saves the current HardState. +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// Entries implements the Storage interface. +func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) + } + // only contains dummy entries. + if len(ms.ents) == 1 { + return nil, ErrUnavailable + } + + ents := ms.ents[lo-offset : hi-offset] + return limitSize(ents, maxSize), nil +} + +// Term implements the Storage interface. +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// LastIndex implements the Storage interface. +func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex implements the Storage interface. 
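+// For example, with a snapshot at index 5 the dummy entry makes
+// ents[0].Index == 5, so firstIndex() returns 6; appending two more
+// entries then makes lastIndex() return 7.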
+func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Snapshot implements the Storage interface. +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// ApplySnapshot overwrites the contents of this Storage object with +// those of the given snapshot. +func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + //handle check for old snapshot being applied + msIndex := ms.snapshot.Metadata.Index + snapIndex := snap.Metadata.Index + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and +// can be used to reconstruct the state at that point. +// If any configuration changes have been made since the last compaction, +// the result of the last ApplyConfChange must be passed in. +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index + if i > ms.lastIndex() { + raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term + if cs != nil { + ms.snapshot.Metadata.ConfState = *cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// Compact discards all log entries prior to compactIndex. +// It is the application's responsibility to not attempt to compact an index +// greater than raftLog.applied. +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { + raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + ents = append(ents, ms.ents[i+1:]...) + ms.ents = ents + return nil +} + +// Append the new entries to storage. +// TODO (xiangli): ensure the entries are continuous and +// entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() + last := entries[0].Index + uint64(len(entries)) - 1 + + // shortcut if there is no new entry. + if last < first { + return nil + } + // truncate compacted entries + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + ms.ents = append(ms.ents, entries...) 
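+	// (reaching the default case means entries[0].Index is past
+	// lastIndex()+1, so appending here would leave a gap in the log)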
+ default: + raftLogger.Panicf("missing log entry [last: %d, append at: %d]", + ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go new file mode 100644 index 0000000..f4141fe --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/util.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "fmt" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +// uint64Slice implements sort interface +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. +func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected") + if m.RejectHint != 0 { + fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) + } + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) + } + return buf.String() +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. 
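+// For example, a normal entry at term 2, index 5 carrying data "foo" is
+// rendered with a nil formatter as: 2/5 EntryNormal "foo".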
+func DescribeEntry(e pb.Entry, f EntryFormatter) string { + var formatted string + if e.Type == pb.EntryNormal && f != nil { + formatted = f(e.Data) + } else { + formatted = fmt.Sprintf("%q", e.Data) + } + return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go new file mode 100644 index 0000000..743deac --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/db.go @@ -0,0 +1,74 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package snap + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/coreos/etcd/pkg/fileutil" +) + +// SaveDBFrom saves snapshot of the database from the given reader. It +// guarantees the save operation is atomic. +func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { + f, err := ioutil.TempFile(s.dir, "tmp") + if err != nil { + return 0, err + } + var n int64 + n, err = io.Copy(f, r) + if err == nil { + err = fileutil.Fsync(f) + } + f.Close() + if err != nil { + os.Remove(f.Name()) + return n, err + } + fn := path.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) + if fileutil.Exist(fn) { + os.Remove(f.Name()) + return n, nil + } + err = os.Rename(f.Name(), fn) + if err != nil { + os.Remove(f.Name()) + return n, err + } + + plog.Infof("saved database snapshot to disk [total bytes: %d]", n) + + return n, nil +} + +// DBFilePath returns the file path for the snapshot of the database with +// given id. If the snapshot does not exist, it returns error. +func (s *Snapshotter) DBFilePath(id uint64) (string, error) { + fns, err := fileutil.ReadDir(s.dir) + if err != nil { + return "", err + } + wfn := fmt.Sprintf("%016x.snap.db", id) + for _, fn := range fns { + if fn == wfn { + return path.Join(s.dir, fn), nil + } + } + return "", fmt.Errorf("snap: snapshot file doesn't exist") +} diff --git a/vendor/github.com/coreos/etcd/snap/message.go b/vendor/github.com/coreos/etcd/snap/message.go new file mode 100644 index 0000000..d73713f --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/message.go @@ -0,0 +1,64 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+	"io"
+
+	"github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+// Message is a struct that contains a raft Message and a ReadCloser. The type
+// of raft message MUST be MsgSnap, which contains the raft meta-data and an
+// additional data []byte field that contains the snapshot of the actual state
+// machine.
+// Message contains the ReadCloser field for handling large snapshots. This avoids
+// copying the entire snapshot into a byte array, which consumes a lot of memory.
+//
+// The user of Message should close the Message after sending it.
+type Message struct {
+	raftpb.Message
+	ReadCloser io.ReadCloser
+	TotalSize  int64
+	closeC     chan bool
+}
+
+func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message {
+	return &Message{
+		Message:    rs,
+		ReadCloser: ioutil.NewExactReadCloser(rc, rcSize),
+		TotalSize:  int64(rs.Size()) + rcSize,
+		closeC:     make(chan bool, 1),
+	}
+}
+
+// CloseNotify returns a channel that receives a single value
+// when the message sending has finished. true indicates that
+// the send was successful.
+func (m Message) CloseNotify() <-chan bool {
+	return m.closeC
+}
+
+func (m Message) CloseWithError(err error) {
+	if cerr := m.ReadCloser.Close(); cerr != nil {
+		err = cerr
+	}
+	if err == nil {
+		m.closeC <- true
+	} else {
+		m.closeC <- false
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/snap/metrics.go b/vendor/github.com/coreos/etcd/snap/metrics.go
new file mode 100644
index 0000000..433ef09
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/metrics.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	// TODO: save_fsync latency?
+	saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd_debugging",
+		Subsystem: "snap",
+		Name:      "save_total_duration_seconds",
+		Help:      "The total latency distributions of save called by snapshot.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+
+	marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd_debugging",
+		Subsystem: "snap",
+		Name:      "save_marshalling_duration_seconds",
+		Help:      "The marshalling cost distributions of save called by snapshot.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+)
+
+func init() {
+	prometheus.MustRegister(saveDurations)
+	prometheus.MustRegister(marshallingDurations)
+}
diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go
new file mode 100644
index 0000000..130e227
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go
@@ -0,0 +1,353 @@
+// Code generated by protoc-gen-gogo.
+// source: snap.proto
+// DO NOT EDIT!
+
+/*
+	Package snappb is a generated protocol buffer package.
+ + It is generated from these files: + snap.proto + + It has these top-level messages: + Snapshot +*/ +package snappb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Snapshot struct { + Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} } + +func init() { + proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") +} +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintSnap(dAtA, i, uint64(m.Crc)) + if m.Data != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnap(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Snap(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Snap(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintSnap(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Snapshot) Size() (n int) { + var l int + _ = l + n += 1 + sovSnap(uint64(m.Crc)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovSnap(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSnap(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnap(x uint64) (n int) { + return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: snapshot: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) + } + m.Crc = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Crc |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnap + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnap(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnap(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnap + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnap(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } + +var fileDescriptorSnap = []byte{ + // 126 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, + 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, + 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, + 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, + 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, + 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, + 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto new file mode 100644 index 0000000..cd3d21d --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto @@ -0,0 +1,14 @@ +syntax = "proto2"; +package snappb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message snapshot { + optional uint32 crc = 1 [(gogoproto.nullable) = false]; + optional bytes data = 2; +} diff --git a/vendor/github.com/coreos/etcd/snap/snapshotter.go b/vendor/github.com/coreos/etcd/snap/snapshotter.go new file mode 100644 index 0000000..50d09dd --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/snapshotter.go @@ -0,0 +1,204 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package snap stores raft nodes' states with snapshots. +package snap + +import ( + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "path" + "sort" + "strings" + "time" + + pioutil "github.com/coreos/etcd/pkg/ioutil" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap/snappb" + + "github.com/coreos/pkg/capnslog" +) + +const ( + snapSuffix = ".snap" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap") + + ErrNoSnapshot = errors.New("snap: no available snapshot") + ErrEmptySnapshot = errors.New("snap: empty snapshot") + ErrCRCMismatch = errors.New("snap: crc mismatch") + crcTable = crc32.MakeTable(crc32.Castagnoli) + + // A map of valid files that can be present in the snap folder. 
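+	// (the etcd backend keeps its database in this directory as a plain
+	// file named "db", so it must not be flagged as an unexpected file)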
+	validFiles = map[string]bool{
+		"db": true,
+	}
+)
+
+type Snapshotter struct {
+	dir string
+}
+
+func New(dir string) *Snapshotter {
+	return &Snapshotter{
+		dir: dir,
+	}
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+	if raft.IsEmptySnap(snapshot) {
+		return nil
+	}
+	return s.save(&snapshot)
+}
+
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+	start := time.Now()
+
+	fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+	b := pbutil.MustMarshal(snapshot)
+	crc := crc32.Update(0, crcTable, b)
+	snap := snappb.Snapshot{Crc: crc, Data: b}
+	d, err := snap.Marshal()
+	if err != nil {
+		return err
+	} else {
+		marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+	}
+
+	err = pioutil.WriteAndSyncFile(path.Join(s.dir, fname), d, 0666)
+	if err == nil {
+		saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+	} else {
+		err1 := os.Remove(path.Join(s.dir, fname))
+		if err1 != nil {
+			plog.Errorf("failed to remove broken snapshot file %s", path.Join(s.dir, fname))
+		}
+	}
+	return err
+}
+
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+	names, err := s.snapNames()
+	if err != nil {
+		return nil, err
+	}
+	var snap *raftpb.Snapshot
+	for _, name := range names {
+		if snap, err = loadSnap(s.dir, name); err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return nil, ErrNoSnapshot
+	}
+	return snap, nil
+}
+
+func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
+	fpath := path.Join(dir, name)
+	snap, err := Read(fpath)
+	if err != nil {
+		renameBroken(fpath)
+	}
+	return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(snapname string) (*raftpb.Snapshot, error) {
+	b, err := ioutil.ReadFile(snapname)
+	if err != nil {
+		plog.Errorf("cannot read file %v: %v", snapname, err)
+		return nil, err
+	}
+
+	if len(b) == 0 {
+		plog.Errorf("unexpected empty snapshot")
+		return nil, ErrEmptySnapshot
+	}
+
+	var serializedSnap snappb.Snapshot
+	if err = serializedSnap.Unmarshal(b); err != nil {
+		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+		return nil, err
+	}
+
+	if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+		plog.Errorf("unexpected empty snapshot")
+		return nil, ErrEmptySnapshot
+	}
+
+	crc := crc32.Update(0, crcTable, serializedSnap.Data)
+	if crc != serializedSnap.Crc {
+		plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
+		return nil, ErrCRCMismatch
+	}
+
+	var snap raftpb.Snapshot
+	if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+		return nil, err
+	}
+	return &snap, nil
+}
+
+// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
+// If there are no available snapshots, ErrNoSnapshot will be returned.
+func (s *Snapshotter) snapNames() ([]string, error) {
+	dir, err := os.Open(s.dir)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	snaps := checkSuffix(names)
+	if len(snaps) == 0 {
+		return nil, ErrNoSnapshot
+	}
+	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+	return snaps, nil
+}
+
+func checkSuffix(names []string) []string {
+	snaps := []string{}
+	for i := range names {
+		if strings.HasSuffix(names[i], snapSuffix) {
+			snaps = append(snaps, names[i])
+		} else {
+			// If we find a file which is not a snapshot then check if it's
+			// a valid file.
If not throw out a warning. + if _, ok := validFiles[names[i]]; !ok { + plog.Warningf("skipped unexpected non snapshot file %v", names[i]) + } + } + } + return snaps +} + +func renameBroken(path string) { + brokenPath := path + ".broken" + if err := os.Rename(path, brokenPath); err != nil { + plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err) + } +} diff --git a/vendor/github.com/coreos/etcd/wal/decoder.go b/vendor/github.com/coreos/etcd/wal/decoder.go new file mode 100644 index 0000000..0d9b442 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/decoder.go @@ -0,0 +1,185 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "bufio" + "encoding/binary" + "hash" + "io" + "sync" + + "github.com/coreos/etcd/pkg/crc" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" +) + +const minSectorSize = 512 + +type decoder struct { + mu sync.Mutex + brs []*bufio.Reader + + // lastValidOff file offset following the last valid decoded record + lastValidOff int64 + crc hash.Hash32 +} + +func newDecoder(r ...io.Reader) *decoder { + readers := make([]*bufio.Reader, len(r)) + for i := range r { + readers[i] = bufio.NewReader(r[i]) + } + return &decoder{ + brs: readers, + crc: crc.New(0, crcTable), + } +} + +func (d *decoder) decode(rec *walpb.Record) error { + rec.Reset() + d.mu.Lock() + defer d.mu.Unlock() + return d.decodeRecord(rec) +} + +func (d *decoder) decodeRecord(rec *walpb.Record) error { + if len(d.brs) == 0 { + return io.EOF + } + + l, err := readInt64(d.brs[0]) + if err == io.EOF || (err == nil && l == 0) { + // hit end of file or preallocated space + d.brs = d.brs[1:] + if len(d.brs) == 0 { + return io.EOF + } + d.lastValidOff = 0 + return d.decodeRecord(rec) + } + if err != nil { + return err + } + + recBytes, padBytes := decodeFrameSize(l) + + data := make([]byte, recBytes+padBytes) + if _, err = io.ReadFull(d.brs[0], data); err != nil { + // ReadFull returns io.EOF only if no bytes were read + // the decoder should treat this as an ErrUnexpectedEOF instead. 
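+		// (a length header was already consumed at this point, so a short
+		// read indicates a torn or partial write, not a clean end of file)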
+ if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if err := rec.Unmarshal(data[:recBytes]); err != nil { + if d.isTornEntry(data) { + return io.ErrUnexpectedEOF + } + return err + } + + // skip crc checking if the record type is crcType + if rec.Type != crcType { + d.crc.Write(rec.Data) + if err := rec.Validate(d.crc.Sum32()); err != nil { + if d.isTornEntry(data) { + return io.ErrUnexpectedEOF + } + return err + } + } + // record decoded as valid; point last valid offset to end of record + d.lastValidOff += recBytes + padBytes + 8 + return nil +} + +func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) { + // the record size is stored in the lower 56 bits of the 64-bit length + recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56)) + // non-zero padding is indicated by set MSb / a negative length + if lenField < 0 { + // padding is stored in lower 3 bits of length MSB + padBytes = int64((uint64(lenField) >> 56) & 0x7) + } + return +} + +// isTornEntry determines whether the last entry of the WAL was partially written +// and corrupted because of a torn write. +func (d *decoder) isTornEntry(data []byte) bool { + if len(d.brs) != 1 { + return false + } + + fileOff := d.lastValidOff + 8 + curOff := 0 + chunks := [][]byte{} + // split data on sector boundaries + for curOff < len(data) { + chunkLen := int(minSectorSize - (fileOff % minSectorSize)) + if chunkLen > len(data)-curOff { + chunkLen = len(data) - curOff + } + chunks = append(chunks, data[curOff:curOff+chunkLen]) + fileOff += int64(chunkLen) + curOff += chunkLen + } + + // if any data for a sector chunk is all 0, it's a torn write + for _, sect := range chunks { + isZero := true + for _, v := range sect { + if v != 0 { + isZero = false + break + } + } + if isZero { + return true + } + } + return false +} + +func (d *decoder) updateCRC(prevCrc uint32) { + d.crc = crc.New(prevCrc, crcTable) +} + +func (d *decoder) lastCRC() uint32 { + return d.crc.Sum32() +} + +func (d *decoder) lastOffset() int64 { return d.lastValidOff } + +func mustUnmarshalEntry(d []byte) raftpb.Entry { + var e raftpb.Entry + pbutil.MustUnmarshal(&e, d) + return e +} + +func mustUnmarshalState(d []byte) raftpb.HardState { + var s raftpb.HardState + pbutil.MustUnmarshal(&s, d) + return s +} + +func readInt64(r io.Reader) (int64, error) { + var n int64 + err := binary.Read(r, binary.LittleEndian, &n) + return n, err +} diff --git a/vendor/github.com/coreos/etcd/wal/doc.go b/vendor/github.com/coreos/etcd/wal/doc.go new file mode 100644 index 0000000..a3abd69 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/doc.go @@ -0,0 +1,75 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package wal provides an implementation of a write ahead log that is used by +etcd. + +A WAL is created at a particular directory and is made up of a number of +segmented WAL files. 
Inside each file, the raft state and entries are appended
+with the Save method:
+
+	metadata := []byte{}
+	w, err := wal.Create("/var/lib/etcd", metadata)
+	...
+	err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called
+to record it, so that the WAL can be matched against the saved snapshot when
+restarting.
+
+	err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL, it must be closed:
+
+	w.Close()
+
+Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record
+protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
+64-bit packed structure holding the length of the remaining logical record data in its lower
+56 bits and its physical padding in the first three bits of the most significant byte. Each
+record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
+value of all record protobufs preceding the current record.
+
+WAL files are placed inside of the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to WAL MUST have raft index 0.
+
+WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal
+sequence number and cause a new file to be created. If the last raft index saved
+was 0x20 and this is the first time cut has been called on this WAL then the sequence will
+increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
+If a second cut later writes another 0x10 entries with incrementing indexes, the
+next file will be called: 0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+	w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+	...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all of the items from the given
+snapshot to the end of the WAL are read first:
+
+	metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raftpb.HardState and the slice of
+raftpb.Entry items in the log.
+
+*/
+package wal
diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go
new file mode 100644
index 0000000..efe5892
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/encoder.go
@@ -0,0 +1,120 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"encoding/binary"
+	"hash"
+	"io"
+	"os"
+	"sync"
+
+	"github.com/coreos/etcd/pkg/crc"
+	"github.com/coreos/etcd/pkg/ioutil"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+// walPageBytes is the alignment for flushing records to the backing Writer.
+// It should be a multiple of the minimum sector size so that WAL can safely +// distinguish between torn writes and ordinary data corruption. +const walPageBytes = 8 * minSectorSize + +type encoder struct { + mu sync.Mutex + bw *ioutil.PageWriter + + crc hash.Hash32 + buf []byte + uint64buf []byte +} + +func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { + return &encoder{ + bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset), + crc: crc.New(prevCrc, crcTable), + // 1MB buffer + buf: make([]byte, 1024*1024), + uint64buf: make([]byte, 8), + } +} + +// newFileEncoder creates a new encoder with current file offset for the page writer. +func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { + offset, err := f.Seek(0, os.SEEK_CUR) + if err != nil { + return nil, err + } + return newEncoder(f, prevCrc, int(offset)), nil +} + +func (e *encoder) encode(rec *walpb.Record) error { + e.mu.Lock() + defer e.mu.Unlock() + + e.crc.Write(rec.Data) + rec.Crc = e.crc.Sum32() + var ( + data []byte + err error + n int + ) + + if rec.Size() > len(e.buf) { + data, err = rec.Marshal() + if err != nil { + return err + } + } else { + n, err = rec.MarshalTo(e.buf) + if err != nil { + return err + } + data = e.buf[:n] + } + + lenField, padBytes := encodeFrameSize(len(data)) + if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil { + return err + } + + if padBytes != 0 { + data = append(data, make([]byte, padBytes)...) + } + _, err = e.bw.Write(data) + return err +} + +func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) { + lenField = uint64(dataBytes) + // force 8 byte alignment so length never gets a torn write + padBytes = (8 - (dataBytes % 8)) % 8 + if padBytes != 0 { + lenField |= uint64(0x80|padBytes) << 56 + } + return +} + +func (e *encoder) flush() error { + e.mu.Lock() + defer e.mu.Unlock() + return e.bw.Flush() +} + +func writeUint64(w io.Writer, n uint64, buf []byte) error { + // http://golang.org/src/encoding/binary/binary.go + binary.LittleEndian.PutUint64(buf, n) + _, err := w.Write(buf) + return err +} diff --git a/vendor/github.com/coreos/etcd/wal/file_pipeline.go b/vendor/github.com/coreos/etcd/wal/file_pipeline.go new file mode 100644 index 0000000..3412210 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/file_pipeline.go @@ -0,0 +1,97 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package wal + +import ( + "fmt" + "os" + "path" + + "github.com/coreos/etcd/pkg/fileutil" +) + +// filePipeline pipelines allocating disk space +type filePipeline struct { + // dir to put files + dir string + // size of files to make, in bytes + size int64 + // count number of files generated + count int + + filec chan *fileutil.LockedFile + errc chan error + donec chan struct{} +} + +func newFilePipeline(dir string, fileSize int64) *filePipeline { + fp := &filePipeline{ + dir: dir, + size: fileSize, + filec: make(chan *fileutil.LockedFile), + errc: make(chan error, 1), + donec: make(chan struct{}), + } + go fp.run() + return fp +} + +// Open returns a fresh file for writing. Rename the file before calling +// Open again or there will be file collisions. +func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) { + select { + case f = <-fp.filec: + case err = <-fp.errc: + } + return +} + +func (fp *filePipeline) Close() error { + close(fp.donec) + return <-fp.errc +} + +func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { + // count % 2 so this file isn't the same as the one last published + fpath := path.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2)) + if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil { + return nil, err + } + if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { + plog.Errorf("failed to allocate space when creating new wal file (%v)", err) + f.Close() + return nil, err + } + fp.count++ + return f, nil +} + +func (fp *filePipeline) run() { + defer close(fp.errc) + for { + f, err := fp.alloc() + if err != nil { + fp.errc <- err + return + } + select { + case fp.filec <- f: + case <-fp.donec: + os.Remove(f.Name()) + f.Close() + return + } + } +} diff --git a/vendor/github.com/coreos/etcd/wal/metrics.go b/vendor/github.com/coreos/etcd/wal/metrics.go new file mode 100644 index 0000000..9e089d3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/metrics.go @@ -0,0 +1,31 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import "github.com/prometheus/client_golang/prometheus" + +var ( + syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "wal_fsync_duration_seconds", + Help: "The latency distributions of fsync called by wal.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) +) + +func init() { + prometheus.MustRegister(syncDurations) +} diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go new file mode 100644 index 0000000..0a920e2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/repair.go @@ -0,0 +1,99 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "io" + "os" + "path" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/wal/walpb" +) + +// Repair tries to repair ErrUnexpectedEOF in the +// last wal file by truncating. +func Repair(dirpath string) bool { + f, err := openLast(dirpath) + if err != nil { + return false + } + defer f.Close() + + rec := &walpb.Record{} + decoder := newDecoder(f) + for { + lastOffset := decoder.lastOffset() + err := decoder.decode(rec) + switch err { + case nil: + // update crc of the decoder when necessary + switch rec.Type { + case crcType: + crc := decoder.crc.Sum32() + // current crc of decoder must match the crc of the record. + // do no need to match 0 crc, since the decoder is a new one at this case. + if crc != 0 && rec.Validate(crc) != nil { + return false + } + decoder.updateCRC(rec.Crc) + } + continue + case io.EOF: + return true + case io.ErrUnexpectedEOF: + plog.Noticef("repairing %v", f.Name()) + bf, bferr := os.Create(f.Name() + ".broken") + if bferr != nil { + plog.Errorf("could not repair %v, failed to create backup file", f.Name()) + return false + } + defer bf.Close() + + if _, err = f.Seek(0, os.SEEK_SET); err != nil { + plog.Errorf("could not repair %v, failed to read file", f.Name()) + return false + } + + if _, err = io.Copy(bf, f); err != nil { + plog.Errorf("could not repair %v, failed to copy file", f.Name()) + return false + } + + if err = f.Truncate(int64(lastOffset)); err != nil { + plog.Errorf("could not repair %v, failed to truncate file", f.Name()) + return false + } + if err = fileutil.Fsync(f.File); err != nil { + plog.Errorf("could not repair %v, failed to sync file", f.Name()) + return false + } + return true + default: + plog.Errorf("could not repair error (%v)", err) + return false + } + } +} + +// openLast opens the last wal file for read and write. +func openLast(dirpath string) (*fileutil.LockedFile, error) { + names, err := readWalNames(dirpath) + if err != nil { + return nil, err + } + last := path.Join(dirpath, names[len(names)-1]) + return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode) +} diff --git a/vendor/github.com/coreos/etcd/wal/util.go b/vendor/github.com/coreos/etcd/wal/util.go new file mode 100644 index 0000000..5c56e22 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/util.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package wal + +import ( + "errors" + "fmt" + "strings" + + "github.com/coreos/etcd/pkg/fileutil" +) + +var ( + badWalName = errors.New("bad wal name") +) + +func Exist(dirpath string) bool { + names, err := fileutil.ReadDir(dirpath) + if err != nil { + return false + } + return len(names) != 0 +} + +// searchIndex returns the last array index of names whose raft index section is +// equal to or smaller than the given index. +// The given names MUST be sorted. +func searchIndex(names []string, index uint64) (int, bool) { + for i := len(names) - 1; i >= 0; i-- { + name := names[i] + _, curIndex, err := parseWalName(name) + if err != nil { + plog.Panicf("parse correct name should never fail: %v", err) + } + if index >= curIndex { + return i, true + } + } + return -1, false +} + +// names should have been sorted based on sequence number. +// isValidSeq checks whether seq increases continuously. +func isValidSeq(names []string) bool { + var lastSeq uint64 + for _, name := range names { + curSeq, _, err := parseWalName(name) + if err != nil { + plog.Panicf("parse correct name should never fail: %v", err) + } + if lastSeq != 0 && lastSeq != curSeq-1 { + return false + } + lastSeq = curSeq + } + return true +} +func readWalNames(dirpath string) ([]string, error) { + names, err := fileutil.ReadDir(dirpath) + if err != nil { + return nil, err + } + wnames := checkWalNames(names) + if len(wnames) == 0 { + return nil, ErrFileNotFound + } + return wnames, nil +} + +func checkWalNames(names []string) []string { + wnames := make([]string, 0) + for _, name := range names { + if _, _, err := parseWalName(name); err != nil { + // don't complain about left over tmp files + if !strings.HasSuffix(name, ".tmp") { + plog.Warningf("ignored file %v in wal", name) + } + continue + } + wnames = append(wnames, name) + } + return wnames +} + +func parseWalName(str string) (seq, index uint64, err error) { + if !strings.HasSuffix(str, ".wal") { + return 0, 0, badWalName + } + _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) + return seq, index, err +} + +func walName(seq, index uint64) string { + return fmt.Sprintf("%016x-%016x.wal", seq, index) +} diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go new file mode 100644 index 0000000..69ed6b2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/wal.go @@ -0,0 +1,637 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package wal + +import ( + "bytes" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path" + "sync" + "time" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" + + "github.com/coreos/pkg/capnslog" +) + +const ( + metadataType int64 = iota + 1 + entryType + stateType + crcType + snapshotType + + // warnSyncDuration is the amount of time allotted to an fsync before + // logging a warning + warnSyncDuration = time.Second +) + +var ( + // SegmentSizeBytes is the preallocated size of each wal segment file. + // The actual size might be larger than this. In general, the default + // value should be used, but this is defined as an exported variable + // so that tests can set a different segment size. + SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal") + + ErrMetadataConflict = errors.New("wal: conflicting metadata found") + ErrFileNotFound = errors.New("wal: file not found") + ErrCRCMismatch = errors.New("wal: crc mismatch") + ErrSnapshotMismatch = errors.New("wal: snapshot mismatch") + ErrSnapshotNotFound = errors.New("wal: snapshot not found") + crcTable = crc32.MakeTable(crc32.Castagnoli) +) + +// WAL is a logical representation of the stable storage. +// WAL is either in read mode or append mode but not both. +// A newly created WAL is in append mode, and ready for appending records. +// A just opened WAL is in read mode, and ready for reading records. +// The WAL will be ready for appending after reading out all the previous records. +type WAL struct { + dir string // the living directory of the underlay files + + // dirFile is a fd for the wal directory for syncing on Rename + dirFile *os.File + + metadata []byte // metadata recorded at the head of each WAL + state raftpb.HardState // hardstate recorded at the head of WAL + + start walpb.Snapshot // snapshot to start reading + decoder *decoder // decoder to decode records + readClose func() error // closer for decode reader + + mu sync.Mutex + enti uint64 // index of the last entry saved to the wal + encoder *encoder // encoder to encode records + + locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing) + fp *filePipeline +} + +// Create creates a WAL ready for appending records. The given metadata is +// recorded at the head of each WAL file, and can be retrieved with ReadAll. 
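+//
+// A minimal usage sketch (the directory and metadata are illustrative):
+//
+//	w, err := Create("/var/lib/etcd/member/wal", []byte("node-1"))
+//	if err != nil {
+//		// os.ErrExist is returned when the directory already holds a WAL
+//	}
+//	defer w.Close()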
+func Create(dirpath string, metadata []byte) (*WAL, error) {
+    if Exist(dirpath) {
+        return nil, os.ErrExist
+    }
+
+    // keep temporary wal directory so WAL initialization appears atomic
+    tmpdirpath := path.Clean(dirpath) + ".tmp"
+    if fileutil.Exist(tmpdirpath) {
+        if err := os.RemoveAll(tmpdirpath); err != nil {
+            return nil, err
+        }
+    }
+    if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+        return nil, err
+    }
+
+    p := path.Join(tmpdirpath, walName(0, 0))
+    f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
+    if err != nil {
+        return nil, err
+    }
+    if _, err = f.Seek(0, os.SEEK_END); err != nil {
+        return nil, err
+    }
+    if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+        return nil, err
+    }
+
+    w := &WAL{
+        dir:      dirpath,
+        metadata: metadata,
+    }
+    w.encoder, err = newFileEncoder(f.File, 0)
+    if err != nil {
+        return nil, err
+    }
+    w.locks = append(w.locks, f)
+    if err = w.saveCrc(0); err != nil {
+        return nil, err
+    }
+    if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+        return nil, err
+    }
+    if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+        return nil, err
+    }
+
+    if w, err = w.renameWal(tmpdirpath); err != nil {
+        return nil, err
+    }
+
+    // directory was renamed; sync parent dir to persist rename
+    pdir, perr := fileutil.OpenDir(path.Dir(w.dir))
+    if perr != nil {
+        return nil, perr
+    }
+    if perr = fileutil.Fsync(pdir); perr != nil {
+        return nil, perr
+    }
+    if perr = pdir.Close(); perr != nil {
+        return nil, perr
+    }
+
+    return w, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+    w, err := openAtIndex(dirpath, snap, true)
+    if err != nil {
+        return nil, err
+    }
+    if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+        return nil, err
+    }
+    return w, nil
+}
+
+// OpenForRead only opens the wal files for read.
+// Write on a read only wal panics.
+func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+    return openAtIndex(dirpath, snap, false)
+}
+
+func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+    names, err := readWalNames(dirpath)
+    if err != nil {
+        return nil, err
+    }
+
+    nameIndex, ok := searchIndex(names, snap.Index)
+    if !ok || !isValidSeq(names[nameIndex:]) {
+        return nil, ErrFileNotFound
+    }
+
+    // open the wal files
+    rcs := make([]io.ReadCloser, 0)
+    rs := make([]io.Reader, 0)
+    ls := make([]*fileutil.LockedFile, 0)
+    for _, name := range names[nameIndex:] {
+        p := path.Join(dirpath, name)
+        if write {
+            l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+            if err != nil {
+                closeAll(rcs...)
+                return nil, err
+            }
+            ls = append(ls, l)
+            rcs = append(rcs, l)
+        } else {
+            rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+            if err != nil {
+                closeAll(rcs...)
+                return nil, err
+            }
+            ls = append(ls, nil)
+            rcs = append(rcs, rf)
+        }
+        rs = append(rs, rcs[len(rcs)-1])
+    }
+
+    closer := func() error { return closeAll(rcs...) }
+
+    // create a WAL ready for reading
+    w := &WAL{
+        dir:       dirpath,
+        start:     snap,
+        decoder:   newDecoder(rs...),
+        readClose: closer,
+        locks:     ls,
+    }
+
+    if write {
+        // write reuses the file descriptors from read; don't close so
+        // WAL can append without dropping the file lock
+        w.readClose = nil
+        if _, _, err := parseWalName(path.Base(w.tail().Name())); err != nil {
+            closer()
+            return nil, err
+        }
+        w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+    }
+
+    return w, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF;
+// otherwise an error will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return
+// all the records and the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the checking of match.
+// After ReadAll, the WAL will be ready for appending new records.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+    w.mu.Lock()
+    defer w.mu.Unlock()
+
+    rec := &walpb.Record{}
+    decoder := w.decoder
+
+    var match bool
+    for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+        switch rec.Type {
+        case entryType:
+            e := mustUnmarshalEntry(rec.Data)
+            if e.Index > w.start.Index {
+                ents = append(ents[:e.Index-w.start.Index-1], e)
+            }
+            w.enti = e.Index
+        case stateType:
+            state = mustUnmarshalState(rec.Data)
+        case metadataType:
+            if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+                state.Reset()
+                return nil, state, nil, ErrMetadataConflict
+            }
+            metadata = rec.Data
+        case crcType:
+            crc := decoder.crc.Sum32()
+            // current crc of decoder must match the crc of the record.
+            // no need to match a 0 crc, since the decoder is a new one in this case.
+            if crc != 0 && rec.Validate(crc) != nil {
+                state.Reset()
+                return nil, state, nil, ErrCRCMismatch
+            }
+            decoder.updateCRC(rec.Crc)
+        case snapshotType:
+            var snap walpb.Snapshot
+            pbutil.MustUnmarshal(&snap, rec.Data)
+            if snap.Index == w.start.Index {
+                if snap.Term != w.start.Term {
+                    state.Reset()
+                    return nil, state, nil, ErrSnapshotMismatch
+                }
+                match = true
+            }
+        default:
+            state.Reset()
+            return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+        }
+    }
+
+    switch w.tail() {
+    case nil:
+        // We do not have to read out all entries in read mode.
+        // The last record may be a partially written one, so
+        // io.ErrUnexpectedEOF might be returned.
+        if err != io.EOF && err != io.ErrUnexpectedEOF {
+            state.Reset()
+            return nil, state, nil, err
+        }
+    default:
+        // We must read all of the entries if WAL is opened in write mode.
+        if err != io.EOF {
+            state.Reset()
+            return nil, state, nil, err
+        }
+        // decodeRecord() will return io.EOF if it detects a zero record,
+        // but this zero record may be followed by non-zero records from
+        // a torn write. Overwriting some of these non-zero records, but
+        // not all, will cause CRC errors on WAL open. Since the records
+        // were never fully synced to disk in the first place, it's safe
+        // to zero them out to avoid any CRC errors from new writes.
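+        // (Seek positions the tail file at the offset of the last
+        // successfully decoded record; ZeroToEnd then zero-fills the
+        // remainder of the file.)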
+        if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil {
+            return nil, state, nil, err
+        }
+        if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
+            return nil, state, nil, err
+        }
+    }
+
+    err = nil
+    if !match {
+        err = ErrSnapshotNotFound
+    }
+
+    // close decoder, disable reading
+    if w.readClose != nil {
+        w.readClose()
+        w.readClose = nil
+    }
+    w.start = walpb.Snapshot{}
+
+    w.metadata = metadata
+
+    if w.tail() != nil {
+        // create encoder (chain crc with the decoder), enable appending
+        w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
+        if err != nil {
+            return
+        }
+    }
+    w.decoder = nil
+
+    return metadata, state, ents, err
+}
+
+// cut closes the currently written file and creates a new one ready for appends.
+// cut first creates a temp wal file and writes the necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+    // close old wal file; truncate to avoid wasting space on an early cut
+    off, serr := w.tail().Seek(0, os.SEEK_CUR)
+    if serr != nil {
+        return serr
+    }
+    if err := w.tail().Truncate(off); err != nil {
+        return err
+    }
+    if err := w.sync(); err != nil {
+        return err
+    }
+
+    fpath := path.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+    // create a temp wal file with name sequence + 1, or truncate the existing one
+    newTail, err := w.fp.Open()
+    if err != nil {
+        return err
+    }
+
+    // update writer and save the previous crc
+    w.locks = append(w.locks, newTail)
+    prevCrc := w.encoder.crc.Sum32()
+    w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+    if err != nil {
+        return err
+    }
+    if err = w.saveCrc(prevCrc); err != nil {
+        return err
+    }
+    if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+        return err
+    }
+    if err = w.saveState(&w.state); err != nil {
+        return err
+    }
+    // atomically move temp wal file to wal file
+    if err = w.sync(); err != nil {
+        return err
+    }
+
+    off, err = w.tail().Seek(0, os.SEEK_CUR)
+    if err != nil {
+        return err
+    }
+
+    if err = os.Rename(newTail.Name(), fpath); err != nil {
+        return err
+    }
+    if err = fileutil.Fsync(w.dirFile); err != nil {
+        return err
+    }
+
+    newTail.Close()
+
+    if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+        return err
+    }
+    if _, err = newTail.Seek(off, os.SEEK_SET); err != nil {
+        return err
+    }
+
+    w.locks[len(w.locks)-1] = newTail
+
+    prevCrc = w.encoder.crc.Sum32()
+    w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+    if err != nil {
+        return err
+    }
+
+    plog.Infof("segmented wal file %v is created", fpath)
+    return nil
+}
+
+func (w *WAL) sync() error {
+    if w.encoder != nil {
+        if err := w.encoder.flush(); err != nil {
+            return err
+        }
+    }
+    start := time.Now()
+    err := fileutil.Fdatasync(w.tail().File)
+
+    duration := time.Since(start)
+    if duration > warnSyncDuration {
+        plog.Warningf("sync duration of %v, expected less than %v", duration, warnSyncDuration)
+    }
+    syncDurations.Observe(duration.Seconds())
+
+    return err
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given
+// index, except the largest one among them.
+// For example, if WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will release
+// locks 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+    w.mu.Lock()
+    defer w.mu.Unlock()
+
+    var smaller int
+    found := false
+
+    for i, l := range w.locks {
+        _, lockIndex, err := parseWalName(path.Base(l.Name()))
+        if err != nil {
+            return err
+        }
+        if lockIndex >= index {
+            smaller = i - 1
+            found = true
+            break
+        }
+    }
+
+    // if no lock index is greater than the release index, we can
+    // release locks up to the last one (exclusive).
+    if !found && len(w.locks) != 0 {
+        smaller = len(w.locks) - 1
+    }
+
+    if smaller <= 0 {
+        return nil
+    }
+
+    for i := 0; i < smaller; i++ {
+        if w.locks[i] == nil {
+            continue
+        }
+        w.locks[i].Close()
+    }
+    w.locks = w.locks[smaller:]
+
+    return nil
+}
+
+func (w *WAL) Close() error {
+    w.mu.Lock()
+    defer w.mu.Unlock()
+
+    if w.fp != nil {
+        w.fp.Close()
+        w.fp = nil
+    }
+
+    if w.tail() != nil {
+        if err := w.sync(); err != nil {
+            return err
+        }
+    }
+    for _, l := range w.locks {
+        if l == nil {
+            continue
+        }
+        if err := l.Close(); err != nil {
+            plog.Errorf("failed to unlock during closing wal: %s", err)
+        }
+    }
+
+    return w.dirFile.Close()
+}
+
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+    // TODO: add MustMarshalTo to reduce one allocation.
+    b := pbutil.MustMarshal(e)
+    rec := &walpb.Record{Type: entryType, Data: b}
+    if err := w.encoder.encode(rec); err != nil {
+        return err
+    }
+    w.enti = e.Index
+    return nil
+}
+
+func (w *WAL) saveState(s *raftpb.HardState) error {
+    if raft.IsEmptyHardState(*s) {
+        return nil
+    }
+    w.state = *s
+    b := pbutil.MustMarshal(s)
+    rec := &walpb.Record{Type: stateType, Data: b}
+    return w.encoder.encode(rec)
+}
+
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+    w.mu.Lock()
+    defer w.mu.Unlock()
+
+    // shortcut: nothing to write, do not call sync
+    if raft.IsEmptyHardState(st) && len(ents) == 0 {
+        return nil
+    }
+
+    mustSync := mustSync(st, w.state, len(ents))
+
+    // TODO(xiangli): no more reference operator
+    for i := range ents {
+        if err := w.saveEntry(&ents[i]); err != nil {
+            return err
+        }
+    }
+    if err := w.saveState(&st); err != nil {
+        return err
+    }
+
+    curOff, err := w.tail().Seek(0, os.SEEK_CUR)
+    if err != nil {
+        return err
+    }
+    if curOff < SegmentSizeBytes {
+        if mustSync {
+            return w.sync()
+        }
+        return nil
+    }
+
+    return w.cut()
+}
+
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+    b := pbutil.MustMarshal(&e)
+
+    w.mu.Lock()
+    defer w.mu.Unlock()
+
+    rec := &walpb.Record{Type: snapshotType, Data: b}
+    if err := w.encoder.encode(rec); err != nil {
+        return err
+    }
+    // update enti only when snapshot is ahead of last index
+    if w.enti < e.Index {
+        w.enti = e.Index
+    }
+    return w.sync()
+}
+
+func (w *WAL) saveCrc(prevCrc uint32) error {
+    return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+}
+
+func (w *WAL) tail() *fileutil.LockedFile {
+    if len(w.locks) > 0 {
+        return w.locks[len(w.locks)-1]
+    }
+    return nil
+}
+
+func (w *WAL) seq() uint64 {
+    t := w.tail()
+    if t == nil {
+        return 0
+    }
+    seq, _, err := parseWalName(path.Base(t.Name()))
+    if err != nil {
+        plog.Fatalf("bad wal name %s (%v)", t.Name(), err)
+    }
+    return seq
+}
+
+func mustSync(st, prevst raftpb.HardState, entsnum int) bool {
+    // Persistent state on all servers:
+    // (Updated on stable storage before responding to RPCs)
+    // currentTerm
+    // votedFor
+    // log entries[]
+    return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
+
+func closeAll(rcs ...io.ReadCloser) error {
+    for _, f := range rcs {
+        if err := f.Close(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
diff --git a/vendor/github.com/coreos/etcd/wal/wal_unix.go b/vendor/github.com/coreos/etcd/wal/wal_unix.go
new file mode 100644
index 0000000..82fd6a1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/wal_unix.go
@@ -0,0 +1,44 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package wal
+
+import (
+    "os"
+
+    "github.com/coreos/etcd/pkg/fileutil"
+)
+
+func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
+    // On non-Windows platforms, hold the lock while renaming. Releasing
+    // the lock and trying to reacquire it quickly can be flaky because
+    // it's possible the process will fork to spawn another process while
+    // this is happening. The fds are set up as close-on-exec by the Go
+    // runtime, but there is a window between the fork and the exec where
+    // another process holds the lock.
+
+    if err := os.RemoveAll(w.dir); err != nil {
+        return nil, err
+    }
+    if err := os.Rename(tmpdirpath, w.dir); err != nil {
+        return nil, err
+    }
+
+    w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+    df, err := fileutil.OpenDir(w.dir)
+    w.dirFile = df
+    return w, err
+}
diff --git a/vendor/github.com/coreos/etcd/wal/wal_windows.go b/vendor/github.com/coreos/etcd/wal/wal_windows.go
new file mode 100644
index 0000000..0b9e434
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/wal_windows.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+    "os"
+
+    "github.com/coreos/etcd/wal/walpb"
+)
+
+func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
+    // rename of directory with locked files doesn't work on
+    // windows; close the WAL to release the locks so the directory
+    // can be renamed
+    w.Close()
+    if err := os.Rename(tmpdirpath, w.dir); err != nil {
+        return nil, err
+    }
+    // reopen and relock
+    newWAL, oerr := Open(w.dir, walpb.Snapshot{})
+    if oerr != nil {
+        return nil, oerr
+    }
+    if _, _, _, err := newWAL.ReadAll(); err != nil {
+        newWAL.Close()
+        return nil, err
+    }
+    return newWAL, nil
+}
diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.go b/vendor/github.com/coreos/etcd/wal/walpb/record.go
new file mode 100644
index 0000000..30a05e0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/walpb/record.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package walpb + +import "errors" + +var ( + ErrCRCMismatch = errors.New("walpb: crc mismatch") +) + +func (rec *Record) Validate(crc uint32) error { + if rec.Crc == crc { + return nil + } + rec.Reset() + return ErrCRCMismatch +} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go new file mode 100644 index 0000000..e1a77d5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -0,0 +1,521 @@ +// Code generated by protoc-gen-gogo. +// source: record.proto +// DO NOT EDIT! + +/* + Package walpb is a generated protocol buffer package. + + It is generated from these files: + record.proto + + It has these top-level messages: + Record + Snapshot +*/ +package walpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Record struct { + Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` + Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Record) Reset() { *m = Record{} } +func (m *Record) String() string { return proto.CompactTextString(m) } +func (*Record) ProtoMessage() {} +func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} } + +type Snapshot struct { + Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} } + +func init() { + proto.RegisterType((*Record)(nil), "walpb.Record") + proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") +} +func (m *Record) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Record) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Crc)) + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRecord(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Record(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Record(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRecord(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Record) Size() (n int) { + var l int + _ = l + n += 1 + sovRecord(uint64(m.Type)) + n += 1 + sovRecord(uint64(m.Crc)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRecord(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + n += 1 + sovRecord(uint64(m.Index)) + n += 1 + sovRecord(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRecord(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRecord(x uint64) (n int) { + return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Record) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Record: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) + } + m.Crc = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Crc |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRecord + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRecord(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRecord + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRecord(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRecord + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRecord(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRecord + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRecord(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } + +var fileDescriptorRecord = []byte{ + // 186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, + 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, + 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45, + 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a, + 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11, + 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, + 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b, + 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc, + 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/github.com/coreos/etcd/wal/walpb/record.proto new file mode 100644 index 0000000..b694cb2 --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/wal/walpb/record.proto @@ -0,0 +1,20 @@ +syntax = "proto2"; +package walpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Record { + optional int64 type = 1 [(gogoproto.nullable) = false]; + optional uint32 crc = 2 [(gogoproto.nullable) = false]; + optional bytes data = 3; +} + +message Snapshot { + optional uint64 index = 1 [(gogoproto.nullable) = false]; + optional uint64 term = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md
new file mode 100644
index 0000000..cb87a11
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/README.md
@@ -0,0 +1,54 @@
+# go-systemd
+
+[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd)
+[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd)
+
+Go bindings to systemd. The project has several packages:
+
+- `activation` - for writing and using socket activation from Go
+- `dbus` - for starting/stopping/inspecting running services and units
+- `journal` - for writing to systemd's logging service, journald
+- `sdjournal` - for reading from journald by wrapping its C API
+- `machine1` - for registering machines/containers with systemd
+- `unit` - for (de)serialization and comparison of unit files
+
+## Socket Activation
+
+An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd:
+
+https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
+
+## Journal
+
+Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry.
+The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available.
+
+## D-Bus
+
+The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
+
+http://godoc.org/github.com/coreos/go-systemd/dbus
+
+### Debugging
+
+Create `/etc/dbus-1/system-local.conf` that looks like this:
+
+```
+<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
+ "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+    <policy user="root">
+        <allow eavesdrop="true"/>
+        <allow eavesdrop="true" send_destination="*"/>
+    </policy>
+</busconfig>
+```
+
+## machined
+
+The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/).
+
+## Units
+
+The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html).
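+
+## Example: socket activation
+
+A minimal sketch of a socket-activated HTTP server using the `activation`
+package (the matching systemd `.socket`/`.service` units are assumed to
+exist; error handling is elided):
+
+```
+package main
+
+import (
+    "io"
+    "net/http"
+
+    "github.com/coreos/go-systemd/activation"
+)
+
+func main() {
+    // Fetch the listeners passed in by systemd (LISTEN_FDS); true unsets
+    // the activation environment variables so children don't inherit them.
+    listeners, err := activation.Listeners(true)
+    if err != nil || len(listeners) < 1 {
+        panic("expected one socket-activated listener")
+    }
+
+    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+        io.WriteString(w, "hello\n")
+    })
+    http.Serve(listeners[0], nil)
+}
+```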
diff --git a/vendor/github.com/coreos/go-systemd/activation/files.go b/vendor/github.com/coreos/go-systemd/activation/files.go
new file mode 100644
index 0000000..c8e85fc
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/activation/files.go
@@ -0,0 +1,52 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package activation implements primitives for systemd socket activation.
+package activation
+
+import (
+    "os"
+    "strconv"
+    "syscall"
+)
+
+// based on: https://gist.github.com/alberts/4640792
+const (
+    listenFdsStart = 3
+)
+
+func Files(unsetEnv bool) []*os.File {
+    if unsetEnv {
+        defer os.Unsetenv("LISTEN_PID")
+        defer os.Unsetenv("LISTEN_FDS")
+    }
+
+    pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
+    if err != nil || pid != os.Getpid() {
+        return nil
+    }
+
+    nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
+    if err != nil || nfds == 0 {
+        return nil
+    }
+
+    files := make([]*os.File, 0, nfds)
+    for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
+        syscall.CloseOnExec(fd)
+        files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
+    }
+
+    return files
+}
diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go
new file mode 100644
index 0000000..fd5dfc7
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go
@@ -0,0 +1,60 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package activation
+
+import (
+    "crypto/tls"
+    "net"
+)
+
+// Listeners returns a slice containing a net.Listener for each matching socket type
+// passed to this process.
+//
+// The order of the file descriptors is preserved in the returned slice.
+// Nil values are used to fill any gaps. For example, if systemd were to return file
+// descriptors corresponding with "udp, tcp, tcp", then the slice would contain
+// {nil, net.Listener, net.Listener}.
+func Listeners(unsetEnv bool) ([]net.Listener, error) {
+    files := Files(unsetEnv)
+    listeners := make([]net.Listener, len(files))
+
+    for i, f := range files {
+        if pc, err := net.FileListener(f); err == nil {
+            listeners[i] = pc
+        }
+    }
+    return listeners, nil
+}
+
+// TLSListeners returns a slice containing a net.Listener for each matching TCP
+// socket type passed to this process.
+// It uses the default Listeners func and wraps the TCP listeners in TLS based on tlsConfig.
+func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners(unsetEnv) + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil && err == nil { + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/activation/packetconns.go new file mode 100644 index 0000000..48b2ca0 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/packetconns.go @@ -0,0 +1,37 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { + files := Files(unsetEnv) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go new file mode 100644 index 0000000..ba6d41d --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -0,0 +1,63 @@ +// Copyright 2014 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Code forked from Docker project +package daemon + +import ( + "net" + "os" +) + +// SdNotify sends a message to the init daemon. It is common to ignore the error. +// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` +// will be unconditionally unset. +// +// It returns one of the following: +// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) +// (false, err) - notification supported, but failure happened (e.g. 
error connecting to NOTIFY_SOCKET or while sending data)
+// (true, nil) - notification supported, data has been sent
+func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) {
+    socketAddr := &net.UnixAddr{
+        Name: os.Getenv("NOTIFY_SOCKET"),
+        Net:  "unixgram",
+    }
+
+    // NOTIFY_SOCKET not set
+    if socketAddr.Name == "" {
+        return false, nil
+    }
+
+    if unsetEnvironment {
+        err = os.Unsetenv("NOTIFY_SOCKET")
+    }
+    if err != nil {
+        return false, err
+    }
+
+    conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
+    // Error connecting to NOTIFY_SOCKET
+    if err != nil {
+        return false, err
+    }
+    defer conn.Close()
+
+    _, err = conn.Write([]byte(state))
+    // Error sending the message
+    if err != nil {
+        return false, err
+    }
+    return true, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
new file mode 100644
index 0000000..35a92e6
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
@@ -0,0 +1,72 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package daemon
+
+import (
+    "fmt"
+    "os"
+    "strconv"
+    "time"
+)
+
+// SdWatchdogEnabled returns watchdog information for a service.
+// The process should send daemon.SdNotify("WATCHDOG=1") once every time/2.
+// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC`
+// and `WATCHDOG_PID` will be unconditionally unset.
+//
+// It returns one of the following:
+// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
+// (0, err) - an error happened (e.g. error converting time).
+// (time, nil) - watchdog is enabled and we can send pings;
+// time is the delay before an inactive service will be killed.
+func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
+    wusec := os.Getenv("WATCHDOG_USEC")
+    wpid := os.Getenv("WATCHDOG_PID")
+    if unsetEnvironment {
+        wusecErr := os.Unsetenv("WATCHDOG_USEC")
+        wpidErr := os.Unsetenv("WATCHDOG_PID")
+        if wusecErr != nil {
+            return 0, wusecErr
+        }
+        if wpidErr != nil {
+            return 0, wpidErr
+        }
+    }
+
+    if wusec == "" {
+        return 0, nil
+    }
+    s, err := strconv.Atoi(wusec)
+    if err != nil {
+        return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
+    }
+    if s <= 0 {
+        return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
+    }
+    interval := time.Duration(s) * time.Microsecond
+
+    if wpid == "" {
+        return interval, nil
+    }
+    p, err := strconv.Atoi(wpid)
+    if err != nil {
+        return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
+    }
+    if os.Getpid() != p {
+        return 0, nil
+    }
+
+    return interval, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
new file mode 100644
index 0000000..c1694fb
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
@@ -0,0 +1,213 @@
+// Copyright 2015 CoreOS, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } +} + +// New establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func New() (*Conn, error) { + conn, err := NewSystemConnection() + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnection() + } + return conn, err +} + +// NewSystemConnection establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection +func NewSystemConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. 
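+//
+// A minimal usage sketch (this requires access to /run/systemd/private,
+// i.e. root; the property name is illustrative and error handling is elided):
+//
+//     conn, err := NewSystemdConnection()
+//     if err != nil { /* handle error */ }
+//     defer conn.Close()
+//     version, _ := conn.GetManagerProperty("Version")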
+func NewSystemdConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. +func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 0000000..ab17f7c --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,565 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"errors"
+	"path"
+	"strconv"
+
+	"github.com/godbus/dbus"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+	var id uint32
+	var job dbus.ObjectPath
+	var unit string
+	var result string
+	dbus.Store(signal.Body, &id, &job, &unit, &result)
+	c.jobListener.Lock()
+	out, ok := c.jobListener.jobs[job]
+	if ok {
+		out <- result
+		delete(c.jobListener.jobs, job)
+	}
+	c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+	if ch != nil {
+		c.jobListener.Lock()
+		defer c.jobListener.Unlock()
+	}
+
+	var p dbus.ObjectPath
+	err := c.sysobj.Call(job, 0, args...).Store(&p)
+	if err != nil {
+		return 0, err
+	}
+
+	if ch != nil {
+		c.jobListener.jobs[p] = ch
+	}
+
+	// ignore error since 0 is fine if conversion fails
+	jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+	return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the unit's current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned. There
+// does exist the possibility for no error to be returned, but for the returned job
+// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
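A usage sketch for the job machinery above: start a unit in "replace" mode and block on the result channel (the unit name "foo.service" is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Buffered so the JobRemoved dispatcher never blocks on delivery.
	ch := make(chan string, 1)
	if _, err := conn.StartUnit("foo.service", "replace", ch); err != nil {
		log.Fatal(err)
	}
	// One of: done, canceled, timeout, failed, dependency, skipped.
	fmt.Println("job result:", <-ch)
}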
+
+// StopUnit is similar to StartUnit, but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running, and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
+// restart otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
+// "Try" flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+	c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+	return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+	var err error
+	var props map[string]dbus.Variant
+
+	path := unitPath(unit)
+	if !path.IsValid() {
+		return nil, errors.New("invalid unit name: " + unit)
+	}
+
+	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+	err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]interface{}, len(props))
+	for k, v := range props {
+		out[k] = v.Value()
+	}
+
+	return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetServiceProperty returns property for given service name and property name +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. 
+	Path        dbus.ObjectPath // The unit object path
+	JobId       uint32          // If there is a job queued for the unit, the numeric job id, 0 otherwise
+	JobType     string          // The job type as string
+	JobPath     dbus.ObjectPath // The job object path
+}
+
+type storeFunc func(retvalues ...interface{}) error
+
+func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
+	result := make([][]interface{}, 0)
+	err := f(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	status := make([]UnitStatus, len(result))
+	statusInterface := make([]interface{}, len(status))
+	for i := range status {
+		statusInterface[i] = &status[i]
+	}
+
+	err = dbus.Store(resultInterface, statusInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return status, nil
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// ListUnitsFiltered returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// ListUnitsByPatterns returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// ListUnitsByNames returns an array with units. It takes a list of units'
+// names and returns a UnitStatus array. Compared to the ListUnitsByPatterns
+// method, this method returns statuses even for inactive or non-existing
+// units. The input array should contain exact unit names, not patterns.
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+	Path string
+	Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+	result := make([][]interface{}, 0)
+	err := f(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	files := make([]UnitFile, len(result))
+	fileInterface := make([]interface{}, len(files))
+	for i := range files {
+		fileInterface[i] = &files[i]
+	}
+
+	err = dbus.Store(resultInterface, fileInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return files, nil
+}
+
+// ListUnitFiles returns an array of all available units on disk.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// ListUnitFilesByPatterns returns an array of all available units on disk matching the patterns.
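The listing wrappers above differ only in the D-Bus member they call; a typical consumer looks like this sketch (ListUnitsByPatterns assumes a systemd recent enough to implement that manager method):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Ask only for failed *.service units.
	units, err := conn.ListUnitsByPatterns([]string{"failed"}, []string{"*.service"})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range units {
		fmt.Printf("%s: %s/%s\n", u.Name, u.ActiveState, u.SubState)
	}
}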
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// MaskUnitFiles masks one or more units in the system +// +// It takes three arguments: +// * list of units to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +// * force flag +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// UnmaskUnitFiles unmasks one or more units in the system +// +// It takes two arguments: +// * list of unit files to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]UnmaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type UnmaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 0000000..6c81895 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. 
These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. 
See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+	return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+	return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+	return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+	return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+	return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+	return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+	return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+	return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+	return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+	return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+	return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+	return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+	return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+	return propDependency("RequiresMountsFor", units)
+}
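These Property helpers are mostly consumed by StartTransientUnit from methods.go above; a sketch of running a one-off command as a transient service (the unit name and command line are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	props := []dbus.Property{
		dbus.PropDescription("transient sleep example"),
		dbus.PropExecStart([]string{"/bin/sleep", "30"}, false),
	}

	ch := make(chan string, 1)
	if _, err := conn.StartTransientUnit("example-sleep.service", "replace", props, ch); err != nil {
		log.Fatal(err)
	}
	fmt.Println("job result:", <-ch)
}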
+
+// PropSlice sets the Slice unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+	return Property{
+		Name:  "Slice",
+		Value: dbus.MakeVariant(slice),
+	}
+}
+
+// PropPids sets the PIDs field of scope units used in the initial construction
+// of the scope only and specifies the initial PIDs to add to the scope object.
+// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
+func PropPids(pids ...uint32) Property {
+	return Property{
+		Name:  "PIDs",
+		Value: dbus.MakeVariant(pids),
+	}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go
new file mode 100644
index 0000000..f92e6fb
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/set.go
@@ -0,0 +1,47 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+	data map[string]bool
+}
+
+func (s *set) Add(value string) {
+	s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+	delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+	_, exists = s.data[value]
+	return
+}
+
+func (s *set) Length() int {
+	return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+	for val := range s.data {
+		values = append(values, val)
+	}
+	return
+}
+
+func newSet() *set {
+	return &set{make(map[string]bool)}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 0000000..9964514
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,252 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"errors"
+	"time"
+
+	"github.com/godbus/dbus"
+)
+
+const (
+	cleanIgnoreInterval = int64(10 * time.Second)
+	ignoreInterval      = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
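Subscribe, the dispatch loop and SubscribeUnits below combine into a polling watcher, roughly like this sketch:

package main

import (
	"log"
	"time"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Required before SubscribeUnits, per the doc comment above.
	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	updates, errs := conn.SubscribeUnits(time.Second)
	for {
		select {
		case changed := <-updates:
			for name, status := range changed {
				if status == nil {
					log.Printf("%s: removed", name)
				} else {
					log.Printf("%s: %s", name, status.SubState)
				}
			}
		case err := <-errs:
			log.Print(err)
		}
	}
}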
+func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. 
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+	c.subscriber.updateCh = updateCh
+	c.subscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+
+	if c.shouldIgnore(path) {
+		return
+	}
+
+	info, err := c.GetUnitProperties(string(path))
+	if err != nil {
+		select {
+		case c.subscriber.errCh <- err:
+		default:
+		}
+		// Without the unit's properties there is nothing to report.
+		return
+	}
+
+	name := info["Id"].(string)
+	substate := info["SubState"].(string)
+
+	update := &SubStateUpdate{name, substate}
+	select {
+	case c.subscriber.updateCh <- update:
+	default:
+		select {
+		case c.subscriber.errCh <- errors.New("update channel full!"):
+		default:
+		}
+	}
+
+	c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+	t, ok := c.subscriber.ignore[path]
+	return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+	c.cleanIgnore()
+
+	// unit is unloaded - it will trigger bad systemd dbus behavior
+	if info["LoadState"].(string) == "not-found" {
+		c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+	}
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+	now := time.Now().UnixNano()
+	if c.subscriber.cleanIgnore < now {
+		c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+		for p, t := range c.subscriber.ignore {
+			if t < now {
+				delete(c.subscriber.ignore, p)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 0000000..5b408d5
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"time"
+)
+
+// SubscriptionSet is a subscription set which is like conn.Subscribe but
+// can filter to only return events for a set of units.
+type SubscriptionSet struct {
+	*set
+	conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+	return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+	// TODO: Make fully evented by using systemd 209 with properties changed values
+	return s.conn.SubscribeUnitsCustom(time.Second, 0,
+		mismatchUnitStatus,
+		func(unit string) bool { return s.filter(unit) },
+	)
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+	return &SubscriptionSet{newSet(), conn}
+}
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent. false is returned if the objects are equivalent.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+	return u1.Name != u2.Name ||
+		u1.Description != u2.Description ||
+		u1.LoadState != u2.LoadState ||
+		u1.ActiveState != u2.ActiveState ||
+		u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000..7f43499
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,184 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var conn net.Conn + +func init() { + var err error + conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") + if err != nil { + conn = nil + } +} + +// Enabled returns true if the local systemd journal is available for logging +func Enabled() bool { + return conn != nil +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + if conn == nil { + return journalError("could not connect to journald socket") + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, err := io.Copy(conn, data) + if err != nil && isSocketSpaceError(err) { + file, err := tempFd() + if err != nil { + return journalError(err.Error()) + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return journalError(err.Error()) + } + + rights := syscall.UnixRights(int(file.Fd())) + + /* this connection should always be a UnixConn, but better safe than sorry */ + unixConn, ok := conn.(*net.UnixConn) + if !ok { + return journalError("can't send file through non-Unix connection") + } + unixConn.WriteMsgUnix([]byte{}, rights, nil) + } else if err != nil { + return journalError(err.Error()) + } + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if !validVarName(name) { + journalError("variable name contains invalid character, ignoring") + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +func validVarName(name string) bool { + /* The variable name must be in uppercase and consist only of characters, + * numbers and underscores, and may not begin with an underscore. 
(from the docs)
+	 */
+
+	// An empty name is invalid; name[0] below would panic on it.
+	if len(name) == 0 {
+		return false
+	}
+
+	valid := name[0] != '_'
+	for _, c := range name {
+		valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+	}
+	return valid
+}
+
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(syscall.Errno)
+	if !ok {
+		return false
+	}
+
+	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+func journalError(s string) error {
+	s = "journal error: " + s
+	fmt.Fprintln(os.Stderr, s)
+	return errors.New(s)
+}
diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go
new file mode 100644
index 0000000..7828ce6
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util.go
@@ -0,0 +1,90 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package util contains utility functions related to systemd that applications
+// can use to check things like whether systemd is running. Note that some of
+// these functions attempt to manually load systemd libraries at runtime rather
+// than linking against them.
+package util
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+var (
+	ErrNoCGO = fmt.Errorf("go-systemd built with CGO disabled")
+)
+
+// GetRunningSlice attempts to retrieve the name of the systemd slice in which
+// the current process is running.
+// This function is a wrapper around the libsystemd C library; if it cannot be
+// opened, an error is returned.
+func GetRunningSlice() (string, error) {
+	return getRunningSlice()
+}
+
+// RunningFromSystemService tries to detect whether the current process has
+// been invoked from a system service. The condition for this is whether the
+// process is _not_ a user process. User processes are those running in session
+// scopes or under per-user `systemd --user` instances.
+//
+// To avoid false positives on systems without `pam_systemd` (which is
+// responsible for creating user sessions), this function also uses a heuristic
+// to detect whether it's being invoked from a session leader process. This is
+// the case if the current process is executed directly from a service file
+// (e.g. with `ExecStart=/this/cmd`). Note that this heuristic will fail if the
+// command is instead launched in a subshell or similar so that it is not the
+// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`)
+//
+// This function is a wrapper around the libsystemd C library; if this is
+// unable to successfully open a handle to the library for any reason (e.g. it
+// cannot be found), an error will be returned.
+func RunningFromSystemService() (bool, error) { + return runningFromSystemService() +} + +// CurrentUnitName attempts to retrieve the name of the systemd system unit +// from which the calling process has been invoked. It wraps the systemd +// `sd_pid_get_unit` call, with the same caveat: for processes not part of a +// systemd system unit, this function will return an error. +func CurrentUnitName() (string, error) { + return currentUnitName() +} + +// IsRunningSystemd checks whether the host was booted with systemd as its init +// system. This functions similarly to systemd's `sd_booted(3)`: internally, it +// checks whether /run/systemd/system/ exists and is a directory. +// http://www.freedesktop.org/software/systemd/man/sd_booted.html +func IsRunningSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} + +// GetMachineID returns a host's 128-bit machine ID as a string. This functions +// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads +// the contents of /etc/machine-id +// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html +func GetMachineID() (string, error) { + machineID, err := ioutil.ReadFile("/etc/machine-id") + if err != nil { + return "", fmt.Errorf("failed to read /etc/machine-id: %v", err) + } + return strings.TrimSpace(string(machineID)), nil +} diff --git a/vendor/github.com/coreos/go-systemd/util/util_cgo.go b/vendor/github.com/coreos/go-systemd/util/util_cgo.go new file mode 100644 index 0000000..22c0d60 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util_cgo.go @@ -0,0 +1,174 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
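Before the cgo-backed implementations that follow, a short usage sketch for the journal package vendored above (the message text and custom field are illustrative; journald field names must be uppercase letters, digits and underscores):

package main

import (
	"log"
	"os"

	"github.com/coreos/go-systemd/journal"
)

func main() {
	if !journal.Enabled() {
		log.Fatal("journald socket is not available")
	}

	// Formatted entry at notice priority.
	journal.Print(journal.PriNotice, "service started, pid %d", os.Getpid())

	// Structured entry with a custom field.
	journal.Send("cache rebuilt", journal.PriInfo, map[string]string{
		"CACHE_ENTRIES": "1024",
	})
}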
+ +// +build cgo + +package util + +// #include +// #include +// #include +// +// int +// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid) +// { +// int (*sd_pid_get_owner_uid)(pid_t, uid_t *); +// +// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f; +// return sd_pid_get_owner_uid(pid, uid); +// } +// +// int +// my_sd_pid_get_unit(void *f, pid_t pid, char **unit) +// { +// int (*sd_pid_get_unit)(pid_t, char **); +// +// sd_pid_get_unit = (int (*)(pid_t, char **))f; +// return sd_pid_get_unit(pid, unit); +// } +// +// int +// my_sd_pid_get_slice(void *f, pid_t pid, char **slice) +// { +// int (*sd_pid_get_slice)(pid_t, char **); +// +// sd_pid_get_slice = (int (*)(pid_t, char **))f; +// return sd_pid_get_slice(pid, slice); +// } +// +// int +// am_session_leader() +// { +// return (getsid(0) == getpid()); +// } +import "C" +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/coreos/pkg/dlopen" +) + +var libsystemdNames = []string{ + // systemd < 209 + "libsystemd-login.so.0", + "libsystemd-login.so", + + // systemd >= 209 merged libsystemd-login into libsystemd proper + "libsystemd.so.0", + "libsystemd.so", +} + +func getRunningSlice() (slice string, err error) { + var h *dlopen.LibHandle + h, err = dlopen.GetHandle(libsystemdNames) + if err != nil { + return + } + defer func() { + if err1 := h.Close(); err1 != nil { + err = err1 + } + }() + + sd_pid_get_slice, err := h.GetSymbolPointer("sd_pid_get_slice") + if err != nil { + return + } + + var s string + sl := C.CString(s) + defer C.free(unsafe.Pointer(sl)) + + ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl) + if ret < 0 { + err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret)) + return + } + + return C.GoString(sl), nil +} + +func runningFromSystemService() (ret bool, err error) { + var h *dlopen.LibHandle + h, err = dlopen.GetHandle(libsystemdNames) + if err != nil { + return + } + defer func() { + if err1 := h.Close(); err1 != nil { + err = err1 + } + }() + + sd_pid_get_owner_uid, err := h.GetSymbolPointer("sd_pid_get_owner_uid") + if err != nil { + return + } + + var uid C.uid_t + errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid) + serrno := syscall.Errno(-errno) + // when we're running from a unit file, sd_pid_get_owner_uid returns + // ENOENT (systemd <220) or ENXIO (systemd >=220) + switch { + case errno >= 0: + ret = false + case serrno == syscall.ENOENT, serrno == syscall.ENXIO: + // Since the implementation of sessions in systemd relies on + // the `pam_systemd` module, using the sd_pid_get_owner_uid + // heuristic alone can result in false positives if that module + // (or PAM itself) is not present or properly configured on the + // system. As such, we also check if we're the session leader, + // which should be the case if we're invoked from a unit file, + // but not if e.g. 
we're invoked from the command line from a + // user's login session + ret = C.am_session_leader() == 1 + default: + err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno)) + } + return +} + +func currentUnitName() (unit string, err error) { + var h *dlopen.LibHandle + h, err = dlopen.GetHandle(libsystemdNames) + if err != nil { + return + } + defer func() { + if err1 := h.Close(); err1 != nil { + err = err1 + } + }() + + sd_pid_get_unit, err := h.GetSymbolPointer("sd_pid_get_unit") + if err != nil { + return + } + + var s string + u := C.CString(s) + defer C.free(unsafe.Pointer(u)) + + ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u) + if ret < 0 { + err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret)) + return + } + + unit = C.GoString(u) + return +} diff --git a/vendor/github.com/coreos/go-systemd/util/util_stub.go b/vendor/github.com/coreos/go-systemd/util/util_stub.go new file mode 100644 index 0000000..477589e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util_stub.go @@ -0,0 +1,23 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !cgo + +package util + +func getRunningSlice() (string, error) { return "", ErrNoCGO } + +func runningFromSystemService() (bool, error) { return false, ErrNoCGO } + +func currentUnitName() (string, error) { return "", ErrNoCGO } diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 0000000..e06d208 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 0000000..b39ddfa --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/pkg/README.md b/vendor/github.com/coreos/pkg/README.md
new file mode 100644
index 0000000..ca68a07
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/README.md
@@ -0,0 +1,4 @@
+a collection of go utility packages
+
+[![Build Status](https://travis-ci.org/coreos/pkg.png?branch=master)](https://travis-ci.org/coreos/pkg)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/coreos/pkg)
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000..81efb1f
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or APIs that are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Producing a string is the only guaranteed behavior.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, along with less helpful but higher-volume notices.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. + diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 0000000..b305a84 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) + endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. 
+func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	str := fmt.Sprint(entries...)
+	prefix := lf.prefix
+	if pkg != "" {
+		prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
+	}
+	lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (lf *LogFormatter) Flush() {
+	// noop
+}
+
+// NilFormatter is a no-op log formatter that does nothing.
+type NilFormatter struct {
+}
+
+// NewNilFormatter is a helper to produce a new NilFormatter struct. It logs no
+// messages so that you can cause part of your logging to be silent.
+func NewNilFormatter() Formatter {
+	return &NilFormatter{}
+}
+
+// Format does nothing.
+func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
+	// noop
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (_ *NilFormatter) Flush() {
+	// noop
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 0000000..426603e
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+	StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+	g := &GlogFormatter{}
+	g.w = bufio.NewWriter(w)
+	return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+	g.w.Write(GlogHeader(level, depth+1))
+	g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	now := time.Now().UTC()
+	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	if line < 0 {
+		line = 0 // not a real line number
+	}
+	buf := &bytes.Buffer{}
+	buf.Grow(30)
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	buf.WriteString(level.Char())
+	twoDigits(buf, int(month))
+	twoDigits(buf, day)
+	buf.WriteByte(' ')
+	twoDigits(buf, hour)
+	buf.WriteByte(':')
+	twoDigits(buf, minute)
+	buf.WriteByte(':')
+	twoDigits(buf, second)
+	buf.WriteByte('.')
+	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+	buf.WriteByte('Z')
+	buf.WriteByte(' ')
+	buf.WriteString(strconv.Itoa(pid))
+	buf.WriteByte(' ')
+	buf.WriteString(file)
+	buf.WriteByte(':')
+	buf.WriteString(strconv.Itoa(line))
+	buf.WriteByte(']')
+	buf.WriteByte(' ')
+	return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+	c2 := digits[d%10]
+	d /= 10
+	c1 := digits[d%10]
+	b.WriteByte(c1)
+	b.WriteByte(c2)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000..44b8cd3
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows

+package capnslog
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to do so is to create your own init_log.go
+// file, much like this one.
+
+func init() {
+	initHijack()
+
+	// The Go `log` package uses os.Stderr.
+	SetFormatter(NewDefaultFormatter(os.Stderr))
+	SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+	if syscall.Getppid() == 1 {
+		// We're running under init, which may be systemd.
+		f, err := NewJournaldFormatter()
+		if err == nil {
+			return f
+		}
+	}
+	return NewPrettyFormatter(out, false)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000..4553050
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 0000000..72e0520 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 0000000..970086b --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 0000000..8495448 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. 
+func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 0000000..612d55c --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,177 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) 
+} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 0000000..4be5a1f --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen.go b/vendor/github.com/coreos/pkg/dlopen/dlopen.go new file mode 100644 index 0000000..23774f6 --- /dev/null +++ b/vendor/github.com/coreos/pkg/dlopen/dlopen.go @@ -0,0 +1,82 @@ +// Copyright 2016 CoreOS, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dlopen provides some convenience functions to dlopen a library and
+// get its symbols.
+package dlopen
+
+// #cgo LDFLAGS: -ldl
+// #include <stdlib.h>
+// #include <dlfcn.h>
import "C"
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+var ErrSoNotFound = errors.New("unable to open a handle to the library")
+
+// LibHandle represents an open handle to a library (.so)
+type LibHandle struct {
+	Handle  unsafe.Pointer
+	Libname string
+}
+
+// GetHandle tries to get a handle to a library (.so), attempting to access it
+// by the names specified in libs and returning the first that is successfully
+// opened. Callers are responsible for closing the handle. If no library can
+// be successfully opened, an error is returned.
+func GetHandle(libs []string) (*LibHandle, error) {
+	for _, name := range libs {
+		libname := C.CString(name)
+		defer C.free(unsafe.Pointer(libname))
+		handle := C.dlopen(libname, C.RTLD_LAZY)
+		if handle != nil {
+			h := &LibHandle{
+				Handle:  handle,
+				Libname: name,
+			}
+			return h, nil
+		}
+	}
+	return nil, ErrSoNotFound
+}
+
+// GetSymbolPointer takes a symbol name and returns a pointer to the symbol.
+func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) {
+	sym := C.CString(symbol)
+	defer C.free(unsafe.Pointer(sym))
+
+	C.dlerror()
+	p := C.dlsym(l.Handle, sym)
+	e := C.dlerror()
+	if e != nil {
+		return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e)))
+	}
+
+	return p, nil
+}
+
+// Close closes a LibHandle.
+func (l *LibHandle) Close() error {
+	C.dlerror()
+	C.dlclose(l.Handle)
+	e := C.dlerror()
+	if e != nil {
+		return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e)))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
new file mode 100644
index 0000000..48a6601
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go
@@ -0,0 +1,56 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build linux

+package dlopen
+
+// #include <string.h>
+// #include <stdlib.h>
+//
+// int
+// my_strlen(void *f, const char *s)
+// {
+//   size_t (*strlen)(const char *);
+//
+//   strlen = (size_t (*)(const char *))f;
+//   return strlen(s);
+// }
import "C"

+import (
+	"fmt"
+	"unsafe"
+)
+
+func strlen(libs []string, s string) (int, error) {
+	h, err := GetHandle(libs)
+	if err != nil {
+		return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err)
+	}
+	defer h.Close()
+
+	f := "strlen"
+	cs := C.CString(s)
+	defer C.free(unsafe.Pointer(cs))
+
+	strlen, err := h.GetSymbolPointer(f)
+	if err != nil {
+		return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err)
+	}
+
+	len := C.my_strlen(strlen, cs)
+
+	return int(len), nil
+}
diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE b/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE
new file mode 100644
index 0000000..fdfc8ab
--- /dev/null
+++ b/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE
@@ -0,0 +1,22 @@
+Copyright 2015 Timothée Peignier. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/README.md b/vendor/github.com/cyberdelia/go-metrics-graphite/README.md
new file mode 100644
index 0000000..e43f4df
--- /dev/null
+++ b/vendor/github.com/cyberdelia/go-metrics-graphite/README.md
@@ -0,0 +1,19 @@
+This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics)
+library which will post the metrics to Graphite. It was originally part of the
+`go-metrics` library itself, but has been split off to make maintenance of
+both the core library and the client easier.
+
+### Usage
+
+```go
+import "github.com/cyberdelia/go-metrics-graphite"
+
+
+go graphite.Graphite(metrics.DefaultRegistry,
+  1*time.Second, "some.prefix", addr)
+```
+
+### Migrating from `rcrowley/go-metrics` implementation
+
+Simply modify the import from `"github.com/rcrowley/go-metrics/librato"` to
+`"github.com/cyberdelia/go-metrics-graphite"` and it should Just Work.
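+
+As a minimal, self-contained sketch of the same wiring (the `127.0.0.1:2003`
+endpoint and the `requests` counter name below are only illustrative, not
+part of the library), a complete program might look like:
+
+```go
+package main
+
+import (
+	"net"
+	"time"
+
+	graphite "github.com/cyberdelia/go-metrics-graphite"
+	"github.com/rcrowley/go-metrics"
+)
+
+func main() {
+	// Hypothetical Graphite endpoint; adjust for your environment.
+	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+	if err != nil {
+		panic(err)
+	}
+
+	// Register a counter in the default registry and increment it.
+	c := metrics.NewRegisteredCounter("requests", metrics.DefaultRegistry)
+	c.Inc(1)
+
+	// Report the registry to Graphite every second under "some.prefix".
+	go graphite.Graphite(metrics.DefaultRegistry, 1*time.Second, "some.prefix", addr)
+
+	time.Sleep(5 * time.Second) // allow a few flushes before exiting
+}
+```
+
+`Graphite` blocks on its internal flush ticker, which is why both the snippet
+above and this sketch launch it on its own goroutine.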
diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go b/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go new file mode 100644 index 0000000..ffc309b --- /dev/null +++ b/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go @@ -0,0 +1,113 @@ +package graphite + +import ( + "bufio" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" + + "github.com/rcrowley/go-metrics" +) + +// GraphiteConfig provides a container with configuration parameters for +// the Graphite exporter +type GraphiteConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry metrics.Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names + Percentiles []float64 // Percentiles to export from timers and histograms +} + +// Graphite is a blocking exporter function which reports metrics in r +// to a graphite server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func Graphite(r metrics.Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + GraphiteWithConfig(GraphiteConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, + }) +} + +// GraphiteWithConfig is a blocking exporter function just like Graphite, +// but it takes a GraphiteConfig instead. +func GraphiteWithConfig(c GraphiteConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. 
+func GraphiteOnce(c GraphiteConfig) error { + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case metrics.Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case metrics.Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case metrics.GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case metrics.Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case metrics.Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case metrics.Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx]/du, now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..c836416 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 0000000..2624304 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). + +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) +str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. 
+
+```Go
+package main
+
+import (
+	"fmt"
+	"html"
+	"net/http"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "text/html")
+	fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+	fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+	http.HandleFunc("/", handler)
+	http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+  flag: (main.Flag) flagTwo,
+  data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+  (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+ 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+ 00000020  31 32                                             |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+	String to use for each indentation level for Dump functions.
+	It is a single space by default. A popular alternative is "\t".
+
+* MaxDepth
+	Maximum number of levels to descend into nested data structures.
+	There is no limit by default.
+
+* DisableMethods
+	Disables invocation of error and Stringer interface methods.
+	Method invocation is enabled by default.
+
+* DisablePointerMethods
+	Disables invocation of error and Stringer interface methods on types
+	which only accept pointer receivers from non-pointer variables. This option
+	relies on access to the unsafe package, so it will not have any effect when
+	running in environments without access to the unsafe package such as Google
+	App Engine or with the "safe" build tag specified.
+	Pointer method invocation is enabled by default.
+
+* DisablePointerAddresses
+	DisablePointerAddresses specifies whether to disable the printing of
+	pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+	DisableCapacities specifies whether to disable the printing of capacities
+	for arrays, slices, maps and channels. This is useful when diffing data
+	structures in tests.
+
+* ContinueOnMethod
+	Enables recursion into types after invoking error and Stringer interface
+	methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+	Specifies map keys should be sorted before being printed. Use
+	this to have a more deterministic, diffable output. Note that
+	only native types (bool, int, uint, floats, uintptr and string)
+	and types which implement error or Stringer interfaces are supported,
+	with other types sorted according to the reflect.Value.String() output
+	which guarantees display stability. Natural map order is used by
+	default.
+
+* SpewKeys
+	SpewKeys specifies that, as a last resort attempt, map keys should be
+	spewed to strings and sorted by those strings. This is only considered
+	if SortKeys is true.
+```
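The options above map one-to-one onto ConfigState fields. A minimal sketch of the two configuration styles described in this README, using only names that appear in this patch (`ConfigState`, its fields, the `Dump` method, and the `spew.Config` global):

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Per-instance settings: this ConfigState is independent of the
	// global one, so it is safe to use alongside differently-configured
	// instances.
	cs := spew.ConfigState{Indent: "\t", MaxDepth: 2, SortKeys: true}
	cs.Dump(map[string]int{"b": 2, "a": 1}) // map keys print in sorted order

	// The top-level functions (spew.Dump etc.) read the package-global
	// spew.Config instead.
	spew.Config.DisablePointerAddresses = true
	spew.Dump(&struct{ N int }{N: 1})
}
```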
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features; however, it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available. By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS. The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
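The "safe" mode described above is selected at build time with the standard Go toolchain; nothing go-spew-specific is involved beyond the tag name:

```bash
# Force the pure-reflect ("safe") code path when building or testing:
go build -tags safe ./...
go test -tags safe ./...
```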
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..8a4a658
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. They are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format. Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr    = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag   = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions. Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions). Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind. This code extracts the kind from the flags
+	// field and ensures it's the correct type. When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded. This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set. When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..1fe3cf3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one that
+// bypasses the typical safety restrictions preventing access to unaddressable
+// and unexported data. However, doing this relies on access to the unsafe
+// package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
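To see what the two build variants change in practice, here is a minimal sketch (hypothetical `flag`/`holder` types). With bypass.go compiled in, spew can invoke Stringer/error methods even on values held in unexported fields; with the bypasssafe.go stub, `handleMethods` skips the call and the value is printed by kind instead:

```Go
package main

import "github.com/davecgh/go-spew/spew"

// flag implements fmt.Stringer; it sits in an unexported field below.
type flag int

func (f flag) String() string { return "flagValue" }

type holder struct {
	f flag // unexported, so plain reflection cannot Interface() it
}

func main() {
	// Unsafe variant: the dump can call (flag).String and show "flagValue".
	// "safe" variants: the Stringer is skipped and the raw int 1 is shown.
	spew.Dump(holder{f: 1})
}
```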
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..7c519ff
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use set this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Sprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//	Indent: " "
+//	MaxDepth: 0
+//	DisableMethods: false
+//	DisablePointerMethods: false
+//	ContinueOnMethod: false
+//	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
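Both `NewDefaultConfig` and `ConfigState.NewFormatter` appear in the file above, so a minimal sketch of using the Formatter directly with the standard fmt package looks like this:

```Go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cs := spew.NewDefaultConfig()
	v := &struct{ N int }{N: 7}
	// Wrapping the value explicitly is exactly what cs.Printf does to each
	// of its arguments via convertArgs.
	fmt.Printf("%+v\n", cs.NewFormatter(v))
}
```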
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default. A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output. Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported, with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability. Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings. This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..df1d582
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8. It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char. It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..c49875b --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from the map used to
+ // detect circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
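+ // With the '+' flag, the chain of dereferenced addresses recorded above
+ // is printed in parentheses separated by "->", e.g. (0xaddr1->0xaddr2)
+ // for two levels of indirection (addresses are placeholders and vary
+ // per run).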
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
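+
+As a brief, illustrative sketch (myVar stands for any value you want to
+print; actual output depends on the value and on the pointer addresses at
+run time):
+
+ spew.Printf("myVar: %v\n", myVar)
+ spew.Printf("myVar: %+v\n", myVar)
+ spew.Printf("myVar: %#v\n", myVar)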
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/deckarep/golang-set/LICENSE b/vendor/github.com/deckarep/golang-set/LICENSE new file mode 100644 index 0000000..b5768f8 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/LICENSE @@ -0,0 +1,22 @@ +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/README.md b/vendor/github.com/deckarep/golang-set/README.md new file mode 100644 index 0000000..744b184 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/README.md @@ -0,0 +1,94 @@ +[![Build Status](https://travis-ci.org/deckarep/golang-set.png?branch=master)](https://travis-ci.org/deckarep/golang-set) +[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.png)](http://godoc.org/github.com/deckarep/golang-set) + +## golang-set + + +The missing set collection for the Go language. Until Go has sets built-in...use this. + +Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python. +You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository +and carry-on and to the rest that find this useful please contribute in helping me make it better by: + +* Helping to make more idiomatic improvements to the code. +* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~ +* Helping to make the unit-tests more robust and kick-ass. +* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set) +* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.) + +I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang) + +*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. 
This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
+
+## Features (as of 9/22/2014)
+
+* a CartesianProduct() method has been added with unit-tests: [Read more about the Cartesian product](http://en.wikipedia.org/wiki/Cartesian_product) (see the short sketch after the examples below)
+
+## Features (as of 9/15/2014)
+
+* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
+
+## Features (as of 4/22/2014)
+
+* One common interface to both implementations
+* Two set implementations to choose from
+ * a thread-safe implementation designed for concurrent use
+ * a non-thread-safe implementation designed for performance
+* 75 benchmarks for both implementations
+* 35 unit tests for both implementations
+* 14 concurrent tests for the thread-safe implementation
+
+
+
+Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind
+however that the Python set is a built-in type and supports additional features and syntax that make it awesome.
+
+## Examples (not exhaustive):
+
+```go
+requiredClasses := mapset.NewSet()
+requiredClasses.Add("Cooking")
+requiredClasses.Add("English")
+requiredClasses.Add("Math")
+requiredClasses.Add("Biology")
+
+scienceSlice := []interface{}{"Biology", "Chemistry"}
+scienceClasses := mapset.NewSetFromSlice(scienceSlice)
+
+electiveClasses := mapset.NewSet()
+electiveClasses.Add("Welding")
+electiveClasses.Add("Music")
+electiveClasses.Add("Automotive")
+
+bonusClasses := mapset.NewSet()
+bonusClasses.Add("Go Programming")
+bonusClasses.Add("Python Programming")
+
+//Show me all the available classes I can take
+allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
+fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}
+
+
+//Is cooking considered a science class?
+fmt.Println(scienceClasses.Contains("Cooking")) //false
+
+//Show me all classes that are not science classes, since I hate science.
+fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}
+
+//Which science classes are also required classes?
+fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
+
+//How many bonus classes do you offer?
+fmt.Println(bonusClasses.Cardinality()) //2
+
+//Do you have the following classes? Welding, Automotive and English?
+fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
+```
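+
+A short sketch of the PowerSet() and CartesianProduct() methods mentioned above (illustrative; sets are unordered, so your print order may differ):
+
+```go
+pair := mapset.NewSetFromSlice([]interface{}{"a", "b"})
+nums := mapset.NewSetFromSlice([]interface{}{1, 2})
+
+//Every (pair, num) combination:
+fmt.Println(pair.CartesianProduct(nums)) //Set{(a, 1), (a, 2), (b, 1), (b, 2)} in some order
+
+//A 2-element set has 2^2 = 4 subsets, counting the empty set and the set itself:
+fmt.Println(pair.PowerSet().Cardinality()) //4
+```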
+
+Thanks!
+
+-Ralph
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
+[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon)
diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go
new file mode 100644
index 0000000..eccba70
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/set.go
@@ -0,0 +1,168 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+// Package mapset implements a simple and generic set collection.
+// Items stored within it are unordered and unique. It supports
+// typical set operations: membership testing, intersection, union,
+// difference, symmetric difference and cloning.
+//
+// Package mapset provides two implementations. The default
+// implementation is safe for concurrent access. There is a non-threadsafe
+// implementation which is slightly more performant.
+package mapset
+
+type Set interface {
+ // Adds an element to the set. Returns whether
+ // the item was added.
+ Add(i interface{}) bool
+
+ // Returns the number of elements in the set.
+ Cardinality() int
+
+ // Removes all elements from the set, leaving
+ // the empty set.
+ Clear()
+
+ // Returns a clone of the set using the same
+ // implementation, duplicating all keys.
+ Clone() Set
+
+ // Returns whether the given items
+ // are all in the set.
+ Contains(i ...interface{}) bool
+
+ // Returns the difference between this set
+ // and other. The returned set will contain
+ // all elements of this set that are not also
+ // elements of other.
+ //
+ // Note that the argument to Difference
+ // must be of the same type as the receiver
+ // of the method. Otherwise, Difference will
+ // panic.
+ Difference(other Set) Set
+
+ // Determines if two sets are equal to each
+ // other. If they have the same cardinality
+ // and contain the same elements, they are
+ // considered equal. The order in which
+ // the elements were added is irrelevant.
+ //
+ // Note that the argument to Equal must be
+ // of the same type as the receiver of the
+ // method. Otherwise, Equal will panic.
+ Equal(other Set) bool
+
+ // Returns a new set containing only the elements
+ // that exist in both sets.
+ //
+ // Note that the argument to Intersect
+ // must be of the same type as the receiver
+ // of the method. Otherwise, Intersect will
+ // panic.
+ Intersect(other Set) Set
+
+ // Determines if every element in the other set
+ // is in this set.
+ //
+ // Note that the argument to IsSubset
+ // must be of the same type as the receiver
+ // of the method. Otherwise, IsSubset will
+ // panic.
+ IsSubset(other Set) bool
+
+ // Determines if every element in this set is in
+ // the other set.
+ //
+ // Note that the argument to IsSuperset
+ // must be of the same type as the receiver
+ // of the method. Otherwise, IsSuperset will
+ // panic.
+ IsSuperset(other Set) bool
+
+ // Returns a channel of elements that you can
+ // range over.
+ Iter() <-chan interface{}
+
+ // Remove a single element from the set.
+ Remove(i interface{})
+
+ // Provides a convenient string representation
+ // of the current state of the set.
+ String() string
+
+ // Returns a new set with all elements which are
+ // in either this set or the other set but not in both.
+ //
+ // Note that the argument to SymmetricDifference
+ // must be of the same type as the receiver
+ // of the method. Otherwise, SymmetricDifference
+ // will panic.
+ SymmetricDifference(other Set) Set
+
+ // Returns a new set with all elements in both sets.
+ //
+ // Note that the argument to Union must be of the
+ // same type as the receiver of the method.
+ // Otherwise, Union will panic.
+ Union(other Set) Set
+
+ // Returns all subsets of a given set (Power Set).
+ PowerSet() Set
+
+ // Returns the Cartesian Product of two sets.
+ CartesianProduct(other Set) Set
+
+ // Returns the members of the set as a slice.
+ ToSlice() []interface{}
+}
+
+// Creates and returns a reference to an empty set.
+func NewSet() Set {
+ set := newThreadSafeSet()
+ return &set
+}
+
+// Creates and returns a reference to a set from an existing slice.
+func NewSetFromSlice(s []interface{}) Set {
+ a := NewSet()
+ for _, item := range s {
+ a.Add(item)
+ }
+ return a
+}
+
+// Creates and returns a reference to an empty set that performs no
+// synchronization; it is not safe for concurrent use.
+func NewThreadUnsafeSet() Set {
+ set := newThreadUnsafeSet()
+ return &set
+}
+
+// Creates and returns a reference to a non-thread-safe set built from
+// an existing slice.
+func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
+ a := NewThreadUnsafeSet()
+ for _, item := range s {
+ a.Add(item)
+ }
+ return a
+}
diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go
new file mode 100644
index 0000000..9dca94a
--- /dev/null
+++ b/vendor/github.com/deckarep/golang-set/threadsafe.go
@@ -0,0 +1,204 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import "sync"
+
+type threadSafeSet struct {
+ s threadUnsafeSet
+ sync.RWMutex
+}
+
+func newThreadSafeSet() threadSafeSet {
+ return threadSafeSet{s: newThreadUnsafeSet()}
+}
+
+func (set *threadSafeSet) Add(i interface{}) bool {
+ set.Lock()
+ ret := set.s.Add(i)
+ set.Unlock()
+ return ret
+}
+
+func (set *threadSafeSet) Contains(i ...interface{}) bool {
+ set.RLock()
+ ret := set.s.Contains(i...)
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) IsSubset(other Set) bool {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ ret := set.s.IsSubset(&o.s)
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) IsSuperset(other Set) bool {
+ return other.IsSubset(set)
+}
+
+func (set *threadSafeSet) Union(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeUnion}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Intersect(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeIntersection}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Difference(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeDifference}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) SymmetricDifference(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ // Read-lock both sets while the underlying computation reads them,
+ // mirroring the other binary operations above.
+ set.RLock()
+ o.RLock()
+
+ unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeDifference}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Clear() {
+ set.Lock()
+ set.s = newThreadUnsafeSet()
+ set.Unlock()
+}
+
+func (set *threadSafeSet) Remove(i interface{}) {
+ set.Lock()
+ delete(set.s, i)
+ set.Unlock()
+}
+
+func (set *threadSafeSet) Cardinality() int {
+ set.RLock()
+ defer set.RUnlock()
+ return len(set.s)
+}
+
+func (set *threadSafeSet) Iter() <-chan interface{} {
+ ch := make(chan interface{})
+ go func() {
+ set.RLock()
+
+ for elem := range set.s {
+ ch <- elem
+ }
+ close(ch)
+ set.RUnlock()
+ }()
+
+ return ch
+}
+
+func (set *threadSafeSet) Equal(other Set) bool {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ ret := set.s.Equal(&o.s)
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) Clone() Set {
+ set.RLock()
+
+ unsafeClone := set.s.Clone().(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeClone}
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) String() string {
+ set.RLock()
+ ret := set.s.String()
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) PowerSet() Set {
+ set.RLock()
+ ret := set.s.PowerSet()
+ set.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) CartesianProduct(other Set) Set {
+ o := other.(*threadSafeSet)
+
+ set.RLock()
+ o.RLock()
+
+ unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
+ ret := &threadSafeSet{s: *unsafeCartProduct}
+ set.RUnlock()
+ o.RUnlock()
+ return ret
+}
+
+func (set *threadSafeSet) ToSlice() []interface{} {
+ set.RLock()
+ keys := make([]interface{}, 0,
set.Cardinality()) + for elem := range set.s { + keys = append(keys, elem) + } + set.RUnlock() + return keys +} diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go new file mode 100644 index 0000000..124521e --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go @@ -0,0 +1,246 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "fmt" + "reflect" + "strings" +) + +type threadUnsafeSet map[interface{}]struct{} + +type orderedPair struct { + first interface{} + second interface{} +} + +func newThreadUnsafeSet() threadUnsafeSet { + return make(threadUnsafeSet) +} + +func (pair *orderedPair) Equal(other orderedPair) bool { + if pair.first == other.first && + pair.second == other.second { + return true + } + + return false +} + +func (set *threadUnsafeSet) Add(i interface{}) bool { + _, found := (*set)[i] + (*set)[i] = struct{}{} + return !found //False if it existed already +} + +func (set *threadUnsafeSet) Contains(i ...interface{}) bool { + for _, val := range i { + if _, ok := (*set)[val]; !ok { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsSubset(other Set) bool { + _ = other.(*threadUnsafeSet) + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadUnsafeSet) Union(other Set) Set { + o := other.(*threadUnsafeSet) + + unionedSet := newThreadUnsafeSet() + + for elem := range *set { + unionedSet.Add(elem) + } + for elem := range *o { + unionedSet.Add(elem) + } + return &unionedSet +} + +func (set *threadUnsafeSet) Intersect(other Set) Set { + o := other.(*threadUnsafeSet) + + intersection := newThreadUnsafeSet() + // loop over smaller set + if set.Cardinality() < other.Cardinality() { + for elem := range *set { + if other.Contains(elem) { + intersection.Add(elem) + } + } + } else { + for elem := range *o { + if set.Contains(elem) { + intersection.Add(elem) + } + } + } + return &intersection +} + +func (set *threadUnsafeSet) Difference(other Set) Set { + _ = other.(*threadUnsafeSet) + + difference := newThreadUnsafeSet() + for elem := range *set { + if !other.Contains(elem) { + difference.Add(elem) + } + } + return &difference +} + +func (set *threadUnsafeSet) 
SymmetricDifference(other Set) Set { + _ = other.(*threadUnsafeSet) + + aDiff := set.Difference(other) + bDiff := other.Difference(set) + return aDiff.Union(bDiff) +} + +func (set *threadUnsafeSet) Clear() { + *set = newThreadUnsafeSet() +} + +func (set *threadUnsafeSet) Remove(i interface{}) { + delete(*set, i) +} + +func (set *threadUnsafeSet) Cardinality() int { + return len(*set) +} + +func (set *threadUnsafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + for elem := range *set { + ch <- elem + } + close(ch) + }() + + return ch +} + +func (set *threadUnsafeSet) Equal(other Set) bool { + _ = other.(*threadUnsafeSet) + + if set.Cardinality() != other.Cardinality() { + return false + } + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) Clone() Set { + clonedSet := newThreadUnsafeSet() + for elem := range *set { + clonedSet.Add(elem) + } + return &clonedSet +} + +func (set *threadUnsafeSet) String() string { + items := make([]string, 0, len(*set)) + + for elem := range *set { + items = append(items, fmt.Sprintf("%v", elem)) + } + return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) +} + +func (pair orderedPair) String() string { + return fmt.Sprintf("(%v, %v)", pair.first, pair.second) +} + +func (set *threadUnsafeSet) PowerSet() Set { + powSet := NewThreadUnsafeSet() + nullset := newThreadUnsafeSet() + powSet.Add(&nullset) + + for es := range *set { + u := newThreadUnsafeSet() + j := powSet.Iter() + for er := range j { + p := newThreadUnsafeSet() + if reflect.TypeOf(er).Name() == "" { + k := er.(*threadUnsafeSet) + for ek := range *(k) { + p.Add(ek) + } + } else { + p.Add(er) + } + p.Add(es) + u.Add(&p) + } + + powSet = powSet.Union(&u) + } + + return powSet +} + +func (set *threadUnsafeSet) CartesianProduct(other Set) Set { + o := other.(*threadUnsafeSet) + cartProduct := NewThreadUnsafeSet() + + for i := range *set { + for j := range *o { + elem := orderedPair{first: i, second: j} + cartProduct.Add(elem) + } + } + + return cartProduct +} + +func (set *threadUnsafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + for elem := range *set { + keys = append(keys, elem) + } + + return keys +} diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 0000000..9c8e20a --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 0000000..0c74e15 --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/cli/README.md b/vendor/github.com/docker/cli/README.md
new file mode 100644
index 0000000..340556d
--- /dev/null
+++ b/vendor/github.com/docker/cli/README.md
@@ -0,0 +1,63 @@
+[![build status](https://circleci.com/gh/docker/cli.svg?style=shield)](https://circleci.com/gh/docker/cli/tree/master)
+
+docker/cli
+==========
+
+This repository is the home of the cli used in the Docker CE and
+Docker EE products.
+
+Development
+===========
+
+`docker/cli` is developed using Docker.
+
+Build a linux binary:
+
+```
+$ make -f docker.Makefile binary
+```
+
+Build binaries for all supported platforms:
+
+```
+$ make -f docker.Makefile cross
+```
+
+Run all linting:
+
+```
+$ make -f docker.Makefile lint
+```
+
+### In-container development environment
+
+Start an interactive development environment:
+
+```
+$ make -f docker.Makefile shell
+```
+
+In the development environment you can run many tasks, including building binaries:
+
+```
+$ make binary
+```
+
+Legal
+=====
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/docker/cli/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+Licensing
+=========
+docker/cli is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
+license text.
diff --git a/vendor/github.com/docker/cli/VERSION b/vendor/github.com/docker/cli/VERSION
new file mode 100644
index 0000000..3e9257a
--- /dev/null
+++ b/vendor/github.com/docker/cli/VERSION
@@ -0,0 +1 @@
+17.07.0-dev
diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go
new file mode 100644
index 0000000..114f1b7
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/cobra.go
@@ -0,0 +1,150 @@
+package cli
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/pkg/term"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+// SetupRootCommand sets default usage, help, and error handling for the
+// root command.
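+// It is typically called once on the root command before execution; an
+// illustrative wiring (the command name is arbitrary):
+//
+// rootCmd := &cobra.Command{Use: "docker"}
+// cli.SetupRootCommand(rootCmd)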
+func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/cli/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
+{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go new file mode 100644 index 0000000..e56c7a7 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -0,0 +1,299 @@ +package command + +import ( + "fmt" + "io" + "net/http" + "os" + "runtime" + + "github.com/docker/cli/cli" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + cliflags "github.com/docker/cli/cli/flags" + dopts "github.com/docker/cli/opts" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/notary/passphrase" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *InStream + Out() *OutStream + Err() io.Writer +} + +// Cli represents the docker command line client. +type Cli interface { + Client() client.APIClient + Out() *OutStream + Err() io.Writer + In() *InStream + SetIn(in *InStream) + ConfigFile() *configfile.ConfigFile + CredentialsStore(serverAddress string) credentials.Store +} + +// DockerCli is an instance of the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + client client.APIClient + defaultVersion string + server ServerInfo +} + +// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. +func (cli *DockerCli) DefaultVersion() string { + return cli.defaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *OutStream { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// SetIn sets the reader used for stdin +func (cli *DockerCli) SetIn(in *InStream) { + cli.in = in +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *InStream { + return cli.in +} + +// ShowHelp shows the command help. +func ShowHelp(err io.Writer) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + cmd.SetOutput(err) + cmd.HelpFunc()(cmd, args) + return nil + } +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// ServerInfo returns the server version details for the host this client is +// connected to +func (cli *DockerCli) ServerInfo() ServerInfo { + return cli.server +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores.
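GetAllCredentials, defined below, merges every configured credential helper with the default store. A sketch of listing the registries that have stored credentials, assuming an initialized CLI (`cliflags.NewClientOptions` is the constructor from the vendored flags package):

```
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
	cliflags "github.com/docker/cli/cli/flags"
)

func main() {
	dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr)
	if err := dockerCli.Initialize(cliflags.NewClientOptions()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Aggregates per-registry credential helpers plus the default store.
	auths, err := dockerCli.GetAllCredentials()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for registry := range auths {
		fmt.Println(registry)
	}
}
```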
+func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + for registry := range cli.configFile.CredentialHelpers { + helper := cli.CredentialsStore(registry) + newAuths, err := helper.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + } + defaultStore := cli.CredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + return auths, nil +} + +func addAll(to, from map[string]types.AuthConfig) { + for reg, ac := range from { + to[reg] = ac + } +} + +// CredentialsStore returns a new credentials store based +// on the settings provided in the configuration file. Empty string returns +// the default credential store. +func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { + if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { + return credentials.NewNativeStore(cli.configFile, helper) + } + return credentials.NewFileStore(cli.configFile) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { + if c.CredentialHelpers != nil && serverAddress != "" { + if helper, exists := c.CredentialHelpers[serverAddress]; exists { + return helper + } + } + return c.CredentialsStore +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { + cli.configFile = LoadDefaultConfigFile(cli.err) + + var err error + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + if tlsconfig.IsErrEncryptedKey(err) { + var ( + passwd string + giveup bool + ) + passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) + + for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ { + // some code and comments borrowed from notary/trustmanager/keystore.go + passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts) + // Check if the passphrase retriever got an error or if it is telling us to give up + if giveup || err != nil { + return errors.Wrap(err, "private key is encrypted, but could not get passphrase") + } + + opts.Common.TLSOptions.Passphrase = passwd + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + } + } + + if err != nil { + return err + } + + cli.defaultVersion = cli.client.ClientVersion() + + if ping, err := cli.client.Ping(context.Background()); err == nil { + cli.server = ServerInfo{ + HasExperimental: ping.Experimental, + OSType: ping.OSType, + } + + cli.client.NegotiateAPIVersionPing(ping) + } else { + // Default to true if we fail to connect to daemon + cli.server = ServerInfo{HasExperimental: true} + } + + return nil +} + +// ServerInfo stores details about the supported features and platform of the +// server +type ServerInfo struct { + HasExperimental bool + OSType string +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. 
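LoadDefaultConfigFile and NewAPIClientFromFlags, both defined below, also work standalone. A sketch that builds a client with default options and pings the daemon; `cliflags.NewCommonOptions` is assumed from the vendored flags package:

```
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
	cliflags "github.com/docker/cli/cli/flags"
	"golang.org/x/net/context"
)

func main() {
	// Warnings from config-file loading are written to stderr,
	// matching what Initialize does above.
	configFile := command.LoadDefaultConfigFile(os.Stderr)

	apiClient, err := command.NewAPIClientFromFlags(cliflags.NewCommonOptions(), configFile)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	ping, err := apiClient.Ping(context.Background())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("server OS type:", ping.OSType)
}
```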
+func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { + configFile, e := cliconfig.Load(cliconfig.Dir()) + if e != nil { + fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + return configFile +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + + verStr := api.DefaultVersion + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + return client.NewClient(host, verStr, httpClient, customHeaders) +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = dopts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. + return nil, nil + } + opts := *tlsOptions + opts.ExclusiveRootPools = true + config, err := tlsconfig.Client(opts) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + CheckRedirect: client.CheckRedirect, + }, nil +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/cli/cli/command/commands/commands.go b/vendor/github.com/docker/cli/cli/command/commands/commands.go new file mode 100644 index 0000000..9f29dac --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/commands/commands.go @@ -0,0 +1,92 @@ +package commands + +import ( + "os" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/container" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/command/network" + "github.com/docker/cli/cli/command/registry" + "github.com/docker/cli/cli/command/system" + "github.com/docker/cli/cli/command/volume" + "github.com/spf13/cobra" +) + +// AddCommands adds all the commands from cli/command to the root command +func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) { + cmd.AddCommand( + // container + container.NewContainerCommand(dockerCli), + container.NewRunCommand(dockerCli), + + // image + image.NewImageCommand(dockerCli), + image.NewBuildCommand(dockerCli), + + // network + network.NewNetworkCommand(dockerCli), + + // registry + registry.NewLoginCommand(dockerCli), + registry.NewLogoutCommand(dockerCli), + registry.NewSearchCommand(dockerCli), + + // system + system.NewSystemCommand(dockerCli), + system.NewVersionCommand(dockerCli), + + // volume + volume.NewVolumeCommand(dockerCli), + + // legacy 
commands may be hidden + hide(system.NewEventsCommand(dockerCli)), + hide(system.NewInfoCommand(dockerCli)), + hide(system.NewInspectCommand(dockerCli)), + hide(container.NewAttachCommand(dockerCli)), + hide(container.NewCommitCommand(dockerCli)), + hide(container.NewCopyCommand(dockerCli)), + hide(container.NewCreateCommand(dockerCli)), + hide(container.NewDiffCommand(dockerCli)), + hide(container.NewExecCommand(dockerCli)), + hide(container.NewExportCommand(dockerCli)), + hide(container.NewKillCommand(dockerCli)), + hide(container.NewLogsCommand(dockerCli)), + hide(container.NewPauseCommand(dockerCli)), + hide(container.NewPortCommand(dockerCli)), + hide(container.NewPsCommand(dockerCli)), + hide(container.NewRenameCommand(dockerCli)), + hide(container.NewRestartCommand(dockerCli)), + hide(container.NewRmCommand(dockerCli)), + hide(container.NewStartCommand(dockerCli)), + hide(container.NewStatsCommand(dockerCli)), + hide(container.NewStopCommand(dockerCli)), + hide(container.NewTopCommand(dockerCli)), + hide(container.NewUnpauseCommand(dockerCli)), + hide(container.NewUpdateCommand(dockerCli)), + hide(container.NewWaitCommand(dockerCli)), + hide(image.NewHistoryCommand(dockerCli)), + hide(image.NewImagesCommand(dockerCli)), + hide(image.NewImportCommand(dockerCli)), + hide(image.NewLoadCommand(dockerCli)), + hide(image.NewPullCommand(dockerCli)), + hide(image.NewPushCommand(dockerCli)), + hide(image.NewRemoveCommand(dockerCli)), + hide(image.NewSaveCommand(dockerCli)), + hide(image.NewTagCommand(dockerCli)), + ) + +} + +func hide(cmd *cobra.Command) *cobra.Command { + // If the environment variable with name "DOCKER_HIDE_LEGACY_COMMANDS" is not empty, + // these legacy commands (such as `docker ps`, `docker exec`, etc) + // will not be shown in output console. 
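+ // For example, with DOCKER_HIDE_LEGACY_COMMANDS=1 in the environment, + // `docker help` lists only the management commands (`docker container`, + // `docker image`, ...), while the hidden top-level aliases keep working.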
+ if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { + return cmd + } + cmdCopy := *cmd + cmdCopy.Hidden = true + cmdCopy.Aliases = []string{} + return &cmdCopy +} diff --git a/vendor/github.com/docker/cli/cli/command/container/attach.go b/vendor/github.com/docker/cli/cli/command/container/attach.go new file mode 100644 index 0000000..dce6432 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/attach.go @@ -0,0 +1,164 @@ +package container + +import ( + "io" + "net/http/httputil" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type attachOptions struct { + noStdin bool + proxy bool + detachKeys string + + container string +} + +func inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, args string) (*types.ContainerJSON, error) { + c, err := cli.ContainerInspect(ctx, args) + if err != nil { + return nil, err + } + if !c.State.Running { + return nil, errors.New("You cannot attach to a stopped container, start it first") + } + if c.State.Paused { + return nil, errors.New("You cannot attach to a paused container, unpause it first") + } + if c.State.Restarting { + return nil, errors.New("You cannot attach to a restarting container, wait until it is running") + } + + return &c, nil +} + +// NewAttachCommand creates a new cobra.Command for `docker attach` +func NewAttachCommand(dockerCli command.Cli) *cobra.Command { + var opts attachOptions + + cmd := &cobra.Command{ + Use: "attach [OPTIONS] CONTAINER", + Short: "Attach local standard input, output, and error streams to a running container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runAttach(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + return cmd +} + +func runAttach(dockerCli command.Cli, opts *attachOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + c, err := inspectContainerAndCheckState(ctx, client, opts.container) + if err != nil { + return err + } + + if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { + return err + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: !opts.noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = dockerCli.In() + } + + if opts.proxy && !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, opts.container) + defer signal.StopCatch(sigc) + } + + resp, errAttach := client.ContainerAttach(ctx, opts.container, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returning ErrPersistEOF (connection closed) means the + // server hit an error and wrote it to the hijacked connection; keep the + // error and read the detailed message from the hijacked connection later + return errAttach + } + defer resp.Close() + + // Attaching to a stopped container returns the "You cannot attach to a
// stopped container" error above, which is fine. When attaching to a + // running container, however, the inspect-based state check above runs on + // the client side, so the container can stop between passing that check + // and the attach call; `docker attach` would then stay attached and never + // exit. + // + // Recheck the container's state to avoid the attach blocking. + _, err = inspectContainerAndCheckState(ctx, client, opts.container) + if err != nil { + return err + } + + if c.Config.Tty && dockerCli.Out().IsTerminal() { + height, width := dockerCli.Out().GetTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + resizeTtyTo(ctx, client, opts.container, height+1, width+1, false) + + // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back + // to the actual size. + if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: dockerCli.Out(), + errorStream: dockerCli.Err(), + resp: resp, + tty: c.Config.Tty, + detachKeys: options.DetachKeys, + } + + if err := streamer.stream(ctx); err != nil { + return err + } + + if errAttach != nil { + return errAttach + } + + _, status, err := getExitCode(ctx, dockerCli, opts.container) + if err != nil { + return err + } + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/cmd.go b/vendor/github.com/docker/cli/cli/command/container/cmd.go new file mode 100644 index 0000000..82500be --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/cmd.go @@ -0,0 +1,46 @@ +package container + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewContainerCommand returns a cobra command for `container` subcommands +// nolint: interfacer +func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "container", + Short: "Manage containers", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewAttachCommand(dockerCli), + NewCommitCommand(dockerCli), + NewCopyCommand(dockerCli), + NewCreateCommand(dockerCli), + NewDiffCommand(dockerCli), + NewExecCommand(dockerCli), + NewExportCommand(dockerCli), + NewKillCommand(dockerCli), + NewLogsCommand(dockerCli), + NewPauseCommand(dockerCli), + NewPortCommand(dockerCli), + NewRenameCommand(dockerCli), + NewRestartCommand(dockerCli), + NewRmCommand(dockerCli), + NewRunCommand(dockerCli), + NewStartCommand(dockerCli), + NewStatsCommand(dockerCli), + NewStopCommand(dockerCli), + NewTopCommand(dockerCli), + NewUnpauseCommand(dockerCli), + NewUpdateCommand(dockerCli), + NewWaitCommand(dockerCli), + newListCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/container/commit.go b/vendor/github.com/docker/cli/cli/command/container/commit.go new file mode 100644 index 0000000..689918e --- /dev/null +++
b/vendor/github.com/docker/cli/cli/command/container/commit.go @@ -0,0 +1,75 @@ +package container + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type commitOptions struct { + container string + reference string + + pause bool + comment string + author string + changes opts.ListOpts +} + +// NewCommitCommand creates a new cobra.Command for `docker commit` +func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command { + var options commitOptions + + cmd := &cobra.Command{ + Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", + Short: "Create a new image from a container's changes", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.container = args[0] + if len(args) > 1 { + options.reference = args[1] + } + return runCommit(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.BoolVarP(&options.pause, "pause", "p", true, "Pause container during commit") + flags.StringVarP(&options.comment, "message", "m", "", "Commit message") + flags.StringVarP(&options.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") + + options.changes = opts.NewListOpts(nil) + flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") + + return cmd +} + +func runCommit(dockerCli *command.DockerCli, options *commitOptions) error { + ctx := context.Background() + + name := options.container + reference := options.reference + + commitOptions := types.ContainerCommitOptions{ + Reference: reference, + Comment: options.comment, + Author: options.author, + Changes: options.changes.GetAll(), + Pause: options.pause, + } + + response, err := dockerCli.Client().ContainerCommit(ctx, name, commitOptions) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/cp.go b/vendor/github.com/docker/cli/cli/command/container/cp.go new file mode 100644 index 0000000..17a4030 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/cp.go @@ -0,0 +1,305 @@ +package container + +import ( + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type copyOptions struct { + source string + destination string + followLink bool + copyUIDGID bool +} + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// NewCopyCommand creates a new `docker cp` command +func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts copyOptions + + cmd := &cobra.Command{ + Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, + Short: "Copy files/folders between a container and the local filesystem", + Long: strings.Join([]string{ + "Copy files/folders between a container and the local filesystem\n", + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the 
destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + if args[0] == "" { + return errors.New("source can not be empty") + } + if args[1] == "" { + return errors.New("destination can not be empty") + } + opts.source = args[0] + opts.destination = args[1] + return runCopy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbolic link in SRC_PATH") + flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)") + + return cmd +} + +func runCopy(dockerCli *command.DockerCli, opts copyOptions) error { + srcContainer, srcPath := splitCpArg(opts.source) + dstContainer, dstPath := splitCpArg(opts.destination) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: opts.followLink, + } + + ctx := context.Background() + + switch direction { + case fromContainer: + return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam, opts.copyUIDGID) + case acrossContainers: + // Copying between containers isn't supported. + return errors.New("copying between containers is not supported") + default: + // User didn't specify any container. + return errors.New("must specify at least one container source") + } +} + +func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) { + return dockerCli.Client().ContainerStatPath(ctx, containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // If the client requests to follow symbolic links, we must decide which + // target file to copy. + var rebaseName string + if cpParam.followLink { + srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) + + // If the source is a symbolic link, follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info.
+ srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig, copyUIDGID bool) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return errors.Errorf("destination \"%s:%s\" must be a directory", dstContainer, dstPath) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. 
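+ // Illustrative example (names hypothetical): copying local `./site` to + // `web:/srv` when `/srv` exists extracts into `/srv` itself, creating + // `/srv/site`; copying `./site` to `web:/srv/site` when only `/srv` exists + // extracts into the parent `/srv` with the archive entries rebased to `site`.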
+ dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + CopyUIDGID: copyUIDGID, + } + + return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if the container is in a Docker +// cluster, so we have to check for a `/` or `.` prefix. Also, in the case of +// a Windows client, a `:` could be part of an absolute Windows path, in which +// case it is immediately followed by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. + return "", arg + } + + return parts[0], parts[1] +} diff --git a/vendor/github.com/docker/cli/cli/command/container/create.go b/vendor/github.com/docker/cli/cli/command/container/create.go new file mode 100644 index 0000000..2795e4f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/create.go @@ -0,0 +1,224 @@ +package container + +import ( + "fmt" + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +type createOptions struct { + name string +} + +// NewCreateCommand creates a new cobra.Command for `docker create` +func NewCreateCommand(dockerCli command.Cli) *cobra.Command { + var opts createOptions + var copts *containerOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustVerificationFlags(flags) + copts = addFlags(flags) + return cmd +} + +func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, opts *createOptions, copts *containerOptions) error { +
containerConfig, err := parse(flags, copts) + if err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + response, err := createContainer(context.Background(), dockerCli, containerConfig, opts.name) + if err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} + +func pullImage(ctx context.Context, dockerCli command.Cli, image string, out io.Writer) error { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageCreateOptions{ + RegistryAuth: encodedAuth, + } + + responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream( + responseBody, + out, + dockerCli.Out().FD(), + dockerCli.Out().IsTerminal(), + nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func (cid *cidFile) Close() error { + cid.file.Close() + + if cid.written { + return nil + } + if err := os.Remove(cid.path); err != nil { + return errors.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return errors.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, errors.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, errors.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, name string) (*container.ContainerCreateCreatedBody, error) { + config := containerConfig.Config + hostConfig := containerConfig.HostConfig + networkingConfig := containerConfig.NetworkingConfig + stderr := dockerCli.Err() + + var ( + containerIDFile *cidFile + trustedRef reference.Canonical + namedRef reference.Named + ) + + cidfile := hostConfig.ContainerIDFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + ref, err := reference.ParseAnyReference(config.Image) + if err != nil { + return nil, err + } + if named, ok := ref.(reference.Named); ok { + namedRef = reference.TagNameOnly(named) + + if taggedRef, ok := namedRef.(reference.NamedTagged); ok && command.IsTrusted() { + var err error + trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil) + if err != nil { + return nil, err + } + config.Image = reference.FamiliarString(trustedRef) + } + } + + //create the container + response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if apiclient.IsErrImageNotFound(err) && namedRef != nil { + fmt.Fprintf(stderr, "Unable to find image '%s' 
locally\n", reference.FamiliarString(namedRef)) + + // we don't want to write to stdout anything apart from container.ID + if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { + return nil, err + } + if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil { + if err := image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(stderr, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/diff.go b/vendor/github.com/docker/cli/cli/command/container/diff.go new file mode 100644 index 0000000..5f646aa --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/diff.go @@ -0,0 +1,46 @@ +package container + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type diffOptions struct { + container string +} + +// NewDiffCommand creates a new cobra.Command for `docker diff` +func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diffOptions + + return &cobra.Command{ + Use: "diff CONTAINER", + Short: "Inspect changes to files or directories on a container's filesystem", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runDiff(dockerCli, &opts) + }, + } +} + +func runDiff(dockerCli *command.DockerCli, opts *diffOptions) error { + if opts.container == "" { + return errors.New("Container name cannot be empty") + } + ctx := context.Background() + + changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) + if err != nil { + return err + } + diffCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewDiffFormat("{{.Type}} {{.Path}}"), + } + return formatter.DiffWrite(diffCtx, changes) +} diff --git a/vendor/github.com/docker/cli/cli/command/container/exec.go b/vendor/github.com/docker/cli/cli/command/container/exec.go new file mode 100644 index 0000000..bbcd34a --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/exec.go @@ -0,0 +1,220 @@ +package container + +import ( + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/pkg/promise" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type execOptions struct { + detachKeys string + interactive bool + tty bool + detach bool + user string + privileged bool + env *opts.ListOpts +} + +func newExecOptions() *execOptions { + var values []string + return &execOptions{ + env: opts.NewListOptsRef(&values, opts.ValidateEnv), + } +} + +// NewExecCommand creates a new cobra.Command for `docker exec` +func NewExecCommand(dockerCli command.Cli) *cobra.Command { + options := newExecOptions() + + cmd := &cobra.Command{ + Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", + Short: "Run a command in a running 
container", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + container := args[0] + execCmd := args[1:] + return runExec(dockerCli, options, container, execCmd) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVarP(&options.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") + flags.BoolVarP(&options.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.BoolVarP(&options.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.BoolVarP(&options.detach, "detach", "d", false, "Detached mode: run command in the background") + flags.StringVarP(&options.user, "user", "u", "", "Username or UID (format: [:])") + flags.BoolVarP(&options.privileged, "privileged", "", false, "Give extended privileges to the command") + flags.VarP(options.env, "env", "e", "Set environment variables") + flags.SetAnnotation("env", "version", []string{"1.25"}) + + return cmd +} + +// nolint: gocyclo +func runExec(dockerCli command.Cli, options *execOptions, container string, execCmd []string) error { + execConfig, err := parseExec(options, execCmd) + // just in case the ParseExec does not exit + if container == "" || err != nil { + return cli.StatusError{StatusCode: 1} + } + + if options.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = options.detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys + + ctx := context.Background() + client := dockerCli.Client() + + // We need to check the tty _before_ we do the ContainerExecCreate, because + // otherwise if we error out we will leak execIDs on the server (and + // there's no easy way to clean those up). But also in order to make "not + // exist" errors take precedence we do a dummy inspect first. + if _, err := client.ContainerInspect(ctx, container); err != nil { + return err + } + if !execConfig.Detach { + if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } + + response, err := client.ContainerExecCreate(ctx, container, *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintln(dockerCli.Out(), "exec ID empty") + return nil + } + + // Temp struct for execStart so that we don't need to transfer all the execConfig. + if execConfig.Detach { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + return client.ContainerExecStart(ctx, execID, execStartCheck) + } + + // Interactive exec requested. 
+ var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = dockerCli.In() + } + if execConfig.AttachStdout { + out = dockerCli.Out() + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = dockerCli.Out() + } else { + stderr = dockerCli.Err() + } + } + + resp, err := client.ContainerExecAttach(ctx, execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + errCh = promise.Go(func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: out, + errorStream: stderr, + resp: resp, + tty: execConfig.Tty, + detachKeys: execConfig.DetachKeys, + } + + return streamer.stream(ctx) + }) + + if execConfig.Tty && dockerCli.In().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { + fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(ctx, client, execID); err != nil { + return err + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} + +// getExecExitCode performs an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) { + resp, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !apiclient.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +// parseExec parses the specified args for the specified command and generates +// an ExecConfig from it.
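For reference, a sketch of the ExecConfig that the flag handling above and parseExec below produce for `docker exec -it <container> sh` (values are illustrative):

```
package execdemo

import "github.com/docker/docker/api/types"

// exampleExecConfig mirrors what parseExec builds for `docker exec -it ... sh`.
func exampleExecConfig() *types.ExecConfig {
	return &types.ExecConfig{
		Tty:          true, // -t
		AttachStdin:  true, // -i, and only because -d is not set
		AttachStdout: true, // attached by default when -d is not set
		AttachStderr: true, // attached by default when -d is not set
		Cmd:          []string{"sh"},
	}
}
```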
+func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) { + execConfig := &types.ExecConfig{ + User: opts.user, + Privileged: opts.privileged, + Tty: opts.tty, + Cmd: execCmd, + Detach: opts.detach, + } + + // If -d is not set, attach to everything by default + if !opts.detach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if opts.interactive { + execConfig.AttachStdin = true + } + } + + if opts.env != nil { + execConfig.Env = opts.env.GetAll() + } + + return execConfig, nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/export.go b/vendor/github.com/docker/cli/cli/command/container/export.go new file mode 100644 index 0000000..700f557 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/export.go @@ -0,0 +1,58 @@ +package container + +import ( + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type exportOptions struct { + container string + output string +} + +// NewExportCommand creates a new `docker export` command +func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts exportOptions + + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTAINER", + Short: "Export a container's filesystem as a tar archive", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runExport(dockerCli *command.DockerCli, opts exportOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect") + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ContainerExport(context.Background(), opts.container) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/cli/cli/command/container/hijack.go b/vendor/github.com/docker/cli/cli/command/container/hijack.go new file mode 100644 index 0000000..b6d467d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/hijack.go @@ -0,0 +1,207 @@ +package container + +import ( + "fmt" + "io" + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" + "golang.org/x/net/context" +) + +// The default escape key sequence: ctrl-p, ctrl-q +// TODO: This could be moved to `pkg/term`. +var defaultEscapeKeys = []byte{16, 17} + +// A hijackedIOStreamer handles copying input to and output from streams to the +// connection. +type hijackedIOStreamer struct { + streams command.Streams + inputStream io.ReadCloser + outputStream io.Writer + errorStream io.Writer + + resp types.HijackedResponse + + tty bool + detachKeys string +} + +// stream handles setting up the IO and then begins streaming stdin/stdout +// to/from the hijacked connection, blocking until it is either done reading +// output, the user inputs the detach key sequence when in TTY mode, or when +// the given context is cancelled. 
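hijackedIOStreamer is unexported, but the same hijack pattern is available through the raw API client. A sketch for a non-TTY container (the name "web" and the surrounding function are hypothetical); stdcopy demultiplexes the stdout/stderr frames that share the hijacked connection:

```
package attachdemo

import (
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"golang.org/x/net/context"
)

func attachPlain(ctx context.Context, apiClient client.APIClient) error {
	resp, err := apiClient.ContainerAttach(ctx, "web", types.ContainerAttachOptions{
		Stream: true,
		Stdin:  true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		return err
	}
	defer resp.Close()

	// stdin -> container, until our stdin closes.
	go io.Copy(resp.Conn, os.Stdin)

	// container -> stdout/stderr; without a TTY the two streams are
	// multiplexed and must be split with stdcopy.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
	return err
}
```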
+func (h *hijackedIOStreamer) stream(ctx context.Context) error { + restoreInput, err := h.setupInput() + if err != nil { + return fmt.Errorf("unable to setup input stream: %s", err) + } + + defer restoreInput() + + outputDone := h.beginOutputStream(restoreInput) + inputDone, detached := h.beginInputStream(restoreInput) + + select { + case err := <-outputDone: + return err + case <-inputDone: + // Input stream has closed. + if h.outputStream != nil || h.errorStream != nil { + // Wait for output to complete streaming. + select { + case err := <-outputDone: + return err + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + case err := <-detached: + // Got a detach key sequence. + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +func (h *hijackedIOStreamer) setupInput() (restore func(), err error) { + if h.inputStream == nil || !h.tty { + // No need to setup input TTY. + // The restore func is a nop. + return func() {}, nil + } + + if err := setRawTerminal(h.streams); err != nil { + return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err) + } + + // Use sync.Once so we may call restore multiple times but ensure we + // only restore the terminal once. + var restoreOnce sync.Once + restore = func() { + restoreOnce.Do(func() { + restoreTerminal(h.streams, h.inputStream) + }) + } + + // Wrap the input to detect detach escape sequence. + // Use default escape keys if an invalid sequence is given. + escapeKeys := defaultEscapeKeys + if h.detachKeys != "" { + customEscapeKeys, err := term.ToBytes(h.detachKeys) + if err != nil { + logrus.Warnf("invalid detach escape keys, using default: %s", err) + } else { + escapeKeys = customEscapeKeys + } + } + + h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close) + + return restore, nil +} + +func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error { + if h.outputStream == nil && h.errorStream == nil { + // There is no need to copy output. + return nil + } + + outputDone := make(chan error) + go func() { + var err error + + // When TTY is ON, use regular copy + if h.outputStream != nil && h.tty { + _, err = io.Copy(h.outputStream, h.resp.Reader) + // We should restore the terminal as soon as possible + // once the connection ends so any following print + // messages will be in normal type. + restoreInput() + } else { + _, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader) + } + + logrus.Debug("[hijack] End of stdout") + + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + } + + outputDone <- err + }() + + return outputDone +} + +func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) { + inputDone := make(chan struct{}) + detached := make(chan error) + + go func() { + if h.inputStream != nil { + _, err := io.Copy(h.resp.Conn, h.inputStream) + // We should restore the terminal as soon as possible + // once the connection ends so any following print + // messages will be in normal type. + restoreInput() + + logrus.Debug("[hijack] End of stdin") + + if _, ok := err.(term.EscapeError); ok { + detached <- err + return + } + + if err != nil { + // This error will also occur on the receive + // side (from stdout) where it will be + // propagated back to the caller. 
+ logrus.Debugf("Error sendStdin: %s", err) + } + } + + if err := h.resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + + close(inputDone) + }() + + return inputDone, detached +} + +func setRawTerminal(streams command.Streams) error { + if err := streams.In().SetRawTerminal(); err != nil { + return err + } + return streams.Out().SetRawTerminal() +} + +func restoreTerminal(streams command.Streams, in io.Closer) error { + streams.In().RestoreTerminal() + streams.Out().RestoreTerminal() + // WARNING: DO NOT REMOVE THE OS CHECKS !!! + // For some reason this Close call blocks on darwin.. + // As the client exits right after, simply discard the close + // until we find a better solution. + // + // This can also cause the client on Windows to get stuck in Win32 CloseHandle() + // in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442 + // Tracked internally at Microsoft by VSO #11352156. In the + // Windows case, you hit this if you are using the native/v2 console, + // not the "legacy" console, and you start the client in a new window. eg + // `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar` + // will hang. Remove start, and it won't repro. + if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" { + return in.Close() + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/inspect.go b/vendor/github.com/docker/cli/cli/command/container/inspect.go new file mode 100644 index 0000000..7e0e710 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/inspect.go @@ -0,0 +1,46 @@ +package container + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + format string + size bool + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker container inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Display detailed information on one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ContainerInspectWithRaw(ctx, ref, opts.size) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/docker/cli/cli/command/container/kill.go b/vendor/github.com/docker/cli/cli/command/container/kill.go new file mode 100644 index 0000000..650f957 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/kill.go @@ -0,0 +1,56 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type killOptions struct { + signal string + + containers []string +} + +// NewKillCommand creates a 
new cobra.Command for `docker kill` +func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts killOptions + + cmd := &cobra.Command{ + Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Kill one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runKill(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") + return cmd +} + +func runKill(dockerCli *command.DockerCli, opts *killOptions) error { + var errs []string + ctx := context.Background() + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + return dockerCli.Client().ContainerKill(ctx, container, opts.signal) + }) + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintln(dockerCli.Out(), name) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/list.go b/vendor/github.com/docker/cli/cli/command/container/list.go new file mode 100644 index 0000000..844ef57 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/list.go @@ -0,0 +1,140 @@ +package container + +import ( + "io/ioutil" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/templates" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + quiet bool + size bool + all bool + noTrunc bool + nLatest bool + last int + format string + filter opts.FilterOpt +} + +// NewPsCommand creates a new cobra.Command for `docker ps` +func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command { + options := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS]", + Short: "List containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPs(dockerCli, &options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display numeric IDs") + flags.BoolVarP(&options.size, "size", "s", false, "Display total file sizes") + flags.BoolVarP(&options.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVarP(&options.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") + flags.IntVarP(&options.last, "last", "n", -1, "Show n last created containers (includes all states)") + flags.StringVarP(&options.format, "format", "", "", "Pretty-print containers using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewPsCommand(dockerCli) + cmd.Aliases = []string{"ps", "list"} + cmd.Use = "ls [OPTIONS]" + return &cmd +} + +// listOptionsProcessor is used to set any container list options which may only +// be embedded in the format template. +// This is passed directly into tmpl.Execute in order to allow the preprocessor +// to set any list options that were not provided by flags (e.g. `.Size`). 
+// It is using a `map[string]bool` so that unknown fields passed into the +// template format do not cause errors. These errors will get picked up when +// running through the actual template processor. +type listOptionsProcessor map[string]bool + +// Size sets the size of the map when called by a template execution. +func (o listOptionsProcessor) Size() bool { + o["size"] = true + return true +} + +// Label is needed here as it allows the correct pre-processing +// because Label() is a method with arguments +func (o listOptionsProcessor) Label(name string) string { + return "" +} + +func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { + options := &types.ContainerListOptions{ + All: opts.all, + Limit: opts.last, + Size: opts.size, + Filters: opts.filter.Value(), + } + + if opts.nLatest && opts.last == -1 { + options.Limit = 1 + } + + tmpl, err := templates.Parse(opts.format) + + if err != nil { + return nil, err + } + + optionsProcessor := listOptionsProcessor{} + // This shouldn't error out but swallowing the error makes it harder + // to track down if preProcessor issues come up. Ref #24696 + if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { + return nil, err + } + // At the moment all we need is to capture .Size for preprocessor + options.Size = opts.size || optionsProcessor["size"] + + return options, nil +} + +func runPs(dockerCli *command.DockerCli, options *psOptions) error { + ctx := context.Background() + + listOptions, err := buildContainerListOptions(options) + if err != nil { + return err + } + + containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PsFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().PsFormat + } else { + format = formatter.TableFormatKey + } + } + + containerCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewContainerFormat(format, options.quiet, listOptions.Size), + Trunc: !options.noTrunc, + } + return formatter.ContainerWrite(containerCtx, containers) +} diff --git a/vendor/github.com/docker/cli/cli/command/container/logs.go b/vendor/github.com/docker/cli/cli/command/container/logs.go new file mode 100644 index 0000000..209e749 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/logs.go @@ -0,0 +1,76 @@ +package container + +import ( + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type logsOptions struct { + follow bool + since string + timestamps bool + details bool + tail string + + container string +} + +// NewLogsCommand creates a new cobra.Command for `docker logs` +func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] CONTAINER", + Short: "Fetch the logs of a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runLogs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 
42m for 42 minutes)") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) + if err != nil { + return err + } + defer responseBody.Close() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if c.Config.Tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + } else { + _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) + } + return err +} diff --git a/vendor/github.com/docker/cli/cli/command/container/opts.go b/vendor/github.com/docker/cli/cli/command/container/opts.go new file mode 100644 index 0000000..cf1f931 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/opts.go @@ -0,0 +1,841 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "regexp" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +var ( + deviceCgroupRuleRegexp = regexp.MustCompile(`^[acb] ([0-9]+|\*):([0-9]+|\*) [rwm]{1,3}$`) +) + +// containerOptions is a data object with all the options for creating a container +type containerOptions struct { + attach opts.ListOpts + volumes opts.ListOpts + tmpfs opts.ListOpts + mounts opts.MountOpt + blkioWeightDevice opts.WeightdeviceOpt + deviceReadBps opts.ThrottledeviceOpt + deviceWriteBps opts.ThrottledeviceOpt + links opts.ListOpts + aliases opts.ListOpts + linkLocalIPs opts.ListOpts + deviceReadIOps opts.ThrottledeviceOpt + deviceWriteIOps opts.ThrottledeviceOpt + env opts.ListOpts + labels opts.ListOpts + deviceCgroupRules opts.ListOpts + devices opts.ListOpts + ulimits *opts.UlimitOpt + sysctls *opts.MapOpts + publish opts.ListOpts + expose opts.ListOpts + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOptions opts.ListOpts + extraHosts opts.ListOpts + volumesFrom opts.ListOpts + envFile opts.ListOpts + capAdd opts.ListOpts + capDrop opts.ListOpts + groupAdd opts.ListOpts + securityOpt opts.ListOpts + storageOpt opts.ListOpts + labelsFile opts.ListOpts + loggingOpts opts.ListOpts + privileged bool + pidMode string + utsMode string + usernsMode string + publishAll bool + stdin bool + tty bool + oomKillDisable bool + oomScoreAdj int + containerIDFile string + entrypoint string + hostname string + memory opts.MemBytes + memoryReservation opts.MemBytes + memorySwap opts.MemSwapBytes + kernelMemory opts.MemBytes + user string + workingDir string + cpuCount int64 + cpuShares int64 + cpuPercent int64 + cpuPeriod int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpuQuota int64 + cpus opts.NanoCPUs + cpusetCpus string + cpusetMems string + 
blkioWeight uint16 + ioMaxBandwidth opts.MemBytes + ioMaxIOps uint64 + swappiness int64 + netMode string + macAddress string + ipv4Address string + ipv6Address string + ipcMode string + pidsLimit int64 + restartPolicy string + readonlyRootfs bool + loggingDriver string + cgroupParent string + volumeDriver string + stopSignal string + stopTimeout int + isolation string + shmSize opts.MemBytes + noHealthcheck bool + healthCmd string + healthInterval time.Duration + healthTimeout time.Duration + healthStartPeriod time.Duration + healthRetries int + runtime string + autoRemove bool + init bool + + Image string + Args []string +} + +// addFlags adds all command line flags that will be used by parse to the FlagSet +func addFlags(flags *pflag.FlagSet) *containerOptions { + copts := &containerOptions{ + aliases: opts.NewListOpts(nil), + attach: opts.NewListOpts(validateAttach), + blkioWeightDevice: opts.NewWeightdeviceOpt(opts.ValidateWeightDevice), + capAdd: opts.NewListOpts(nil), + capDrop: opts.NewListOpts(nil), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOptions: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + deviceCgroupRules: opts.NewListOpts(validateDeviceCgroupRule), + deviceReadBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), + deviceReadIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), + deviceWriteBps: opts.NewThrottledeviceOpt(opts.ValidateThrottleBpsDevice), + deviceWriteIOps: opts.NewThrottledeviceOpt(opts.ValidateThrottleIOpsDevice), + devices: opts.NewListOpts(validateDevice), + env: opts.NewListOpts(opts.ValidateEnv), + envFile: opts.NewListOpts(nil), + expose: opts.NewListOpts(nil), + extraHosts: opts.NewListOpts(opts.ValidateExtraHost), + groupAdd: opts.NewListOpts(nil), + labels: opts.NewListOpts(opts.ValidateEnv), + labelsFile: opts.NewListOpts(nil), + linkLocalIPs: opts.NewListOpts(nil), + links: opts.NewListOpts(opts.ValidateLink), + loggingOpts: opts.NewListOpts(nil), + publish: opts.NewListOpts(nil), + securityOpt: opts.NewListOpts(nil), + storageOpt: opts.NewListOpts(nil), + sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), + tmpfs: opts.NewListOpts(nil), + ulimits: opts.NewUlimitOpt(nil), + volumes: opts.NewListOpts(nil), + volumesFrom: opts.NewListOpts(nil), + } + + // General purpose flags + flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") + flags.Var(&copts.deviceCgroupRules, "device-cgroup-rule", "Add a rule to the cgroup allowed devices list") + flags.Var(&copts.devices, "device", "Add a host device to the container") + flags.VarP(&copts.env, "env", "e", "Set environment variables") + flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") + flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") + flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") + flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") + flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") + flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") + flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") + flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") + flags.StringVar(&copts.stopSignal, "stop-signal", 
signal.DefaultStopSignal, "Signal to stop a container")
+	flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container")
+	flags.SetAnnotation("stop-timeout", "version", []string{"1.25"})
+	flags.Var(copts.sysctls, "sysctl", "Sysctl options")
+	flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY")
+	flags.Var(copts.ulimits, "ulimit", "Ulimit options")
+	flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
+	flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container")
+	flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits")
+
+	// Security
+	flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities")
+	flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities")
+	flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container")
+	flags.Var(&copts.securityOpt, "security-opt", "Security Options")
+	flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use")
+
+	// Network and port publishing flag
+	flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
+	flags.Var(&copts.dns, "dns", "Set custom DNS servers")
+	// We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way.
+	// This is to be consistent with service create/update
+	flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options")
+	flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options")
+	flags.MarkHidden("dns-opt")
+	flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains")
+	flags.Var(&copts.expose, "expose", "Expose a port or a range of ports")
+	flags.StringVar(&copts.ipv4Address, "ip", "", "IPv4 address (e.g., 172.30.100.104)")
+	flags.StringVar(&copts.ipv6Address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)")
+	flags.Var(&copts.links, "link", "Add link to another container")
+	flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses")
+	flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g., 92:d0:c6:0a:29:33)")
+	flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host")
+	flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports")
+	// We allow for both "--net" and "--network", although the latter is the recommended way.
+	flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network")
+	flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network")
+	flags.MarkHidden("net")
+	// We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way.
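+	// Illustrative example: `docker run --network my-net --network-alias db redis`
+	// and the hidden legacy spelling `docker run --net my-net --net-alias db redis`
+	// register the same alias, since both flags bind to copts.aliases.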
+ flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") + flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") + flags.MarkHidden("net-alias") + + // Logging and storage + flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") + flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") + flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") + flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") + flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") + flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") + flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") + flags.Var(&copts.mounts, "mount", "Attach a filesystem mount to the container") + + // Health-checking + flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") + flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ms|s|m|h) (default 0s)") + flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") + flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ms|s|m|h) (default 0s)") + flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)") + flags.SetAnnotation("health-start-period", "version", []string{"1.29"}) + flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") + + // Resource management + flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") + flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") + flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") + flags.SetAnnotation("cpu-count", "ostype", []string{"windows"}) + flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") + flags.SetAnnotation("cpu-percent", "ostype", []string{"windows"}) + flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") + flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) + flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") + flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) + flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Var(&copts.cpus, "cpus", "Number of CPUs") + flags.SetAnnotation("cpus", "version", []string{"1.25"}) + flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") + flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per 
second) from a device") + flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") + flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") + flags.Var(&copts.ioMaxBandwidth, "io-maxbandwidth", "Maximum IO bandwidth limit for the system drive (Windows only)") + flags.SetAnnotation("io-maxbandwidth", "ostype", []string{"windows"}) + flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") + flags.SetAnnotation("io-maxiops", "ostype", []string{"windows"}) + flags.Var(&copts.kernelMemory, "kernel-memory", "Kernel memory limit") + flags.VarP(&copts.memory, "memory", "m", "Memory limit") + flags.Var(&copts.memoryReservation, "memory-reservation", "Memory soft limit") + flags.Var(&copts.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") + flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") + flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") + flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + + // Low-level execution (cgroups, namespaces, ...) + flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") + flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") + flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") + flags.Var(&copts.shmSize, "shm-size", "Size of /dev/shm") + flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") + flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") + + flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") + flags.SetAnnotation("init", "version", []string{"1.25"}) + return copts +} + +type containerConfig struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *networktypes.NetworkingConfig +} + +// parse parses the args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. +// If the specified args are not valid, it will return an error. +// nolint: gocyclo +func parse(flags *pflag.FlagSet, copts *containerOptions) (*containerConfig, error) { + var ( + attachStdin = copts.attach.Get("stdin") + attachStdout = copts.attach.Get("stdout") + attachStderr = copts.attach.Get("stderr") + ) + + // Validate the input mac address + if copts.macAddress != "" { + if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil { + return nil, errors.Errorf("%s is not a valid mac address", copts.macAddress) + } + } + if copts.stdin { + attachStdin = true + } + // If -a is not set, attach to stdout and stderr + if copts.attach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var err error + + swappiness := copts.swappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, errors.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) + } + + mounts := copts.mounts.Value() + if len(mounts) > 0 && copts.volumeDriver != "" { + logrus.Warn("`--volume-driver` is ignored for volumes specified via `--mount`. 
Use `--mount type=volume,volume-driver=...` instead.")
+	}
+	var binds []string
+	volumes := copts.volumes.GetMap()
+	// add any bind targets to the list of container volumes
+	for bind := range copts.volumes.GetMap() {
+		parsed, _ := loader.ParseVolume(bind)
+		if parsed.Source != "" {
+			// after creating the bind mount we want to delete it from the copts.volumes values because
+			// we do not want bind mounts being committed to image configs
+			binds = append(binds, bind)
+			// We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if
+			// there are duplicate entries.
+			delete(volumes, bind)
+		}
+	}
+
+	// Can't evaluate options passed into --tmpfs until we actually mount
+	tmpfs := make(map[string]string)
+	for _, t := range copts.tmpfs.GetAll() {
+		if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
+			tmpfs[arr[0]] = arr[1]
+		} else {
+			tmpfs[arr[0]] = ""
+		}
+	}
+
+	var (
+		runCmd     strslice.StrSlice
+		entrypoint strslice.StrSlice
+	)
+
+	if len(copts.Args) > 0 {
+		runCmd = strslice.StrSlice(copts.Args)
+	}
+
+	if copts.entrypoint != "" {
+		entrypoint = strslice.StrSlice{copts.entrypoint}
+	} else if flags.Changed("entrypoint") {
+		// if `--entrypoint=` is parsed then Entrypoint is reset
+		entrypoint = []string{""}
+	}
+
+	ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll())
+	if err != nil {
+		return nil, err
+	}
+
+	// Merge in exposed ports to the map of published ports
+	for _, e := range copts.expose.GetAll() {
+		if strings.Contains(e, ":") {
+			return nil, errors.Errorf("invalid port format for --expose: %s", e)
+		}
+		// support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
+		proto, port := nat.SplitProtoPort(e)
+		// parse the start and end port and create a sequence of ports to expose
+		// if expose a port, the start and end port are the same
+		start, end, err := nat.ParsePortRange(port)
+		if err != nil {
+			return nil, errors.Errorf("invalid range format for --expose: %s, error: %s", e, err)
+		}
+		for i := start; i <= end; i++ {
+			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
+			if err != nil {
+				return nil, err
+			}
+			if _, exists := ports[p]; !exists {
+				ports[p] = struct{}{}
+			}
+		}
+	}
+
+	// parse device mappings
+	deviceMappings := []container.DeviceMapping{}
+	for _, device := range copts.devices.GetAll() {
+		deviceMapping, err := parseDevice(device)
+		if err != nil {
+			return nil, err
+		}
+		deviceMappings = append(deviceMappings, deviceMapping)
+	}
+
+	// collect all the environment variables for the container
+	envVariables, err := opts.ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll())
+	if err != nil {
+		return nil, err
+	}
+
+	// collect all the labels for the container
+	labels, err := opts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll())
+	if err != nil {
+		return nil, err
+	}
+
+	ipcMode := container.IpcMode(copts.ipcMode)
+	if !ipcMode.Valid() {
+		return nil, errors.Errorf("--ipc: invalid IPC mode")
+	}
+
+	pidMode := container.PidMode(copts.pidMode)
+	if !pidMode.Valid() {
+		return nil, errors.Errorf("--pid: invalid PID mode")
+	}
+
+	utsMode := container.UTSMode(copts.utsMode)
+	if !utsMode.Valid() {
+		return nil, errors.Errorf("--uts: invalid UTS mode")
+	}
+
+	usernsMode := container.UsernsMode(copts.usernsMode)
+	if !usernsMode.Valid() {
+		return nil, errors.Errorf("--userns: invalid USER mode")
+	}
+
+	restartPolicy, err := opts.ParseRestartPolicy(copts.restartPolicy)
+	if err != nil {
+		return nil, err
+	}
+
+	loggingOpts, err := parseLoggingOpts(copts.loggingDriver,
copts.loggingOpts.GetAll()) + if err != nil { + return nil, err + } + + securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) + if err != nil { + return nil, err + } + + storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) + if err != nil { + return nil, err + } + + // Healthcheck + var healthConfig *container.HealthConfig + haveHealthSettings := copts.healthCmd != "" || + copts.healthInterval != 0 || + copts.healthTimeout != 0 || + copts.healthStartPeriod != 0 || + copts.healthRetries != 0 + if copts.noHealthcheck { + if haveHealthSettings { + return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options") + } + test := strslice.StrSlice{"NONE"} + healthConfig = &container.HealthConfig{Test: test} + } else if haveHealthSettings { + var probe strslice.StrSlice + if copts.healthCmd != "" { + args := []string{"CMD-SHELL", copts.healthCmd} + probe = strslice.StrSlice(args) + } + if copts.healthInterval < 0 { + return nil, errors.Errorf("--health-interval cannot be negative") + } + if copts.healthTimeout < 0 { + return nil, errors.Errorf("--health-timeout cannot be negative") + } + if copts.healthRetries < 0 { + return nil, errors.Errorf("--health-retries cannot be negative") + } + if copts.healthStartPeriod < 0 { + return nil, fmt.Errorf("--health-start-period cannot be negative") + } + + healthConfig = &container.HealthConfig{ + Test: probe, + Interval: copts.healthInterval, + Timeout: copts.healthTimeout, + StartPeriod: copts.healthStartPeriod, + Retries: copts.healthRetries, + } + } + + resources := container.Resources{ + CgroupParent: copts.cgroupParent, + Memory: copts.memory.Value(), + MemoryReservation: copts.memoryReservation.Value(), + MemorySwap: copts.memorySwap.Value(), + MemorySwappiness: &copts.swappiness, + KernelMemory: copts.kernelMemory.Value(), + OomKillDisable: &copts.oomKillDisable, + NanoCPUs: copts.cpus.Value(), + CPUCount: copts.cpuCount, + CPUPercent: copts.cpuPercent, + CPUShares: copts.cpuShares, + CPUPeriod: copts.cpuPeriod, + CpusetCpus: copts.cpusetCpus, + CpusetMems: copts.cpusetMems, + CPUQuota: copts.cpuQuota, + CPURealtimePeriod: copts.cpuRealtimePeriod, + CPURealtimeRuntime: copts.cpuRealtimeRuntime, + PidsLimit: copts.pidsLimit, + BlkioWeight: copts.blkioWeight, + BlkioWeightDevice: copts.blkioWeightDevice.GetList(), + BlkioDeviceReadBps: copts.deviceReadBps.GetList(), + BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), + BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), + BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), + IOMaximumIOps: copts.ioMaxIOps, + IOMaximumBandwidth: uint64(copts.ioMaxBandwidth), + Ulimits: copts.ulimits.GetList(), + DeviceCgroupRules: copts.deviceCgroupRules.GetAll(), + Devices: deviceMappings, + } + + config := &container.Config{ + Hostname: copts.hostname, + ExposedPorts: ports, + User: copts.user, + Tty: copts.tty, + // TODO: deprecated, it comes from -n, --networking + // it's still needed internally to set the network to disabled + // if e.g. 
bridge is none in daemon opts, and in inspect + NetworkDisabled: false, + OpenStdin: copts.stdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: copts.Image, + Volumes: volumes, + MacAddress: copts.macAddress, + Entrypoint: entrypoint, + WorkingDir: copts.workingDir, + Labels: opts.ConvertKVStringsToMap(labels), + Healthcheck: healthConfig, + } + if flags.Changed("stop-signal") { + config.StopSignal = copts.stopSignal + } + if flags.Changed("stop-timeout") { + config.StopTimeout = &copts.stopTimeout + } + + hostConfig := &container.HostConfig{ + Binds: binds, + ContainerIDFile: copts.containerIDFile, + OomScoreAdj: copts.oomScoreAdj, + AutoRemove: copts.autoRemove, + Privileged: copts.privileged, + PortBindings: portBindings, + Links: copts.links.GetAll(), + PublishAllPorts: copts.publishAll, + // Make sure the dns fields are never nil. + // New containers don't ever have those fields nil, + // but pre created containers can still have those nil values. + // See https://github.com/docker/docker/pull/17779 + // for a more detailed explanation on why we don't want that. + DNS: copts.dns.GetAllOrEmpty(), + DNSSearch: copts.dnsSearch.GetAllOrEmpty(), + DNSOptions: copts.dnsOptions.GetAllOrEmpty(), + ExtraHosts: copts.extraHosts.GetAll(), + VolumesFrom: copts.volumesFrom.GetAll(), + NetworkMode: container.NetworkMode(copts.netMode), + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + UsernsMode: usernsMode, + CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), + CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), + GroupAdd: copts.groupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: securityOpts, + StorageOpt: storageOpts, + ReadonlyRootfs: copts.readonlyRootfs, + LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, + VolumeDriver: copts.volumeDriver, + Isolation: container.Isolation(copts.isolation), + ShmSize: copts.shmSize.Value(), + Resources: resources, + Tmpfs: tmpfs, + Sysctls: copts.sysctls.GetAll(), + Runtime: copts.runtime, + Mounts: mounts, + } + + if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, errors.Errorf("Conflicting options: --restart and --rm") + } + + // only set this value if the user provided the flag, else it should default to nil + if flags.Changed("init") { + hostConfig.Init = &copts.init + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + + networkingConfig := &networktypes.NetworkingConfig{ + EndpointsConfig: make(map[string]*networktypes.EndpointSettings), + } + + if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { + epConfig := &networktypes.EndpointSettings{} + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + + epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: copts.ipv4Address, + IPv6Address: copts.ipv6Address, + } + + if copts.linkLocalIPs.Len() > 0 { + epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) + copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) + } + } + + if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Links = make([]string, len(hostConfig.Links)) + copy(epConfig.Links, 
hostConfig.Links) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + if copts.aliases.Len() > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Aliases = make([]string, copts.aliases.Len()) + copy(epConfig.Aliases, copts.aliases.GetAll()) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + return &containerConfig{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + }, nil +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := opts.ConvertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, errors.Errorf("invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// takes a local seccomp daemon, reads the file contents for sending to the daemon +func parseSecurityOpts(securityOpts []string) ([]string, error) { + for key, opt := range securityOpts { + con := strings.SplitN(opt, "=", 2) + if len(con) == 1 && con[0] != "no-new-privileges" { + if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + } else { + return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt) + } + } + if con[0] == "seccomp" && con[1] != "unconfined" { + f, err := ioutil.ReadFile(con[1]) + if err != nil { + return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) + } + b := bytes.NewBuffer(nil) + if err := json.Compact(b, f); err != nil { + return securityOpts, errors.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) + } + securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) + } + } + + return securityOpts, nil +} + +// parses storage options per container into a map +func parseStorageOpts(storageOpts []string) (map[string]string, error) { + m := make(map[string]string) + for _, option := range storageOpts { + if strings.Contains(option, "=") { + opt := strings.SplitN(option, "=", 2) + m[opt[0]] = opt[1] + } else { + return nil, errors.Errorf("invalid storage option") + } + } + return m, nil +} + +// parseDevice parses a device mapping string to a container.DeviceMapping struct +func parseDevice(device string) (container.DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + if validDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return container.DeviceMapping{}, errors.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := container.DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} + +// validateDeviceCgroupRule validates a device cgroup rule string format +// It will make sure 'val' is in the form: +// 'type major:minor mode' +func validateDeviceCgroupRule(val string) (string, error) { + if deviceCgroupRuleRegexp.MatchString(val) { + return val, nil + } + + return val, errors.Errorf("invalid device cgroup format '%s'", val) +} + +// validDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). 
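+// For example, "r", "rw" and "rwm" are accepted, while "" (empty), "rr"
+// (repeated flag) and "x" (unknown flag) are rejected (illustrative).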
+func validDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// validateDevice validates a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +// It also validates the device mode. +func validateDevice(val string) (string, error) { + return validatePath(val, validDeviceMode) +} + +func validatePath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, errors.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, errors.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, errors.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) + } + + if !path.IsAbs(containerPath) { + return val, errors.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// validateAttach validates that the specified string is a valid attach option. +func validateAttach(val string) (string, error) { + s := strings.ToLower(val) + for _, str := range []string{"stdin", "stdout", "stderr"} { + if s == str { + return s, nil + } + } + return val, errors.Errorf("valid streams are STDIN, STDOUT and STDERR") +} diff --git a/vendor/github.com/docker/cli/cli/command/container/pause.go b/vendor/github.com/docker/cli/cli/command/container/pause.go new file mode 100644 index 0000000..9dc404d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/pause.go @@ -0,0 +1,49 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type pauseOptions struct { + containers []string +} + +// NewPauseCommand creates a new cobra.Command for `docker pause` +func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pauseOptions + + return &cobra.Command{ + Use: "pause CONTAINER [CONTAINER...]", + Short: "Pause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runPause(dockerCli, &opts) + }, + } +} + +func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/port.go 
b/vendor/github.com/docker/cli/cli/command/container/port.go new file mode 100644 index 0000000..13a1ae4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/port.go @@ -0,0 +1,78 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type portOptions struct { + container string + + port string +} + +// NewPortCommand creates a new cobra.Command for `docker port` +func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts portOptions + + cmd := &cobra.Command{ + Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", + Short: "List port mappings or a specific mapping for the container", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.port = args[1] + } + return runPort(dockerCli, &opts) + }, + } + return cmd +} + +func runPort(dockerCli *command.DockerCli, opts *portOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if opts.port != "" { + port := opts.port + proto := "tcp" + parts := strings.SplitN(port, "/", 2) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return errors.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/prune.go b/vendor/github.com/docker/cli/cli/command/container/prune.go new file mode 100644 index 0000000..38ac647 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/prune.go @@ -0,0 +1,78 @@ +package container + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for containers +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all stopped containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 
'until=<timestamp>')")
+
+	return cmd
+}
+
+const warning = `WARNING! This will remove all stopped containers.
+Are you sure you want to continue?`
+
+func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
+	pruneFilters := command.PruneFilters(dockerCli, options.filter.Value())
+
+	if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
+		return
+	}
+
+	report, err := dockerCli.Client().ContainersPrune(context.Background(), pruneFilters)
+	if err != nil {
+		return
+	}
+
+	if len(report.ContainersDeleted) > 0 {
+		output = "Deleted Containers:\n"
+		for _, id := range report.ContainersDeleted {
+			output += id + "\n"
+		}
+		spaceReclaimed = report.SpaceReclaimed
+	}
+
+	return
+}
+
+// RunPrune calls the Container Prune API
+// This returns the amount of space reclaimed and a detailed output string
+func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
+	return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
+}
diff --git a/vendor/github.com/docker/cli/cli/command/container/rename.go b/vendor/github.com/docker/cli/cli/command/container/rename.go
new file mode 100644
index 0000000..ad60bda
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/container/rename.go
@@ -0,0 +1,51 @@
+package container
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
+)
+
+type renameOptions struct {
+	oldName string
+	newName string
+}
+
+// NewRenameCommand creates a new cobra.Command for `docker rename`
+func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts renameOptions
+
+	cmd := &cobra.Command{
+		Use:   "rename CONTAINER NEW_NAME",
+		Short: "Rename a container",
+		Args:  cli.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			opts.oldName = args[0]
+			opts.newName = args[1]
+			return runRename(dockerCli, &opts)
+		},
+	}
+	return cmd
+}
+
+func runRename(dockerCli *command.DockerCli, opts *renameOptions) error {
+	ctx := context.Background()
+
+	oldName := strings.TrimSpace(opts.oldName)
+	newName := strings.TrimSpace(opts.newName)
+
+	if oldName == "" || newName == "" {
+		return errors.New("Error: Neither old nor new names may be empty")
+	}
+
+	if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil {
+		fmt.Fprintln(dockerCli.Err(), err)
+		return errors.Errorf("Error: failed to rename container named %s", oldName)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/cli/cli/command/container/restart.go b/vendor/github.com/docker/cli/cli/command/container/restart.go
new file mode 100644
index 0000000..6e13603
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/container/restart.go
@@ -0,0 +1,62 @@
+package container
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
+)
+
+type restartOptions struct {
+	nSeconds        int
+	nSecondsChanged bool
+
+	containers []string
+}
+
+// NewRestartCommand creates a new cobra.Command for `docker restart`
+func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts restartOptions
+
+	cmd := &cobra.Command{
+		Use:   "restart [OPTIONS] CONTAINER [CONTAINER...]",
+		Short: "Restart one or more containers",
+		Args:  cli.RequiresMinArgs(1),
+		RunE: func(cmd *cobra.Command, args
[]string) error { + opts.containers = args + opts.nSecondsChanged = cmd.Flags().Changed("time") + return runRestart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") + return cmd +} + +func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error { + ctx := context.Background() + var errs []string + var timeout *time.Duration + if opts.nSecondsChanged { + timeoutValue := time.Duration(opts.nSeconds) * time.Second + timeout = &timeoutValue + } + + for _, name := range opts.containers { + if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/rm.go b/vendor/github.com/docker/cli/cli/command/container/rm.go new file mode 100644 index 0000000..a7106ac --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/rm.go @@ -0,0 +1,73 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type rmOptions struct { + rmVolumes bool + rmLink bool + force bool + + containers []string +} + +// NewRmCommand creates a new cobra.Command for `docker rm` +func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Remove one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runRm(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") + flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") + return cmd +} + +func runRm(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs []string + options := types.ContainerRemoveOptions{ + RemoveVolumes: opts.rmVolumes, + RemoveLinks: opts.rmLink, + Force: opts.force, + } + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + container = strings.Trim(container, "/") + if container == "" { + return errors.New("Container name cannot be empty") + } + return dockerCli.Client().ContainerRemove(ctx, container, options) + }) + + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/run.go b/vendor/github.com/docker/cli/cli/command/container/run.go new file mode 100644 index 0000000..56f0d0d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/run.go @@ -0,0 +1,337 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "os" + "regexp" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + 
"github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +type runOptions struct { + detach bool + sigProxy bool + name string + detachKeys string +} + +// NewRunCommand create a new `docker run` command +func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts runOptions + var copts *containerOptions + + cmd := &cobra.Command{ + Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Run a command in a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runRun(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + // These are flags not stored in Config/HostConfig + flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") + flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustVerificationFlags(flags) + copts = addFlags(flags) + return cmd +} + +func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) { + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.") + } +} + +// check the DNS settings passed via --dns against localhost regexp to warn if +// they are trying to set a DNS to a localhost address +func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) { + for _, dnsIP := range hostConfig.DNS { + if isLocalhost(dnsIP) { + fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + return + } + } +} + +// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. +const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + +var localhostIPRegexp = regexp.MustCompile(ipLocalhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression. 
+// Used for determining if nameserver settings are being passed which are +// localhost addresses +func isLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} + +func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error { + proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), copts.env.GetAll()) + newEnv := []string{} + for k, v := range proxyConfig { + if v == nil { + newEnv = append(newEnv, k) + } else { + newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v)) + } + } + copts.env = *opts.NewListOptsRef(&newEnv, nil) + containerConfig, err := parse(flags, copts) + // just in case the parse does not exit + if err != nil { + reportError(dockerCli.Err(), "run", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + return runContainer(dockerCli, ropts, copts, containerConfig) +} + +// nolint: gocyclo +func runContainer(dockerCli *command.DockerCli, opts *runOptions, copts *containerOptions, containerConfig *containerConfig) error { + config := containerConfig.Config + hostConfig := containerConfig.HostConfig + stdout, stderr := dockerCli.Out(), dockerCli.Err() + client := dockerCli.Client() + + // TODO: pass this as an argument + cmdPath := "run" + + warnOnOomKillDisable(*hostConfig, stderr) + warnOnLocalhostDNS(*hostConfig, stderr) + + config.ArgsEscaped = false + + if !opts.detach { + if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if copts.attach.Len() != 0 { + return errors.New("Conflicting options: -a and -d") + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable sigProxy when in TTY mode + if config.Tty { + opts.sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. 
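+	// (Both ConsoleSize and GetTtySize are ordered height first, then width,
+	// so the assignment below lines up index 0 with height and 1 with width.)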
+	if runtime.GOOS == "windows" {
+		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
+	}
+
+	ctx, cancelFun := context.WithCancel(context.Background())
+
+	createResponse, err := createContainer(ctx, dockerCli, containerConfig, opts.name)
+	if err != nil {
+		reportError(stderr, cmdPath, err.Error(), true)
+		return runStartContainerErr(err)
+	}
+	if opts.sigProxy {
+		sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID)
+		defer signal.StopCatch(sigc)
+	}
+
+	var (
+		waitDisplayID chan struct{}
+		errCh         chan error
+	)
+	if !config.AttachStdout && !config.AttachStderr {
+		// Make this asynchronous to allow the client to write to stdin before having to read the ID
+		waitDisplayID = make(chan struct{})
+		go func() {
+			defer close(waitDisplayID)
+			fmt.Fprintln(stdout, createResponse.ID)
+		}()
+	}
+	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
+	if attach {
+		if opts.detachKeys != "" {
+			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
+		}
+
+		// attachContainer returns a nil close function on error, so only defer
+		// the call once we know the attach succeeded.
+		closeFn, err := attachContainer(ctx, dockerCli, &errCh, config, createResponse.ID)
+		if err != nil {
+			return err
+		}
+		defer closeFn()
+	}
+
+	statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, copts.autoRemove)
+
+	// start the container
+	if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
+		// If we have a hijackedIOStreamer, notify it that we are about to exit
+		// and wait for it, so that the terminal is restored properly.
+		if attach {
+			cancelFun()
+			<-errCh
+		}
+
+		reportError(stderr, cmdPath, err.Error(), false)
+		if copts.autoRemove {
+			// wait for the container to be removed
+			<-statusChan
+		}
+		return runStartContainerErr(err)
+	}
+
+	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
+		if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil {
+			fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
+		}
+	}
+
+	if errCh != nil {
+		if err := <-errCh; err != nil {
+			if _, ok := err.(term.EscapeError); ok {
+				// The user entered the detach escape sequence.
+				return nil
+			}
+
+			logrus.Debugf("Error hijack: %s", err)
+			return err
+		}
+	}
+
+	// Detached mode: wait for the id to be displayed and return.
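+	// Note (illustrative): with --detach the CLI exits 0 once the container ID
+	// has been printed; the container's own exit status is not propagated.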
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + status := <-statusChan + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +func attachContainer( + ctx context.Context, + dockerCli command.Cli, + errCh *chan error, + config *container.Config, + containerID string, +) (func(), error) { + stdout, stderr := dockerCli.Out(), dockerCli.Err() + var ( + out, cerr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = dockerCli.In() + } + if config.AttachStdout { + out = stdout + } + if config.AttachStderr { + if config.Tty { + cerr = stdout + } else { + cerr = stderr + } + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return nil, errAttach + } + + *errCh = promise.Go(func() error { + streamer := hijackedIOStreamer{ + streams: dockerCli, + inputStream: in, + outputStream: out, + errorStream: cerr, + resp: resp, + tty: config.Tty, + detachKeys: options.DetachKeys, + } + + if errHijack := streamer.stream(ctx); errHijack != nil { + return errHijack + } + return errAttach + }) + return resp.Close, nil +} + +// reportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func reportError(stderr io.Writer, name string, str string, withHelp bool) { + str = strings.TrimSuffix(str, ".") + "." + if withHelp { + str += "\nSee '" + os.Args[0] + " " + name + " --help'." 
+ } + fmt.Fprintf(stderr, "%s: %s\n", os.Args[0], str) +} + +// if container start fails with 'not found'/'no such' error, return 127 +// if container start fails with 'permission denied' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") + statusError := cli.StatusError{StatusCode: 125} + if strings.Contains(trimmedErr, "executable file not found") || + strings.Contains(trimmedErr, "no such file or directory") || + strings.Contains(trimmedErr, "system cannot find the file specified") { + statusError = cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { + statusError = cli.StatusError{StatusCode: 126} + } + + return statusError +} diff --git a/vendor/github.com/docker/cli/cli/command/container/start.go b/vendor/github.com/docker/cli/cli/command/container/start.go new file mode 100644 index 0000000..6cd4751 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/start.go @@ -0,0 +1,195 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type startOptions struct { + attach bool + openStdin bool + detachKeys string + checkpoint string + checkpointDir string + + containers []string +} + +// NewStartCommand creates a new cobra.Command for `docker start` +func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts startOptions + + cmd := &cobra.Command{ + Use: "start [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Start one or more stopped containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") + flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") + flags.SetAnnotation("checkpoint", "experimental", nil) + flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") + flags.SetAnnotation("checkpoint-dir", "experimental", nil) + return cmd +} + +// nolint: gocyclo +func runStart(dockerCli *command.DockerCli, opts *startOptions) error { + ctx, cancelFun := context.WithCancel(context.Background()) + + if opts.attach || opts.openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if len(opts.containers) > 1 { + return errors.New("you cannot start and attach multiple containers at once") + } + + // 2. Attach to the container. 
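The `runStartContainerErr` helper above maps daemon errors onto shell-style exit codes: 125 for a generic daemon failure, 126 for permission denied, 127 for a missing executable. A hedged, standalone restatement of that mapping, useful for seeing the three cases side by side (the attach step announced above continues right after this aside; note the vendored code matches `syscall.EACCES.Error()`, which is "permission denied" on Unix):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// exitStatus mirrors the mapping in runStartContainerErr above:
// 127 for "not found", 126 for "permission denied", 125 otherwise.
func exitStatus(err error) int {
	msg := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
	switch {
	case strings.Contains(msg, "executable file not found"),
		strings.Contains(msg, "no such file or directory"),
		strings.Contains(msg, "system cannot find the file specified"):
		return 127
	case strings.Contains(msg, "permission denied"):
		return 126
	default:
		return 125
	}
}

func main() {
	fmt.Println(exitStatus(errors.New("Error response from daemon: executable file not found in $PATH"))) // 127
	fmt.Println(exitStatus(errors.New("Error response from daemon: permission denied")))                  // 126
	fmt.Println(exitStatus(errors.New("Error response from daemon: something else went wrong")))          // 125
}
```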
+		container := opts.containers[0]
+		c, err := dockerCli.Client().ContainerInspect(ctx, container)
+		if err != nil {
+			return err
+		}
+
+		// We always use c.ID instead of container to maintain consistency during `docker start`
+		if !c.Config.Tty {
+			sigc := ForwardAllSignals(ctx, dockerCli, c.ID)
+			defer signal.StopCatch(sigc)
+		}
+
+		if opts.detachKeys != "" {
+			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
+		}
+
+		options := types.ContainerAttachOptions{
+			Stream:     true,
+			Stdin:      opts.openStdin && c.Config.OpenStdin,
+			Stdout:     true,
+			Stderr:     true,
+			DetachKeys: dockerCli.ConfigFile().DetachKeys,
+		}
+
+		var in io.ReadCloser
+
+		if options.Stdin {
+			in = dockerCli.In()
+		}
+
+		resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options)
+		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
+			// ContainerAttach returning an ErrPersistEOF (connection closed)
+			// means the server hit an error and already put it in the
+			// hijacked connection; keep the error and read the detailed
+			// error message from the hijacked connection later.
+			return errAttach
+		}
+		defer resp.Close()
+		cErr := promise.Go(func() error {
+			streamer := hijackedIOStreamer{
+				streams:      dockerCli,
+				inputStream:  in,
+				outputStream: dockerCli.Out(),
+				errorStream:  dockerCli.Err(),
+				resp:         resp,
+				tty:          c.Config.Tty,
+				detachKeys:   options.DetachKeys,
+			}
+
+			errHijack := streamer.stream(ctx)
+			if errHijack == nil {
+				return errAttach
+			}
+			return errHijack
+		})
+
+		// 3. Open a channel to receive the container's status code, whether
+		// it detaches, is removed on the daemon side (--rm), or exits
+		// normally.
+		statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove)
+		startOptions := types.ContainerStartOptions{
+			CheckpointID:  opts.checkpoint,
+			CheckpointDir: opts.checkpointDir,
+		}
+
+		// 4. Start the container.
+		if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil {
+			cancelFun()
+			<-cErr
+			if c.HostConfig.AutoRemove {
+				// wait for the container to be removed
+				<-statusChan
+			}
+			return err
+		}
+
+		// 5. Wait for attachment to break.
+		if c.Config.Tty && dockerCli.Out().IsTerminal() {
+			if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil {
+				fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
+			}
+		}
+		if attachErr := <-cErr; attachErr != nil {
+			if _, ok := attachErr.(term.EscapeError); ok {
+				// The user entered the detach escape sequence.
+				return nil
+			}
+			return attachErr
+		}
+
+		if status := <-statusChan; status != 0 {
+			return cli.StatusError{StatusCode: status}
+		}
+	} else if opts.checkpoint != "" {
+		if len(opts.containers) > 1 {
+			return errors.New("you cannot restore multiple containers at once")
+		}
+		container := opts.containers[0]
+		startOptions := types.ContainerStartOptions{
+			CheckpointID:  opts.checkpoint,
+			CheckpointDir: opts.checkpointDir,
+		}
+		return dockerCli.Client().ContainerStart(ctx, container, startOptions)
+
+	} else {
+		// We're not going to attach to anything.
+		// Start as many containers as we want.
+ return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) + } + + return nil +} + +func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error { + var failedContainers []string + for _, container := range containers { + if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { + fmt.Fprintln(dockerCli.Err(), err) + failedContainers = append(failedContainers, container) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + + if len(failedContainers) > 0 { + return errors.Errorf("Error: failed to start containers: %s", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/stats.go b/vendor/github.com/docker/cli/cli/command/container/stats.go new file mode 100644 index 0000000..50495f0 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/stats.go @@ -0,0 +1,243 @@ +package container + +import ( + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type statsOptions struct { + all bool + noStream bool + format string + containers []string +} + +// NewStatsCommand creates a new cobra.Command for `docker stats` +func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts statsOptions + + cmd := &cobra.Command{ + Use: "stats [OPTIONS] [CONTAINER...]", + Short: "Display a live stream of container(s) resource usage statistics", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStats(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + return cmd +} + +// runStats displays a live stream of resource usage statistics for one or more containers. +// This shows real-time information on CPU usage, memory usage, and network I/O. +// nolint: gocyclo +func runStats(dockerCli *command.DockerCli, opts *statsOptions) error { + showAll := len(opts.containers) == 0 + closeChan := make(chan error) + + ctx := context.Background() + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + + eventq, errq := dockerCli.Client().Events(ctx, options) + + // Whether we successfully subscribed to eventq or not, we can now + // unblock the main goroutine. 
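The `close(started)` just below performs that unblocking. For context, here is a hedged sketch of the underlying Events API subscription that `monitorContainerEvents` wraps, assuming an environment-configured client (`client.NewEnvClient` was the era-appropriate constructor):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Subscribe to container events only, mirroring the filter above.
	f := filters.NewArgs()
	f.Add("type", "container")
	eventq, errq := cli.Events(context.Background(), types.EventsOptions{Filters: f})
	for {
		select {
		case e := <-eventq:
			fmt.Println(e.Action, e.ID)
		case err := <-errq:
			fmt.Println("events stream error:", err)
			return
		}
	}
}
```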
+		close(started)
+
+		for {
+			select {
+			case event := <-eventq:
+				c <- event
+			case err := <-errq:
+				closeChan <- err
+				return
+			}
+		}
+	}
+
+	// Get the daemonOSType if not set already
+	if daemonOSType == "" {
+		svctx := context.Background()
+		sv, err := dockerCli.Client().ServerVersion(svctx)
+		if err != nil {
+			return err
+		}
+		daemonOSType = sv.Os
+	}
+
+	// waitFirst is a WaitGroup used to wait for the first stats sample to
+	// arrive for each container
+	waitFirst := &sync.WaitGroup{}
+
+	cStats := stats{}
+	// getContainerList simulates a creation event for all previously existing
+	// containers (only used when calling `docker stats` without arguments).
+	getContainerList := func() {
+		options := types.ContainerListOptions{
+			All: opts.all,
+		}
+		cs, err := dockerCli.Client().ContainerList(ctx, options)
+		if err != nil {
+			closeChan <- err
+		}
+		for _, container := range cs {
+			s := formatter.NewContainerStats(container.ID[:12])
+			if cStats.add(s) {
+				waitFirst.Add(1)
+				go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
+			}
+		}
+	}
+
+	if showAll {
+		// If no names were specified, start a long running goroutine which
+		// monitors container events. We make sure we're subscribed before
+		// retrieving the list of running containers to avoid a race where we
+		// would "miss" a creation.
+		started := make(chan struct{})
+		eh := command.InitEventHandler()
+		eh.Handle("create", func(e events.Message) {
+			if opts.all {
+				s := formatter.NewContainerStats(e.ID[:12])
+				if cStats.add(s) {
+					waitFirst.Add(1)
+					go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
+				}
+			}
+		})
+
+		eh.Handle("start", func(e events.Message) {
+			s := formatter.NewContainerStats(e.ID[:12])
+			if cStats.add(s) {
+				waitFirst.Add(1)
+				go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
+			}
+		})
+
+		eh.Handle("die", func(e events.Message) {
+			if !opts.all {
+				cStats.remove(e.ID[:12])
+			}
+		})
+
+		eventChan := make(chan events.Message)
+		go eh.Watch(eventChan)
+		go monitorContainerEvents(started, eventChan)
+		defer close(eventChan)
+		<-started
+
+		// Start a short-lived goroutine to retrieve the initial list of
+		// containers.
+		getContainerList()
+	} else {
+		// Artificially send creation events for the containers we were asked
+		// to monitor (same code path as when monitoring all containers).
+		for _, name := range opts.containers {
+			s := formatter.NewContainerStats(name)
+			if cStats.add(s) {
+				waitFirst.Add(1)
+				go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst)
+			}
+		}
+
+		// We don't expect any asynchronous errors: closeChan can be closed.
+		close(closeChan)
+
+		// Do a quick pause to detect any error with the provided list of
+		// container names.
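Before the 1.5-second settling sleep that follows, an aside on the ordering used above: the code subscribes to events first, waits for the subscription to be live, and only then lists pre-existing containers, so a creation cannot slip between the two steps. A toy sketch of that ordering with plain channels (names are illustrative only):

```go
package main

import "fmt"

func main() {
	started := make(chan struct{})
	events := make(chan string, 16)

	go func() {
		close(started) // subscription is live from this point on
		events <- "container created"
	}()

	<-started // never list before the subscription is confirmed

	for _, c := range []string{"c1", "c2"} { // stand-in for ContainerList
		fmt.Println("tracking existing:", c)
	}
	fmt.Println("event:", <-events)
}
```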
+		time.Sleep(1500 * time.Millisecond)
+		var errs []string
+		cStats.mu.Lock()
+		for _, c := range cStats.cs {
+			if err := c.GetError(); err != nil {
+				errs = append(errs, err.Error())
+			}
+		}
+		cStats.mu.Unlock()
+		if len(errs) > 0 {
+			return errors.New(strings.Join(errs, "\n"))
+		}
+	}
+
+	// before printing to screen, make sure each container has at least one valid stats sample
+	waitFirst.Wait()
+	format := opts.format
+	if len(format) == 0 {
+		if len(dockerCli.ConfigFile().StatsFormat) > 0 {
+			format = dockerCli.ConfigFile().StatsFormat
+		} else {
+			format = formatter.TableFormatKey
+		}
+	}
+	statsCtx := formatter.Context{
+		Output: dockerCli.Out(),
+		Format: formatter.NewStatsFormat(format, daemonOSType),
+	}
+	cleanScreen := func() {
+		if !opts.noStream {
+			fmt.Fprint(dockerCli.Out(), "\033[2J")
+			fmt.Fprint(dockerCli.Out(), "\033[H")
+		}
+	}
+
+	var err error
+	for range time.Tick(500 * time.Millisecond) {
+		cleanScreen()
+		ccstats := []formatter.StatsEntry{}
+		cStats.mu.Lock()
+		for _, c := range cStats.cs {
+			ccstats = append(ccstats, c.GetStatistics())
+		}
+		cStats.mu.Unlock()
+		if err = formatter.ContainerStatsWrite(statsCtx, ccstats, daemonOSType); err != nil {
+			break
+		}
+		if len(cStats.cs) == 0 && !showAll {
+			break
+		}
+		if opts.noStream {
+			break
+		}
+		select {
+		case err, ok := <-closeChan:
+			if ok {
+				if err != nil {
+					// suppress "unexpected EOF" in the cli when the daemon
+					// restarts, so that the loop shuts down cleanly
+					if err == io.ErrUnexpectedEOF {
+						return nil
+					}
+					return err
+				}
+			}
+		default:
+			// just skip
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/cli/cli/command/container/stats_helpers.go b/vendor/github.com/docker/cli/cli/command/container/stats_helpers.go
new file mode 100644
index 0000000..eb12cd0
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/container/stats_helpers.go
@@ -0,0 +1,238 @@
+package container
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/cli/cli/command/formatter"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type stats struct {
+	mu sync.Mutex
+	cs []*formatter.ContainerStats
+}
+
+// daemonOSType is set once we have at least one stat for a container
+// from the daemon. It is used to ensure we print the right header based
+// on the daemon platform.
+var daemonOSType string
+
+func (s *stats) add(cs *formatter.ContainerStats) bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if _, exists := s.isKnownContainer(cs.Container); !exists {
+		s.cs = append(s.cs, cs)
+		return true
+	}
+	return false
+}
+
+func (s *stats) remove(id string) {
+	s.mu.Lock()
+	if i, exists := s.isKnownContainer(id); exists {
+		s.cs = append(s.cs[:i], s.cs[i+1:]...)
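Two small idioms in the code above deserve a note before `remove` closes just below. The render loop clears the terminal with the ANSI sequences `\033[2J` (erase screen) and `\033[H` (cursor to home), and `remove` deletes element `i` with the standard `append(s.cs[:i], s.cs[i+1:]...)` slice idiom. A tiny demonstration of the latter:

```go
package main

import "fmt"

// removeAt deletes the element at index i in place, shifting the tail left.
func removeAt(xs []string, i int) []string {
	return append(xs[:i], xs[i+1:]...)
}

func main() {
	xs := []string{"a", "b", "c", "d"}
	fmt.Println(removeAt(xs, 1)) // [a c d]
}
```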
+	}
+	s.mu.Unlock()
+}
+
+func (s *stats) isKnownContainer(cid string) (int, bool) {
+	for i, c := range s.cs {
+		if c.Container == cid {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
+	logrus.Debugf("collecting stats for %s", s.Container)
+	var (
+		getFirst       bool
+		previousCPU    uint64
+		previousSystem uint64
+		u              = make(chan error, 1)
+	)
+
+	defer func() {
+		// if an error occurred and we never received any stats, release the
+		// wait group anyway
+		if !getFirst {
+			getFirst = true
+			waitFirst.Done()
+		}
+	}()
+
+	response, err := cli.ContainerStats(ctx, s.Container, streamStats)
+	if err != nil {
+		s.SetError(err)
+		return
+	}
+	defer response.Body.Close()
+
+	dec := json.NewDecoder(response.Body)
+	go func() {
+		for {
+			var (
+				v                      *types.StatsJSON
+				memPercent, cpuPercent float64
+				blkRead, blkWrite      uint64 // Only used on Linux
+				mem, memLimit          float64
+				pidsStatsCurrent       uint64
+			)
+
+			if err := dec.Decode(&v); err != nil {
+				dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body))
+				u <- err
+				if err == io.EOF {
+					break
+				}
+				time.Sleep(100 * time.Millisecond)
+				continue
+			}
+
+			daemonOSType = response.OSType
+
+			if daemonOSType != "windows" {
+				previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
+				previousSystem = v.PreCPUStats.SystemUsage
+				cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v)
+				blkRead, blkWrite = calculateBlockIO(v.BlkioStats)
+				mem = calculateMemUsageUnixNoCache(v.MemoryStats)
+				memLimit = float64(v.MemoryStats.Limit)
+				memPercent = calculateMemPercentUnixNoCache(memLimit, mem)
+				pidsStatsCurrent = v.PidsStats.Current
+			} else {
+				cpuPercent = calculateCPUPercentWindows(v)
+				blkRead = v.StorageStats.ReadSizeBytes
+				blkWrite = v.StorageStats.WriteSizeBytes
+				mem = float64(v.MemoryStats.PrivateWorkingSet)
+			}
+			netRx, netTx := calculateNetwork(v.Networks)
+			s.SetStatistics(formatter.StatsEntry{
+				Name:             v.Name,
+				ID:               v.ID,
+				CPUPercentage:    cpuPercent,
+				Memory:           mem,
+				MemoryPercentage: memPercent,
+				MemoryLimit:      memLimit,
+				NetworkRx:        netRx,
+				NetworkTx:        netTx,
+				BlockRead:        float64(blkRead),
+				BlockWrite:       float64(blkWrite),
+				PidsCurrent:      pidsStatsCurrent,
+			})
+			u <- nil
+			if !streamStats {
+				return
+			}
+		}
+	}()
+	for {
+		select {
+		case <-time.After(2 * time.Second):
+			// zero out the values if we have not received an update within
+			// the specified duration.
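The `SetErrorAndReset` call that follows implements this timeout branch. Separately, the decode loop above uses a recovery trick worth isolating: when `Decode` fails mid-stream, a fresh decoder is built over `io.MultiReader(dec.Buffered(), response.Body)` so the bytes the old decoder had already buffered are not lost. A hedged, self-contained sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := io.Reader(strings.NewReader(`{"a":1}{"a":2}`))
	dec := json.NewDecoder(body)
	for {
		var v map[string]int
		if err := dec.Decode(&v); err != nil {
			if err == io.EOF {
				break
			}
			// Rebuild the decoder without dropping its buffered bytes.
			// (A real caller, like the vendored code, would also sleep
			// and handle persistent errors rather than retry forever.)
			dec = json.NewDecoder(io.MultiReader(dec.Buffered(), body))
			continue
		}
		fmt.Println(v["a"])
	}
}
```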
+			s.SetErrorAndReset(errors.New("timeout waiting for stats"))
+			// if this is the first stat we get, release the WaitGroup
+			if !getFirst {
+				getFirst = true
+				waitFirst.Done()
+			}
+		case err := <-u:
+			s.SetError(err)
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				continue
+			}
+			// if this is the first stat we get, release the WaitGroup
+			if !getFirst {
+				getFirst = true
+				waitFirst.Done()
+			}
+		}
+		if !streamStats {
+			return
+		}
+	}
+}
+
+func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
+	var (
+		cpuPercent = 0.0
+		// calculate the change for the cpu usage of the container in between readings
+		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
+		// calculate the change for the entire system between readings
+		systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
+		onlineCPUs  = float64(v.CPUStats.OnlineCPUs)
+	)
+
+	if onlineCPUs == 0.0 {
+		onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
+	}
+	if systemDelta > 0.0 && cpuDelta > 0.0 {
+		cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
+	}
+	return cpuPercent
+}
+
+func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
+	// Max number of 100ns intervals between the previous time read and now
+	possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
+	possIntervals /= 100                                         // Convert to number of 100ns intervals
+	possIntervals *= uint64(v.NumProcs)                          // Multiply by the number of processors
+
+	// Intervals used
+	intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage
+
+	// Percentage avoiding divide-by-zero
+	if possIntervals > 0 {
+		return float64(intervalsUsed) / float64(possIntervals) * 100.0
+	}
+	return 0.00
+}
+
+func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
+	for _, bioEntry := range blkio.IoServiceBytesRecursive {
+		switch strings.ToLower(bioEntry.Op) {
+		case "read":
+			blkRead = blkRead + bioEntry.Value
+		case "write":
+			blkWrite = blkWrite + bioEntry.Value
+		}
+	}
+	return
+}
+
+func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
+	var rx, tx float64
+
+	for _, v := range network {
+		rx += float64(v.RxBytes)
+		tx += float64(v.TxBytes)
+	}
+	return rx, tx
+}
+
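To make the Unix CPU formula above concrete, a worked example: if the container consumed 200ms of CPU while total system CPU time advanced by 1s, with 4 CPUs online, usage is (0.2 / 1.0) * 4 * 100 = 80%. A small sketch reproducing just the arithmetic:

```go
package main

import "fmt"

// cpuPercent reproduces the formula in calculateCPUPercentUnix above.
func cpuPercent(cpuDelta, systemDelta, onlineCPUs float64) float64 {
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * onlineCPUs * 100.0
	}
	return 0.0
}

func main() {
	// 200ms container delta, 1s system delta, 4 CPUs online -> 80%.
	fmt.Println(cpuPercent(200e6, 1000e6, 4)) // 80
}
```

+// calculateMemUsageUnixNoCache calculates the memory usage of the container.
+// Page cache is intentionally excluded to avoid misinterpretation of the output.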
+func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { + return float64(mem.Usage - mem.Stats["cache"]) +} + +func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if limit != 0 { + return usedNoCache / limit * 100.0 + } + return 0 +} diff --git a/vendor/github.com/docker/cli/cli/command/container/stop.go b/vendor/github.com/docker/cli/cli/command/container/stop.go new file mode 100644 index 0000000..59d6d0b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/stop.go @@ -0,0 +1,67 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type stopOptions struct { + time int + timeChanged bool + + containers []string +} + +// NewStopCommand creates a new cobra.Command for `docker stop` +func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts stopOptions + + cmd := &cobra.Command{ + Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Stop one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.timeChanged = cmd.Flags().Changed("time") + return runStop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") + return cmd +} + +func runStop(dockerCli *command.DockerCli, opts *stopOptions) error { + ctx := context.Background() + + var timeout *time.Duration + if opts.timeChanged { + timeoutValue := time.Duration(opts.time) * time.Second + timeout = &timeoutValue + } + + var errs []string + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { + return dockerCli.Client().ContainerStop(ctx, id, timeout) + }) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/top.go b/vendor/github.com/docker/cli/cli/command/container/top.go new file mode 100644 index 0000000..428ee27 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/top.go @@ -0,0 +1,57 @@ +package container + +import ( + "fmt" + "strings" + "text/tabwriter" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type topOptions struct { + container string + + args []string +} + +// NewTopCommand creates a new cobra.Command for `docker top` +func NewTopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts topOptions + + cmd := &cobra.Command{ + Use: "top CONTAINER [ps OPTIONS]", + Short: "Display the running processes of a container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.args = args[1:] + return runTop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTop(dockerCli *command.DockerCli, opts *topOptions) error { + ctx := context.Background() + + procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args) + if err 
!= nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/tty.go b/vendor/github.com/docker/cli/cli/command/container/tty.go new file mode 100644 index 0000000..fe123eb --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/tty.go @@ -0,0 +1,103 @@ +package container + +import ( + "fmt" + "os" + gosignal "os/signal" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" +) + +// resizeTtyTo resizes tty to specific height and width +func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + Height: height, + Width: width, + } + + var err error + if isExec { + err = client.ContainerExecResize(ctx, id, options) + } else { + err = client.ContainerResize(ctx, id, options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// MonitorTtySize updates the container tty size when the terminal tty changes size +func MonitorTtySize(ctx context.Context, cli command.Cli, id string, isExec bool) error { + resizeTty := func() { + height, width := cli.Out().GetTtySize() + resizeTtyTo(ctx, cli.Client(), id, height, width, isExec) + } + + resizeTty() + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.Out().GetTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.Out().GetTtySize() + + if prevW != w || prevH != h { + resizeTty() + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + resizeTty() + } + }() + } + return nil +} + +// ForwardAllSignals forwards signals to the container +func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.Err(), "Unsupported signal: %v. 
Discarding.\n", s) + continue + } + + if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} diff --git a/vendor/github.com/docker/cli/cli/command/container/unpause.go b/vendor/github.com/docker/cli/cli/command/container/unpause.go new file mode 100644 index 0000000..b6d5e23 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/unpause.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type unpauseOptions struct { + containers []string +} + +// NewUnpauseCommand creates a new cobra.Command for `docker unpause` +func NewUnpauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts unpauseOptions + + cmd := &cobra.Command{ + Use: "unpause CONTAINER [CONTAINER...]", + Short: "Unpause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runUnpause(dockerCli, &opts) + }, + } + return cmd +} + +func runUnpause(dockerCli *command.DockerCli, opts *unpauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerUnpause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintln(dockerCli.Out(), container) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/update.go b/vendor/github.com/docker/cli/cli/command/container/update.go new file mode 100644 index 0000000..9cb74ae --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/update.go @@ -0,0 +1,133 @@ +package container + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + containertypes "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type updateOptions struct { + blkioWeight uint16 + cpuPeriod int64 + cpuQuota int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpusetCpus string + cpusetMems string + cpuShares int64 + memory opts.MemBytes + memoryReservation opts.MemBytes + memorySwap opts.MemSwapBytes + kernelMemory opts.MemBytes + restartPolicy string + cpus opts.NanoCPUs + + nFlag int + + containers []string +} + +// NewUpdateCommand creates a new cobra.Command for `docker update` +func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + var options updateOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Update configuration of one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.containers = args + options.nFlag = cmd.Flags().NFlag() + return runUpdate(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.Uint16Var(&options.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) 
quota") + flags.Int64Var(&options.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"}) + flags.Int64Var(&options.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"}) + flags.StringVar(&options.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.VarP(&options.memory, "memory", "m", "Memory limit") + flags.Var(&options.memoryReservation, "memory-reservation", "Memory soft limit") + flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Var(&options.kernelMemory, "kernel-memory", "Kernel memory limit") + flags.StringVar(&options.restartPolicy, "restart", "", "Restart policy to apply when a container exits") + + flags.Var(&options.cpus, "cpus", "Number of CPUs") + flags.SetAnnotation("cpus", "version", []string{"1.29"}) + + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, options *updateOptions) error { + var err error + + if options.nFlag == 0 { + return errors.New("you must provide one or more flags when using this command") + } + + var restartPolicy containertypes.RestartPolicy + if options.restartPolicy != "" { + restartPolicy, err = opts.ParseRestartPolicy(options.restartPolicy) + if err != nil { + return err + } + } + + resources := containertypes.Resources{ + BlkioWeight: options.blkioWeight, + CpusetCpus: options.cpusetCpus, + CpusetMems: options.cpusetMems, + CPUShares: options.cpuShares, + Memory: options.memory.Value(), + MemoryReservation: options.memoryReservation.Value(), + MemorySwap: options.memorySwap.Value(), + KernelMemory: options.kernelMemory.Value(), + CPUPeriod: options.cpuPeriod, + CPUQuota: options.cpuQuota, + CPURealtimePeriod: options.cpuRealtimePeriod, + CPURealtimeRuntime: options.cpuRealtimeRuntime, + NanoCPUs: options.cpus.Value(), + } + + updateConfig := containertypes.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + ctx := context.Background() + + var ( + warns []string + errs []string + ) + for _, container := range options.containers { + r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintln(dockerCli.Out(), container) + } + warns = append(warns, r.Warnings...) 
+ } + if len(warns) > 0 { + fmt.Fprintln(dockerCli.Out(), strings.Join(warns, "\n")) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/container/utils.go b/vendor/github.com/docker/cli/cli/command/container/utils.go new file mode 100644 index 0000000..d9afe24 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/container/utils.go @@ -0,0 +1,172 @@ +package container + +import ( + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + clientapi "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) <-chan int { + if len(containerID) == 0 { + // containerID can never be empty + panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") + } + + // Older versions used the Events API, and even older versions did not + // support server-side removal. This legacyWaitExitOrRemoved method + // preserves that old behavior and any issues it may have. + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") { + return legacyWaitExitOrRemoved(ctx, dockerCli, containerID, waitRemove) + } + + condition := container.WaitConditionNextExit + if waitRemove { + condition = container.WaitConditionRemoved + } + + resultC, errC := dockerCli.Client().ContainerWait(ctx, containerID, condition) + + statusC := make(chan int) + go func() { + select { + case result := <-resultC: + statusC <- int(result.StatusCode) + case err := <-errC: + logrus.Errorf("error waiting for container: %v", err) + statusC <- 125 + } + }() + + return statusC +} + +func legacyWaitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) <-chan int { + var removeErr error + statusChan := make(chan int) + exitCode := 125 + + // Get events via Events API + f := filters.NewArgs() + f.Add("type", "container") + f.Add("container", containerID) + options := types.EventsOptions{ + Filters: f, + } + eventCtx, cancel := context.WithCancel(ctx) + eventq, errq := dockerCli.Client().Events(eventCtx, options) + + eventProcessor := func(e events.Message) bool { + stopProcessing := false + switch e.Status { + case "die": + if v, ok := e.Actor.Attributes["exitCode"]; ok { + code, cerr := strconv.Atoi(v) + if cerr != nil { + logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) + } else { + exitCode = code + } + } + if !waitRemove { + stopProcessing = true + } else { + // If we are talking to an older daemon, `AutoRemove` is not supported. 
+ // We need to fall back to the old behavior, which is client-side removal + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { + go func() { + removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) + if removeErr != nil { + logrus.Errorf("error removing container: %v", removeErr) + cancel() // cancel the event Q + } + }() + } + } + case "detach": + exitCode = 0 + stopProcessing = true + case "destroy": + stopProcessing = true + } + return stopProcessing + } + + go func() { + defer func() { + statusChan <- exitCode // must always send an exit code or the caller will block + cancel() + }() + + for { + select { + case <-eventCtx.Done(): + if removeErr != nil { + return + } + case evt := <-eventq: + if eventProcessor(evt) { + return + } + case err := <-errq: + logrus.Errorf("error getting events from daemon: %v", err) + return + } + } + }() + + return statusChan +} + +// getExitCode performs an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(ctx context.Context, dockerCli command.Cli, containerID string) (bool, int, error) { + c, err := dockerCli.Client().ContainerInspect(ctx, containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !clientapi.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + return c.State.Running, c.State.ExitCode, nil +} + +func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { + if len(containers) == 0 { + return nil + } + const defaultParallel int = 50 + sem := make(chan struct{}, defaultParallel) + errChan := make(chan error) + + // make sure result is printed in correct order + output := map[string]chan error{} + for _, c := range containers { + output[c] = make(chan error, 1) + } + go func() { + for _, c := range containers { + err := <-output[c] + errChan <- err + } + }() + + go func() { + for _, c := range containers { + sem <- struct{}{} // Wait for active queue sem to drain. 
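The goroutine launch that follows completes this bounded-parallelism pattern: a buffered channel acts as a counting semaphore capping in-flight operations, while per-item result channels keep output in submission order. A standalone sketch under those assumptions:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxParallel = 2
	items := []string{"a", "b", "c", "d"}
	sem := make(chan struct{}, maxParallel)
	results := make(map[string]chan string, len(items))
	for _, it := range items {
		results[it] = make(chan string, 1)
	}

	for _, it := range items {
		sem <- struct{}{} // blocks while maxParallel ops are in flight
		go func(item string) {
			time.Sleep(10 * time.Millisecond) // stand-in for real work
			results[item] <- "done " + item
			<-sem // release the slot
		}(it)
	}

	for _, it := range items { // print in original submission order
		fmt.Println(<-results[it])
	}
}
```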
+			go func(container string) {
+				output[container] <- op(ctx, container)
+				<-sem
+			}(c)
+		}
+	}()
+	return errChan
+}
diff --git a/vendor/github.com/docker/cli/cli/command/container/wait.go b/vendor/github.com/docker/cli/cli/command/container/wait.go
new file mode 100644
index 0000000..f582469
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/container/wait.go
@@ -0,0 +1,53 @@
+package container
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
+)
+
+type waitOptions struct {
+	containers []string
+}
+
+// NewWaitCommand creates a new cobra.Command for `docker wait`
+func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts waitOptions
+
+	cmd := &cobra.Command{
+		Use:   "wait CONTAINER [CONTAINER...]",
+		Short: "Block until one or more containers stop, then print their exit codes",
+		Args:  cli.RequiresMinArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			opts.containers = args
+			return runWait(dockerCli, &opts)
+		},
+	}
+
+	return cmd
+}
+
+func runWait(dockerCli *command.DockerCli, opts *waitOptions) error {
+	ctx := context.Background()
+
+	var errs []string
+	for _, container := range opts.containers {
+		resultC, errC := dockerCli.Client().ContainerWait(ctx, container, "")
+
+		select {
+		case result := <-resultC:
+			fmt.Fprintf(dockerCli.Out(), "%d\n", result.StatusCode)
+		case err := <-errC:
+			errs = append(errs, err.Error())
+		}
+	}
+	if len(errs) > 0 {
+		return errors.New(strings.Join(errs, "\n"))
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/cli/cli/command/events_utils.go b/vendor/github.com/docker/cli/cli/command/events_utils.go
new file mode 100644
index 0000000..c271b20
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/events_utils.go
@@ -0,0 +1,47 @@
+package command
+
+import (
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	eventtypes "github.com/docker/docker/api/types/events"
+)
+
+// EventHandler is an abstract interface that lets callers register
+// their own handler functions for each type of event
+type EventHandler interface {
+	Handle(action string, h func(eventtypes.Message))
+	Watch(c <-chan eventtypes.Message)
+}
+
+// InitEventHandler initializes and returns an EventHandler
+func InitEventHandler() EventHandler {
+	return &eventHandler{handlers: make(map[string]func(eventtypes.Message))}
+}
+
+type eventHandler struct {
+	handlers map[string]func(eventtypes.Message)
+	mu       sync.Mutex
+}
+
+func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
+	w.mu.Lock()
+	w.handlers[action] = h
+	w.mu.Unlock()
+}
+
+// Watch ranges over the passed in event chan and processes the events based on the
+// handlers created for a given action.
+// To stop watching, close the event chan.
+func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/checkpoint.go b/vendor/github.com/docker/cli/cli/command/formatter/checkpoint.go new file mode 100644 index 0000000..041fcaf --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/checkpoint.go @@ -0,0 +1,52 @@ +package formatter + +import "github.com/docker/docker/api/types" + +const ( + defaultCheckpointFormat = "table {{.Name}}" + + checkpointNameHeader = "CHECKPOINT NAME" +) + +// NewCheckpointFormat returns a format for use with a checkpoint Context +func NewCheckpointFormat(source string) Format { + switch source { + case TableFormatKey: + return defaultCheckpointFormat + } + return Format(source) +} + +// CheckpointWrite writes formatted checkpoints using the Context +func CheckpointWrite(ctx Context, checkpoints []types.Checkpoint) error { + render := func(format func(subContext subContext) error) error { + for _, checkpoint := range checkpoints { + if err := format(&checkpointContext{c: checkpoint}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newCheckpointContext(), render) +} + +type checkpointContext struct { + HeaderContext + c types.Checkpoint +} + +func newCheckpointContext() *checkpointContext { + cpCtx := checkpointContext{} + cpCtx.header = volumeHeaderContext{ + "Name": checkpointNameHeader, + } + return &cpCtx +} + +func (c *checkpointContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *checkpointContext) Name() string { + return c.c.Name +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/config.go b/vendor/github.com/docker/cli/cli/command/formatter/config.go new file mode 100644 index 0000000..72203f3 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/config.go @@ -0,0 +1,171 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultConfigTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" + configIDHeader = "ID" + configCreatedHeader = "CREATED" + configUpdatedHeader = "UPDATED" + configInspectPrettyTemplate Format = `ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Created at: {{.CreatedAt}} +Updated at: {{.UpdatedAt}} +Data: +{{.Data}}` +) + +// NewConfigFormat returns a Format for rendering using a config Context +func NewConfigFormat(source string, quiet bool) Format { + switch source { + case PrettyFormatKey: + return configInspectPrettyTemplate + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultConfigTableFormat + } + return Format(source) +} + +// ConfigWrite writes the context +func ConfigWrite(ctx Context, configs []swarm.Config) error { + render := func(format func(subContext subContext) error) error { + for _, config := range configs { + configCtx := &configContext{c: config} + if err := format(configCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(newConfigContext(), render) +} + +func newConfigContext() *configContext { + cCtx := &configContext{} + + cCtx.header = 
map[string]string{ + "ID": configIDHeader, + "Name": nameHeader, + "CreatedAt": configCreatedHeader, + "UpdatedAt": configUpdatedHeader, + "Labels": labelsHeader, + } + return cCtx +} + +type configContext struct { + HeaderContext + c swarm.Config +} + +func (c *configContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *configContext) ID() string { + return c.c.ID +} + +func (c *configContext) Name() string { + return c.c.Spec.Annotations.Name +} + +func (c *configContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.c.Meta.CreatedAt)) + " ago" +} + +func (c *configContext) UpdatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.c.Meta.UpdatedAt)) + " ago" +} + +func (c *configContext) Labels() string { + mapLabels := c.c.Spec.Annotations.Labels + if mapLabels == nil { + return "" + } + var joinLabels []string + for k, v := range mapLabels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *configContext) Label(name string) string { + if c.c.Spec.Annotations.Labels == nil { + return "" + } + return c.c.Spec.Annotations.Labels[name] +} + +// ConfigInspectWrite renders the context for a list of configs +func ConfigInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != configInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + configI, _, err := getRef(ref) + if err != nil { + return err + } + config, ok := configI.(swarm.Config) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&configInspectContext{Config: config}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&configInspectContext{}, render) +} + +type configInspectContext struct { + swarm.Config + subContext +} + +func (ctx *configInspectContext) ID() string { + return ctx.Config.ID +} + +func (ctx *configInspectContext) Name() string { + return ctx.Config.Spec.Name +} + +func (ctx *configInspectContext) Labels() map[string]string { + return ctx.Config.Spec.Labels +} + +func (ctx *configInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Config.CreatedAt) +} + +func (ctx *configInspectContext) UpdatedAt() string { + return command.PrettyPrint(ctx.Config.UpdatedAt) +} + +func (ctx *configInspectContext) Data() string { + if ctx.Config.Spec.Data == nil { + return "" + } + return string(ctx.Config.Spec.Data) +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/container.go b/vendor/github.com/docker/cli/cli/command/formatter/container.go new file mode 100644 index 0000000..5ee1309 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/container.go @@ -0,0 +1,345 @@ +package formatter + +import ( + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-units" +) + +const ( + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + + containerIDHeader = "CONTAINER ID" + namesHeader = "NAMES" + commandHeader = "COMMAND" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + mountsHeader = "MOUNTS" + localVolumes = "LOCAL 
VOLUMES" + networksHeader = "NETWORKS" +) + +// NewContainerFormat returns a Format for rendering using a Context +func NewContainerFormat(source string, quiet bool, size bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + format := defaultContainerTableFormat + if size { + format += `\t{{.Size}}` + } + return Format(format) + case RawFormatKey: + if quiet { + return `container_id: {{.ID}}` + } + format := `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{- pad .Status 1 0}} +names: {{.Names}} +labels: {{- pad .Labels 1 0}} +ports: {{- pad .Ports 1 0}} +` + if size { + format += `size: {{.Size}}\n` + } + return Format(format) + } + return Format(source) +} + +// ContainerWrite renders the context for a list of containers +func ContainerWrite(ctx Context, containers []types.Container) error { + render := func(format func(subContext subContext) error) error { + for _, container := range containers { + err := format(&containerContext{trunc: ctx.Trunc, c: container}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(newContainerContext(), render) +} + +type containerHeaderContext map[string]string + +func (c containerHeaderContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + return h +} + +type containerContext struct { + HeaderContext + trunc bool + c types.Container +} + +func newContainerContext() *containerContext { + containerCtx := containerContext{} + containerCtx.header = containerHeaderContext{ + "ID": containerIDHeader, + "Names": namesHeader, + "Image": imageHeader, + "Command": commandHeader, + "CreatedAt": createdAtHeader, + "RunningFor": runningForHeader, + "Ports": portsHeader, + "Status": statusHeader, + "Size": sizeHeader, + "Labels": labelsHeader, + "Mounts": mountsHeader, + "LocalVolumes": localVolumes, + "Networks": networksHeader, + } + return &containerCtx +} + +func (c *containerContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *containerContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + // truncate digest if no-trunc option was not selected + ref, err := reference.ParseNormalizedNamed(c.c.Image) + if err == nil { + if nt, ok := ref.(reference.NamedTagged); ok { + // case for when a tag is provided + if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil { + return reference.FamiliarString(namedTagged) + } + } else { + // case for when a tag is not provided + named := reference.TrimNamed(ref) + return reference.FamiliarString(named) + } + } + } + + return c.c.Image +} + +func (c *containerContext) Command() string { + command := c.c.Command + if c.trunc { + command = stringutils.Ellipsis(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + return time.Unix(c.c.Created, 0).String() +} + +func (c *containerContext) RunningFor() string 
{ + createdAt := time.Unix(c.c.Created, 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" +} + +func (c *containerContext) Ports() string { + return DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + return c.c.Status +} + +func (c *containerContext) Size() string { + srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) + sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Ellipsis(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +func (c *containerContext) LocalVolumes() string { + count := 0 + for _, m := range c.c.Mounts { + if m.Driver == "local" { + count++ + } + } + + return fmt.Sprintf("%d", count) +} + +func (c *containerContext) Networks() string { + if c.c.NetworkSettings == nil { + return "" + } + + networks := []string{} + for k := range c.c.NetworkSettings.Networks { + networks = append(networks, k) + } + + return strings.Join(networks, ",") +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) 
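The `strings.Join` on the next line produces the final string. As a usage illustration of the grouping behavior implemented above (assuming the vendored formatter package path): consecutive private ports of the same protocol collapse into a range, and explicit host mappings are appended last:

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types"
)

func main() {
	ports := []types.Port{
		{PrivatePort: 9988, Type: "tcp"},
		{PrivatePort: 9989, Type: "tcp"},
		{IP: "0.0.0.0", PrivatePort: 9090, PublicPort: 80, Type: "tcp"},
	}
	// Expected along the lines of: "9988-9989/tcp, 0.0.0.0:80->9090/tcp"
	fmt.Println(formatter.DisplayablePorts(ports))
}
```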
+ return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/custom.go b/vendor/github.com/docker/cli/cli/command/formatter/custom.go new file mode 100644 index 0000000..73487f6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/custom.go @@ -0,0 +1,35 @@ +package formatter + +const ( + imageHeader = "IMAGE" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + nameHeader = "NAME" + driverHeader = "DRIVER" + scopeHeader = "SCOPE" +) + +type subContext interface { + FullHeader() interface{} +} + +// HeaderContext provides the subContext interface for managing headers +type HeaderContext struct { + header interface{} +} + +// FullHeader returns the header as an interface +func (c *HeaderContext) FullHeader() interface{} { + return c.header +} + +func stripNamePrefix(ss []string) []string { + sss := make([]string, len(ss)) + for i, s := range ss { + sss[i] = s[1:] + } + + return sss +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/diff.go b/vendor/github.com/docker/cli/cli/command/formatter/diff.go new file mode 100644 index 0000000..9b46819 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/diff.go @@ -0,0 +1,72 @@ +package formatter + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" +) + +const ( + defaultDiffTableFormat = "table {{.Type}}\t{{.Path}}" + + changeTypeHeader = "CHANGE TYPE" + pathHeader = "PATH" +) + +// NewDiffFormat returns a format for use with a diff Context +func NewDiffFormat(source string) Format { + switch source { + case TableFormatKey: + return defaultDiffTableFormat + } + return Format(source) +} + +// DiffWrite writes formatted diff using the Context +func DiffWrite(ctx Context, changes []container.ContainerChangeResponseItem) error { + + render := func(format func(subContext subContext) error) error { + for _, change := range changes { + if err := format(&diffContext{c: change}); err != nil { + return err + } + } + return nil + } + return ctx.Write(newDiffContext(), render) +} + +type diffContext struct { + HeaderContext + c container.ContainerChangeResponseItem +} + +func newDiffContext() *diffContext { + diffCtx := diffContext{} + diffCtx.header = map[string]string{ + "Type": changeTypeHeader, + "Path": pathHeader, + } + return &diffCtx +} + +func (d *diffContext) MarshalJSON() ([]byte, error) { + return marshalJSON(d) +} + +func (d *diffContext) Type() string { + var kind string + switch d.c.Kind { + 
case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + return kind + +} + +func (d *diffContext) Path() string { + return d.c.Path +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go new file mode 100644 index 0000000..1713a9c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go @@ -0,0 +1,359 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + + typeHeader = "TYPE" + totalHeader = "TOTAL" + activeHeader = "ACTIVE" + reclaimableHeader = "RECLAIMABLE" + containersHeader = "CONTAINERS" + sharedSizeHeader = "SHARED SIZE" + uniqueSizeHeader = "UNIQUE SIZE" +) + +// DiskUsageContext contains disk-usage-specific information required by the formatter, encapsulating a Context struct. +type DiskUsageContext struct { + Context + Verbose bool + LayersSize int64 + Images []*types.ImageSummary + Containers []*types.Container + Volumes []*types.Volume +} + +func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { + ctx.buffer = bytes.NewBufferString("") + ctx.header = "" + ctx.Format = Format(format) + ctx.preFormat() + + return ctx.parseFormat() +} + +// +// NewDiskUsageFormat returns a format for rendering a DiskUsageContext +func NewDiskUsageFormat(source string) Format { + switch source { + case TableFormatKey: + format := defaultDiskUsageTableFormat + return Format(format) + case RawFormatKey: + format := `type: {{.Type}} +total: {{.TotalCount}} +active: {{.Active}} +size: {{.Size}} +reclaimable: {{.Reclaimable}} +` + return Format(format) + } + return Format(source) +} + +func (ctx *DiskUsageContext) Write() (err error) { + if ctx.Verbose { + return ctx.verboseWrite() + } + ctx.buffer = bytes.NewBufferString("") + ctx.preFormat() + + tmpl, err := ctx.parseFormat() + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ + totalSize: ctx.LayersSize, + images: ctx.Images, + }) + if err != nil { + return err + } + err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ + containers: ctx.Containers, + }) + if err != nil { + return err + } + + err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ + volumes: ctx.Volumes, + }) + if err != nil { + return err + } + + diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}} + diskUsageContainersCtx.header = map[string]string{ + "Type": typeHeader, + "TotalCount": totalHeader, + "Active": activeHeader, + "Size": sizeHeader, + "Reclaimable": reclaimableHeader, + } + ctx.postFormat(tmpl, &diskUsageContainersCtx) + + return err +} + +func (ctx *DiskUsageContext) verboseWrite() (err error) { + // First images + tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) + if err != 
nil { + return + } + + ctx.Output.Write([]byte("Images space usage:\n\n")) + for _, i := range ctx.Images { + repo := "" + tag := "" + if len(i.RepoTags) > 0 && !isDangling(*i) { + // Only show the first tag + ref, err := reference.ParseNormalizedNamed(i.RepoTags[0]) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repo = reference.FamiliarName(ref) + tag = nt.Tag() + } + } + + err = ctx.contextFormat(tmpl, &imageContext{ + repo: repo, + tag: tag, + trunc: true, + i: *i, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, newImageContext()) + + // Now containers + ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) + if err != nil { + return + } + for _, c := range ctx.Containers { + // Don't display the virtual size + c.SizeRootFs = 0 + err = ctx.contextFormat(tmpl, &containerContext{ + trunc: true, + c: *c, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, newContainerContext()) + + // And volumes + ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) + if err != nil { + return + } + for _, v := range ctx.Volumes { + err = ctx.contextFormat(tmpl, &volumeContext{ + v: *v, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, newVolumeContext()) + return +} + +type diskUsageImagesContext struct { + HeaderContext + totalSize int64 + images []*types.ImageSummary +} + +func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageImagesContext) Type() string { + return "Images" +} + +func (c *diskUsageImagesContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.images)) +} + +func (c *diskUsageImagesContext) Active() string { + used := 0 + for _, i := range c.images { + if i.Containers > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageImagesContext) Size() string { + return units.HumanSize(float64(c.totalSize)) + +} + +func (c *diskUsageImagesContext) Reclaimable() string { + var used int64 + + for _, i := range c.images { + if i.Containers != 0 { + if i.VirtualSize == -1 || i.SharedSize == -1 { + continue + } + used += i.VirtualSize - i.SharedSize + } + } + + reclaimable := c.totalSize - used + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + } + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageContainersContext struct { + HeaderContext + containers []*types.Container +} + +func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageContainersContext) Type() string { + return "Containers" +} + +func (c *diskUsageContainersContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.containers)) +} + +func (c *diskUsageContainersContext) isActive(container types.Container) bool { + return strings.Contains(container.State, "running") || + strings.Contains(container.State, "paused") || + strings.Contains(container.State, "restarting") +} + +func (c *diskUsageContainersContext) Active() string { + used := 0 + for _, container := range c.containers { + if c.isActive(*container) { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageContainersContext) Size() string { + var size int64 + + for _, container := range c.containers { + size += container.SizeRw + } + + return 
units.HumanSize(float64(size)) +} + +func (c *diskUsageContainersContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + for _, container := range c.containers { + if !c.isActive(*container) { + reclaimable += container.SizeRw + } + totalSize += container.SizeRw + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageVolumesContext struct { + HeaderContext + volumes []*types.Volume +} + +func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *diskUsageVolumesContext) Type() string { + return "Local Volumes" +} + +func (c *diskUsageVolumesContext) TotalCount() string { + return fmt.Sprintf("%d", len(c.volumes)) +} + +func (c *diskUsageVolumesContext) Active() string { + + used := 0 + for _, v := range c.volumes { + if v.UsageData.RefCount > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageVolumesContext) Size() string { + var size int64 + + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + size += v.UsageData.Size + } + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageVolumesContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + if v.UsageData.RefCount == 0 { + reclaimable += v.UsageData.Size + } + totalSize += v.UsageData.Size + } + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/formatter.go b/vendor/github.com/docker/cli/cli/command/formatter/formatter.go new file mode 100644 index 0000000..3f07aee --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/formatter.go @@ -0,0 +1,119 @@ +package formatter + +import ( + "bytes" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/pkg/templates" + "github.com/pkg/errors" +) + +// Format keys used to specify certain kinds of output formats +const ( + TableFormatKey = "table" + RawFormatKey = "raw" + PrettyFormatKey = "pretty" + + defaultQuietFormat = "{{.ID}}" +) + +// Format is the format string rendered using the Context +type Format string + +// IsTable returns true if the format is a table-type format +func (f Format) IsTable() bool { + return strings.HasPrefix(string(f), TableFormatKey) +} + +// Contains returns true if the format contains the substring +func (f Format) Contains(sub string) bool { + return strings.Contains(string(f), sub) +} + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format Format + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
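+ // (stringid.TruncateID, for example, shortens a full 64-character container ID to its 12-character prefix.)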
+ Trunc bool + + // internal elements + finalFormat string + header interface{} + buffer *bytes.Buffer +} + +func (c *Context) preFormat() { + c.finalFormat = string(c.Format) + + // TODO: handle this in the Format type + if c.Format.IsTable() { + c.finalFormat = c.finalFormat[len(TableFormatKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + return tmpl, errors.Errorf("Template parsing error: %v\n", err) + } + return tmpl, err +} + +func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { + if c.Format.IsTable() { + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + buffer := bytes.NewBufferString("") + tmpl.Funcs(templates.HeaderFunctions).Execute(buffer, subContext.FullHeader()) + buffer.WriteTo(t) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + return errors.Errorf("Template execution error: %v\n", err) + } + if c.Format.IsTable() && c.header != nil { + c.header = subContext.FullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// SubFormat is a function type accepted by Write() +type SubFormat func(func(subContext) error) error + +// Write the template to the buffer using this Context +func (c *Context) Write(sub subContext, f SubFormat) error { + c.buffer = bytes.NewBufferString("") + c.preFormat() + + tmpl, err := c.parseFormat() + if err != nil { + return err + } + + subFormat := func(subContext subContext) error { + return c.contextFormat(tmpl, subContext) + } + if err := f(subFormat); err != nil { + return err + } + + c.postFormat(tmpl, sub) + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/history.go b/vendor/github.com/docker/cli/cli/command/formatter/history.go new file mode 100644 index 0000000..161aaa4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/history.go @@ -0,0 +1,107 @@ +package formatter + +import ( + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + units "github.com/docker/go-units" +) + +const ( + defaultHistoryTableFormat = "table {{.ID}}\t{{.CreatedSince}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}" + nonHumanHistoryTableFormat = "table {{.ID}}\t{{.CreatedAt}}\t{{.CreatedBy}}\t{{.Size}}\t{{.Comment}}" + + historyIDHeader = "IMAGE" + createdByHeader = "CREATED BY" + commentHeader = "COMMENT" +) + +// NewHistoryFormat returns a format for rendering a HistoryContext +func NewHistoryFormat(source string, quiet bool, human bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case !human: + return nonHumanHistoryTableFormat + default: + return defaultHistoryTableFormat + } + } + + return Format(source) +} + +// HistoryWrite writes the context +func HistoryWrite(ctx Context, human bool, histories []image.HistoryResponseItem) error { + render := func(format func(subContext subContext) error) error { + for _, history := range histories { + historyCtx := &historyContext{trunc: ctx.Trunc, h: history, human: human} + if err := format(historyCtx); err != nil { + return err + 
} + } + return nil + } + historyCtx := &historyContext{} + historyCtx.header = map[string]string{ + "ID": historyIDHeader, + "CreatedSince": createdSinceHeader, + "CreatedAt": createdAtHeader, + "CreatedBy": createdByHeader, + "Size": sizeHeader, + "Comment": commentHeader, + } + return ctx.Write(historyCtx, render) +} + +type historyContext struct { + HeaderContext + trunc bool + human bool + h image.HistoryResponseItem +} + +func (c *historyContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *historyContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.h.ID) + } + return c.h.ID +} + +func (c *historyContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(time.Unix(c.h.Created, 0))) +} + +func (c *historyContext) CreatedSince() string { + created := units.HumanDuration(time.Now().UTC().Sub(time.Unix(c.h.Created, 0))) + return created + " ago" +} + +func (c *historyContext) CreatedBy() string { + createdBy := strings.Replace(c.h.CreatedBy, "\t", " ", -1) + if c.trunc { + return stringutils.Ellipsis(createdBy, 45) + } + return createdBy +} + +func (c *historyContext) Size() string { + if c.human { + return units.HumanSizeWithPrecision(float64(c.h.Size), 3) + } + return strconv.FormatInt(c.h.Size, 10) +} + +func (c *historyContext) Comment() string { + return c.h.Comment +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/image.go b/vendor/github.com/docker/cli/cli/command/formatter/image.go new file mode 100644 index 0000000..cbd3eda --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/image.go @@ -0,0 +1,272 @@ +package formatter + +import ( + "fmt" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" +) + +const ( + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}}\t{{.Size}}" + + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +// ImageContext contains image-specific information required by the formatter, encapsulating a Context struct. 
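+// Digest, when set, makes table output include the DIGEST column (see NewImageFormat below).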
+type ImageContext struct { + Context + Digest bool +} + +func isDangling(image types.ImageSummary) bool { + return len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "<none>@<none>" +} + +// NewImageFormat returns a format for rendering an ImageContext +func NewImageFormat(source string, quiet bool, digest bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case digest: + return defaultImageTableFormatWithDigest + default: + return defaultImageTableFormat + } + case RawFormatKey: + switch { + case quiet: + return `image_id: {{.ID}}` + case digest: + return `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + default: + return `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + + format := Format(source) + if format.IsTable() && digest && !format.Contains("{{.Digest}}") { + format += "\t{{.Digest}}" + } + return format +} + +// ImageWrite writes the formatted images using the ImageContext +func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { + render := func(format func(subContext subContext) error) error { + return imageFormat(ctx, images, format) + } + return ctx.Write(newImageContext(), render) +} + +func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { + for _, image := range images { + images := []*imageContext{} + if isDangling(image) { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: "", + tag: "", + digest: "", + }) + } else { + repoTags := map[string][]string{} + repoDigests := map[string][]string{} + + for _, refString := range image.RepoTags { + ref, err := reference.ParseNormalizedNamed(refString) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + familiarRef := reference.FamiliarName(ref) + repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag()) + } + } + for _, refString := range image.RepoDigests { + ref, err := reference.ParseNormalizedNamed(refString) + if err != nil { + continue + } + if c, ok := ref.(reference.Canonical); ok { + familiarRef := reference.FamiliarName(ref) + repoDigests[familiarRef] = append(repoDigests[familiarRef], c.Digest().String()) + } + } + + for repo, tags := range repoTags { + digests := repoDigests[repo] + + // Do not display digests as their own row + delete(repoDigests, repo) + + if !ctx.Digest { + // Ignore digest references, just show tag once + digests = nil + } + + for _, tag := range tags { + if len(digests) == 0 { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: "", + }) + continue + } + // Display the digests for each tag + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: dgst, + }) + } + + } + } + + // Show rows for remaining digest-only references + for repo, digests := range repoDigests { + // If digests are displayed, show row per digest + if ctx.Digest { + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + digest: dgst, + }) + } + } else { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + }) + } + } + } + for _, imageCtx := range images 
{ + if err := format(imageCtx); err != nil { + return err + } + } + } + return nil +} + +type imageContext struct { + HeaderContext + trunc bool + i types.ImageSummary + repo string + tag string + digest string +} + +func newImageContext() *imageContext { + imageCtx := imageContext{} + imageCtx.header = map[string]string{ + "ID": imageIDHeader, + "Repository": repositoryHeader, + "Tag": tagHeader, + "Digest": digestHeader, + "CreatedSince": createdSinceHeader, + "CreatedAt": createdAtHeader, + "Size": sizeHeader, + "Containers": containersHeader, + "VirtualSize": sizeHeader, + "SharedSize": sharedSizeHeader, + "UniqueSize": uniqueSizeHeader, + } + return &imageCtx +} + +func (c *imageContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *imageContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + return c.repo +} + +func (c *imageContext) Tag() string { + return c.tag +} + +func (c *imageContext) Digest() string { + return c.digest +} + +func (c *imageContext) CreatedSince() string { + createdAt := time.Unix(c.i.Created, 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago" +} + +func (c *imageContext) CreatedAt() string { + return time.Unix(c.i.Created, 0).String() +} + +func (c *imageContext) Size() string { + return units.HumanSizeWithPrecision(float64(c.i.Size), 3) +} + +func (c *imageContext) Containers() string { + if c.i.Containers == -1 { + return "N/A" + } + return fmt.Sprintf("%d", c.i.Containers) +} + +func (c *imageContext) VirtualSize() string { + return units.HumanSize(float64(c.i.VirtualSize)) +} + +func (c *imageContext) SharedSize() string { + if c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.SharedSize)) +} + +func (c *imageContext) UniqueSize() string { + if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/network.go b/vendor/github.com/docker/cli/cli/command/formatter/network.go new file mode 100644 index 0000000..4aeebd1 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/network.go @@ -0,0 +1,129 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" + + networkIDHeader = "NETWORK ID" + ipv6Header = "IPV6" + internalHeader = "INTERNAL" +) + +// NewNetworkFormat returns a Format for rendering using a network Context +func NewNetworkFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNetworkTableFormat + case RawFormatKey: + if quiet { + return `network_id: {{.ID}}` + } + return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` + } + return Format(source) +} + +// NetworkWrite writes the context +func NetworkWrite(ctx Context, networks []types.NetworkResource) error { + render := func(format func(subContext subContext) error) error { + for _, network := range networks { + networkCtx := &networkContext{trunc: ctx.Trunc, n: network} + if err := format(networkCtx); err != nil { + return err + } + } + return nil + } + networkCtx := networkContext{} + networkCtx.header = networkHeaderContext{ + "ID": networkIDHeader, + "Name": nameHeader, + 
"Driver": driverHeader, + "Scope": scopeHeader, + "IPv6": ipv6Header, + "Internal": internalHeader, + "Labels": labelsHeader, + "CreatedAt": createdAtHeader, + } + return ctx.Write(&networkCtx, render) +} + +type networkHeaderContext map[string]string + +func (c networkHeaderContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + return h +} + +type networkContext struct { + HeaderContext + trunc bool + n types.NetworkResource +} + +func (c *networkContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *networkContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.n.ID) + } + return c.n.ID +} + +func (c *networkContext) Name() string { + return c.n.Name +} + +func (c *networkContext) Driver() string { + return c.n.Driver +} + +func (c *networkContext) Scope() string { + return c.n.Scope +} + +func (c *networkContext) IPv6() string { + return fmt.Sprintf("%v", c.n.EnableIPv6) +} + +func (c *networkContext) Internal() string { + return fmt.Sprintf("%v", c.n.Internal) +} + +func (c *networkContext) Labels() string { + if c.n.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.n.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *networkContext) Label(name string) string { + if c.n.Labels == nil { + return "" + } + return c.n.Labels[name] +} + +func (c *networkContext) CreatedAt() string { + return c.n.Created.String() +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/node.go b/vendor/github.com/docker/cli/cli/command/formatter/node.go new file mode 100644 index 0000000..09bcd3f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/node.go @@ -0,0 +1,330 @@ +package formatter + +import ( + "encoding/base64" + "fmt" + "reflect" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}" + nodeInspectPrettyTemplate Format = `ID: {{.ID}} +{{- if .Name }} +Name: {{.Name}} +{{- end }} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Hostname: {{.Hostname}} +Joined at: {{.CreatedAt}} +Status: + State: {{.StatusState}} + {{- if .HasStatusMessage}} + Message: {{.StatusMessage}} + {{- end}} + Availability: {{.SpecAvailability}} + {{- if .Status.Addr}} + Address: {{.StatusAddr}} + {{- end}} +{{- if .HasManagerStatus}} +Manager Status: + Address: {{.ManagerStatusAddr}} + Raft Status: {{.ManagerStatusReachability}} + {{- if .IsManagerStatusLeader}} + Leader: Yes + {{- else}} + Leader: No + {{- end}} +{{- end}} +Platform: + Operating System: {{.PlatformOS}} + Architecture: {{.PlatformArchitecture}} +Resources: + CPUs: {{.ResourceNanoCPUs}} + Memory: {{.ResourceMemory}} +{{- if .HasEnginePlugins}} +Plugins: +{{- range $k, $v := .EnginePlugins }} + {{ $k }}:{{if $v }} {{ $v }}{{ end }} +{{- end }} +{{- end }} +Engine Version: {{.EngineVersion}} +{{- if .EngineLabels}} +Engine Labels: +{{- range $k, $v := .EngineLabels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{- end }} +{{- if .HasTLSInfo}} +TLS Info: + TrustRoot: +{{.TLSInfoTrustRoot}} + Issuer 
Subject: {{.TLSInfoCertIssuerSubject}} + Issuer Public Key: {{.TLSInfoCertIssuerPublicKey}} +{{- end}}` + nodeIDHeader = "ID" + selfHeader = "" + hostnameHeader = "HOSTNAME" + availabilityHeader = "AVAILABILITY" + managerStatusHeader = "MANAGER STATUS" + tlsStatusHeader = "TLS STATUS" +) + +// NewNodeFormat returns a Format for rendering using a node Context +func NewNodeFormat(source string, quiet bool) Format { + switch source { + case PrettyFormatKey: + return nodeInspectPrettyTemplate + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNodeTableFormat + case RawFormatKey: + if quiet { + return `node_id: {{.ID}}` + } + return `node_id: {{.ID}}\nhostname: {{.Hostname}}\nstatus: {{.Status}}\navailability: {{.Availability}}\nmanager_status: {{.ManagerStatus}}\n` + } + return Format(source) +} + +// NodeWrite writes the context +func NodeWrite(ctx Context, nodes []swarm.Node, info types.Info) error { + render := func(format func(subContext subContext) error) error { + for _, node := range nodes { + nodeCtx := &nodeContext{n: node, info: info} + if err := format(nodeCtx); err != nil { + return err + } + } + return nil + } + header := nodeHeaderContext{ + "ID": nodeIDHeader, + "Self": selfHeader, + "Hostname": hostnameHeader, + "Status": statusHeader, + "Availability": availabilityHeader, + "ManagerStatus": managerStatusHeader, + "TLSStatus": tlsStatusHeader, + } + nodeCtx := nodeContext{} + nodeCtx.header = header + return ctx.Write(&nodeCtx, render) +} + +type nodeHeaderContext map[string]string + +type nodeContext struct { + HeaderContext + n swarm.Node + info types.Info +} + +func (c *nodeContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *nodeContext) ID() string { + return c.n.ID +} + +func (c *nodeContext) Self() bool { + return c.n.ID == c.info.Swarm.NodeID +} + +func (c *nodeContext) Hostname() string { + return c.n.Description.Hostname +} + +func (c *nodeContext) Status() string { + return command.PrettyPrint(string(c.n.Status.State)) +} + +func (c *nodeContext) Availability() string { + return command.PrettyPrint(string(c.n.Spec.Availability)) +} + +func (c *nodeContext) ManagerStatus() string { + reachability := "" + if c.n.ManagerStatus != nil { + if c.n.ManagerStatus.Leader { + reachability = "Leader" + } else { + reachability = string(c.n.ManagerStatus.Reachability) + } + } + return command.PrettyPrint(reachability) +} + +func (c *nodeContext) TLSStatus() string { + if c.info.Swarm.Cluster == nil || reflect.DeepEqual(c.info.Swarm.Cluster.TLSInfo, swarm.TLSInfo{}) || reflect.DeepEqual(c.n.Description.TLSInfo, swarm.TLSInfo{}) { + return "Unknown" + } + if reflect.DeepEqual(c.n.Description.TLSInfo, c.info.Swarm.Cluster.TLSInfo) { + return "Ready" + } + return "Needs Rotation" +} + +// NodeInspectWrite renders the context for a list of nodes +func NodeInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != nodeInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + nodeI, _, err := getRef(ref) + if err != nil { + return err + } + node, ok := nodeI.(swarm.Node) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&nodeInspectContext{Node: node}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&nodeInspectContext{}, render) +} + +type nodeInspectContext struct { + swarm.Node + 
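+ // The embedded subContext is only here to satisfy Context.Write; the pretty template is not a table format, so FullHeader is never called and the nil interface is safe.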
subContext +} + +func (ctx *nodeInspectContext) ID() string { + return ctx.Node.ID +} + +func (ctx *nodeInspectContext) Name() string { + return ctx.Node.Spec.Name +} + +func (ctx *nodeInspectContext) Labels() map[string]string { + return ctx.Node.Spec.Labels +} + +func (ctx *nodeInspectContext) Hostname() string { + return ctx.Node.Description.Hostname +} + +func (ctx *nodeInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Node.CreatedAt) +} + +func (ctx *nodeInspectContext) StatusState() string { + return command.PrettyPrint(ctx.Node.Status.State) +} + +func (ctx *nodeInspectContext) HasStatusMessage() bool { + return ctx.Node.Status.Message != "" +} + +func (ctx *nodeInspectContext) StatusMessage() string { + return command.PrettyPrint(ctx.Node.Status.Message) +} + +func (ctx *nodeInspectContext) SpecAvailability() string { + return command.PrettyPrint(ctx.Node.Spec.Availability) +} + +func (ctx *nodeInspectContext) HasStatusAddr() bool { + return ctx.Node.Status.Addr != "" +} + +func (ctx *nodeInspectContext) StatusAddr() string { + return ctx.Node.Status.Addr +} + +func (ctx *nodeInspectContext) HasManagerStatus() bool { + return ctx.Node.ManagerStatus != nil +} + +func (ctx *nodeInspectContext) ManagerStatusAddr() string { + return ctx.Node.ManagerStatus.Addr +} + +func (ctx *nodeInspectContext) ManagerStatusReachability() string { + return command.PrettyPrint(ctx.Node.ManagerStatus.Reachability) +} + +func (ctx *nodeInspectContext) IsManagerStatusLeader() bool { + return ctx.Node.ManagerStatus.Leader +} + +func (ctx *nodeInspectContext) PlatformOS() string { + return ctx.Node.Description.Platform.OS +} + +func (ctx *nodeInspectContext) PlatformArchitecture() string { + return ctx.Node.Description.Platform.Architecture +} + +func (ctx *nodeInspectContext) ResourceNanoCPUs() int { + if ctx.Node.Description.Resources.NanoCPUs == 0 { + return int(0) + } + return int(ctx.Node.Description.Resources.NanoCPUs) / 1e9 +} + +func (ctx *nodeInspectContext) ResourceMemory() string { + if ctx.Node.Description.Resources.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Node.Description.Resources.MemoryBytes)) +} + +func (ctx *nodeInspectContext) HasEnginePlugins() bool { + return len(ctx.Node.Description.Engine.Plugins) > 0 +} + +func (ctx *nodeInspectContext) EnginePlugins() map[string]string { + pluginMap := map[string][]string{} + for _, p := range ctx.Node.Description.Engine.Plugins { + pluginMap[p.Type] = append(pluginMap[p.Type], p.Name) + } + + pluginNamesByType := map[string]string{} + for k, v := range pluginMap { + pluginNamesByType[k] = strings.Join(v, ", ") + } + return pluginNamesByType +} + +func (ctx *nodeInspectContext) EngineLabels() map[string]string { + return ctx.Node.Description.Engine.Labels +} + +func (ctx *nodeInspectContext) EngineVersion() string { + return ctx.Node.Description.Engine.EngineVersion +} + +func (ctx *nodeInspectContext) HasTLSInfo() bool { + tlsInfo := ctx.Node.Description.TLSInfo + return !reflect.DeepEqual(tlsInfo, swarm.TLSInfo{}) +} + +func (ctx *nodeInspectContext) TLSInfoTrustRoot() string { + return ctx.Node.Description.TLSInfo.TrustRoot +} + +func (ctx *nodeInspectContext) TLSInfoCertIssuerPublicKey() string { + return base64.StdEncoding.EncodeToString(ctx.Node.Description.TLSInfo.CertIssuerPublicKey) +} + +func (ctx *nodeInspectContext) TLSInfoCertIssuerSubject() string { + return base64.StdEncoding.EncodeToString(ctx.Node.Description.TLSInfo.CertIssuerSubject) +} diff --git 
a/vendor/github.com/docker/cli/cli/command/formatter/plugin.go b/vendor/github.com/docker/cli/cli/command/formatter/plugin.go new file mode 100644 index 0000000..2b71281 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/plugin.go @@ -0,0 +1,95 @@ +package formatter + +import ( + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" +) + +const ( + defaultPluginTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Description}}\t{{.Enabled}}" + + pluginIDHeader = "ID" + descriptionHeader = "DESCRIPTION" + enabledHeader = "ENABLED" +) + +// NewPluginFormat returns a Format for rendering using a plugin Context +func NewPluginFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultPluginTableFormat + case RawFormatKey: + if quiet { + return `plugin_id: {{.ID}}` + } + return `plugin_id: {{.ID}}\nname: {{.Name}}\ndescription: {{.Description}}\nenabled: {{.Enabled}}\n` + } + return Format(source) +} + +// PluginWrite writes the context +func PluginWrite(ctx Context, plugins []*types.Plugin) error { + render := func(format func(subContext subContext) error) error { + for _, plugin := range plugins { + pluginCtx := &pluginContext{trunc: ctx.Trunc, p: *plugin} + if err := format(pluginCtx); err != nil { + return err + } + } + return nil + } + pluginCtx := pluginContext{} + pluginCtx.header = map[string]string{ + "ID": pluginIDHeader, + "Name": nameHeader, + "Description": descriptionHeader, + "Enabled": enabledHeader, + "PluginReference": imageHeader, + } + return ctx.Write(&pluginCtx, render) +} + +type pluginContext struct { + HeaderContext + trunc bool + p types.Plugin +} + +func (c *pluginContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *pluginContext) ID() string { + if c.trunc { + return stringid.TruncateID(c.p.ID) + } + return c.p.ID +} + +func (c *pluginContext) Name() string { + return c.p.Name +} + +func (c *pluginContext) Description() string { + desc := strings.Replace(c.p.Config.Description, "\n", "", -1) + desc = strings.Replace(desc, "\r", "", -1) + if c.trunc { + desc = stringutils.Ellipsis(desc, 45) + } + + return desc +} + +func (c *pluginContext) Enabled() bool { + return c.p.Enabled +} + +func (c *pluginContext) PluginReference() string { + return c.p.PluginReference +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/reflect.go b/vendor/github.com/docker/cli/cli/command/formatter/reflect.go new file mode 100644 index 0000000..fd59404 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/reflect.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "encoding/json" + "reflect" + "unicode" + + "github.com/pkg/errors" +) + +func marshalJSON(x interface{}) ([]byte, error) { + m, err := marshalMap(x) + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// marshalMap marshals x to map[string]interface{} +func marshalMap(x interface{}) (map[string]interface{}, error) { + val := reflect.ValueOf(x) + if val.Kind() != reflect.Ptr { + return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind()) + } + if val.IsNil() { + return nil, errors.Errorf("expected a pointer to a struct, got nil pointer") + } + valElem := val.Elem() + if valElem.Kind() != reflect.Struct { + return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + } + typ := val.Type() + m := 
make(map[string]interface{}) + for i := 0; i < val.NumMethod(); i++ { + k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) + if err != nil { + return nil, err + } + if k != "" { + m[k] = v + } + } + return m, nil +} + +var unmarshallableNames = map[string]struct{}{"FullHeader": {}} + +// marshalForMethod returns the map key and the map value for marshalling the method. +// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") +func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { + if val.Kind() != reflect.Func { + return "", nil, errors.Errorf("expected func, got %v", val.Kind()) + } + name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() + _, blackListed := unmarshallableNames[name] + // FIXME: In text/template, (numOut == 2) is marshallable, + // if the type of the second param is error. + marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && + numIn == 0 && numOut == 1 + if !marshallable { + return "", nil, nil + } + result := val.Call(make([]reflect.Value, numIn)) + intf := result[0].Interface() + return name, intf, nil +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/secret.go b/vendor/github.com/docker/cli/cli/command/formatter/secret.go new file mode 100644 index 0000000..04bf944 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/secret.go @@ -0,0 +1,162 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types/swarm" + units "github.com/docker/go-units" +) + +const ( + defaultSecretTableFormat = "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}" + secretIDHeader = "ID" + secretCreatedHeader = "CREATED" + secretUpdatedHeader = "UPDATED" + secretInspectPrettyTemplate Format = `ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + - {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Created at: {{.CreatedAt}} +Updated at: {{.UpdatedAt}}` +) + +// NewSecretFormat returns a Format for rendering using a secret Context +func NewSecretFormat(source string, quiet bool) Format { + switch source { + case PrettyFormatKey: + return secretInspectPrettyTemplate + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultSecretTableFormat + } + return Format(source) +} + +// SecretWrite writes the context +func SecretWrite(ctx Context, secrets []swarm.Secret) error { + render := func(format func(subContext subContext) error) error { + for _, secret := range secrets { + secretCtx := &secretContext{s: secret} + if err := format(secretCtx); err != nil { + return err + } + } + return nil + } + return ctx.Write(newSecretContext(), render) +} + +func newSecretContext() *secretContext { + sCtx := &secretContext{} + + sCtx.header = map[string]string{ + "ID": secretIDHeader, + "Name": nameHeader, + "CreatedAt": secretCreatedHeader, + "UpdatedAt": secretUpdatedHeader, + "Labels": labelsHeader, + } + return sCtx +} + +type secretContext struct { + HeaderContext + s swarm.Secret +} + +func (c *secretContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *secretContext) ID() string { + return c.s.ID +} + +func (c *secretContext) Name() string { + return c.s.Spec.Annotations.Name +} + +func (c *secretContext) CreatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.CreatedAt)) + " ago" +} + +func (c 
*secretContext) UpdatedAt() string { + return units.HumanDuration(time.Now().UTC().Sub(c.s.Meta.UpdatedAt)) + " ago" +} + +func (c *secretContext) Labels() string { + mapLabels := c.s.Spec.Annotations.Labels + if mapLabels == nil { + return "" + } + var joinLabels []string + for k, v := range mapLabels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *secretContext) Label(name string) string { + if c.s.Spec.Annotations.Labels == nil { + return "" + } + return c.s.Spec.Annotations.Labels[name] +} + +// SecretInspectWrite renders the context for a list of secrets +func SecretInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != secretInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + secretI, _, err := getRef(ref) + if err != nil { + return err + } + secret, ok := secretI.(swarm.Secret) + if !ok { + return fmt.Errorf("got wrong object to inspect :%v", ok) + } + if err := format(&secretInspectContext{Secret: secret}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&secretInspectContext{}, render) +} + +type secretInspectContext struct { + swarm.Secret + subContext +} + +func (ctx *secretInspectContext) ID() string { + return ctx.Secret.ID +} + +func (ctx *secretInspectContext) Name() string { + return ctx.Secret.Spec.Name +} + +func (ctx *secretInspectContext) Labels() map[string]string { + return ctx.Secret.Spec.Labels +} + +func (ctx *secretInspectContext) CreatedAt() string { + return command.PrettyPrint(ctx.Secret.CreatedAt) +} + +func (ctx *secretInspectContext) UpdatedAt() string { + return command.PrettyPrint(ctx.Secret.UpdatedAt) +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/service.go b/vendor/github.com/docker/cli/cli/command/formatter/service.go new file mode 100644 index 0000000..27200b8 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/formatter/service.go @@ -0,0 +1,535 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/stringid" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +const serviceInspectPrettyTemplate Format = ` +ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Service Mode: +{{- if .IsModeGlobal }} Global +{{- else if .IsModeReplicated }} Replicated +{{- if .ModeReplicatedReplicas }} + Replicas: {{ .ModeReplicatedReplicas }} +{{- end }}{{ end }} +{{- if .HasUpdateStatus }} +UpdateStatus: + State: {{ .UpdateStatusState }} +{{- if .HasUpdateStatusStarted }} + Started: {{ .UpdateStatusStarted }} +{{- end }} +{{- if .UpdateIsCompleted }} + Completed: {{ .UpdateStatusCompleted }} +{{- end }} + Message: {{ .UpdateStatusMessage }} +{{- end }} +Placement: +{{- if .TaskPlacementConstraints }} + Constraints: {{ .TaskPlacementConstraints }} +{{- end }} +{{- if .TaskPlacementPreferences }} + Preferences: {{ .TaskPlacementPreferences }} +{{- end }} +{{- if .HasUpdateConfig }} +UpdateConfig: + Parallelism: {{ .UpdateParallelism }} +{{- if .HasUpdateDelay}} + Delay: {{ .UpdateDelay 
}} +{{- end }} + On failure: {{ .UpdateOnFailure }} +{{- if .HasUpdateMonitor}} + Monitoring Period: {{ .UpdateMonitor }} +{{- end }} + Max failure ratio: {{ .UpdateMaxFailureRatio }} + Update order: {{ .UpdateOrder }} +{{- end }} +{{- if .HasRollbackConfig }} +RollbackConfig: + Parallelism: {{ .RollbackParallelism }} +{{- if .HasRollbackDelay}} + Delay: {{ .RollbackDelay }} +{{- end }} + On failure: {{ .RollbackOnFailure }} +{{- if .HasRollbackMonitor}} + Monitoring Period: {{ .RollbackMonitor }} +{{- end }} + Max failure ratio: {{ .RollbackMaxFailureRatio }} + Rollback order: {{ .RollbackOrder }} +{{- end }} +ContainerSpec: + Image: {{ .ContainerImage }} +{{- if .ContainerArgs }} + Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} +{{- end -}} +{{- if .ContainerEnv }} + Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} +{{- end -}} +{{- if .ContainerWorkDir }} + Dir: {{ .ContainerWorkDir }} +{{- end -}} +{{- if .ContainerUser }} + User: {{ .ContainerUser }} +{{- end }} +{{- if .ContainerMounts }} +Mounts: +{{- end }} +{{- range $mount := .ContainerMounts }} + Target = {{ $mount.Target }} + Source = {{ $mount.Source }} + ReadOnly = {{ $mount.ReadOnly }} + Type = {{ $mount.Type }} +{{- end -}} +{{- if .HasResources }} +Resources: +{{- if .HasResourceReservations }} + Reservations: +{{- if gt .ResourceReservationNanoCPUs 0.0 }} + CPU: {{ .ResourceReservationNanoCPUs }} +{{- end }} +{{- if .ResourceReservationMemory }} + Memory: {{ .ResourceReservationMemory }} +{{- end }}{{ end }} +{{- if .HasResourceLimits }} + Limits: +{{- if gt .ResourceLimitsNanoCPUs 0.0 }} + CPU: {{ .ResourceLimitsNanoCPUs }} +{{- end }} +{{- if .ResourceLimitMemory }} + Memory: {{ .ResourceLimitMemory }} +{{- end }}{{ end }}{{ end }} +{{- if .Networks }} +Networks: +{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} +Endpoint Mode: {{ .EndpointMode }} +{{- if .Ports }} +Ports: +{{- range $port := .Ports }} + PublishedPort = {{ $port.PublishedPort }} + Protocol = {{ $port.Protocol }} + TargetPort = {{ $port.TargetPort }} + PublishMode = {{ $port.PublishMode }} +{{- end }} {{ end -}} +` + +// NewServiceFormat returns a Format for rendering using a Context +func NewServiceFormat(source string) Format { + switch source { + case PrettyFormatKey: + return serviceInspectPrettyTemplate + default: + return Format(strings.TrimPrefix(source, RawFormatKey)) + } +} + +func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string { + networkNames := make(map[string]string) + for _, network := range service.Spec.TaskTemplate.Networks { + if resolved, _, err := getNetwork(network.Target); err == nil { + if resolvedNetwork, ok := resolved.(types.NetworkResource); ok { + networkNames[resolvedNetwork.ID] = resolvedNetwork.Name + } + } + } + return networkNames +} + +// ServiceInspectWrite renders the context for a list of services +func ServiceInspectWrite(ctx Context, refs []string, getRef, getNetwork inspect.GetRefFunc) error { + if ctx.Format != serviceInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + serviceI, _, err := getRef(ref) + if err != nil { + return err + } + service, ok := serviceI.(swarm.Service) + if !ok { + return errors.Errorf("got wrong object to inspect") + } + if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil { + return err + } + } 
+ return nil + } + return ctx.Write(&serviceInspectContext{}, render) +} + +type serviceInspectContext struct { + swarm.Service + subContext + + // networkNames is a map from network IDs (as found in + // Networks[x].Target) to network names. + networkNames map[string]string +} + +func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { + return marshalJSON(ctx) +} + +func (ctx *serviceInspectContext) ID() string { + return ctx.Service.ID +} + +func (ctx *serviceInspectContext) Name() string { + return ctx.Service.Spec.Name +} + +func (ctx *serviceInspectContext) Labels() map[string]string { + return ctx.Service.Spec.Labels +} + +func (ctx *serviceInspectContext) IsModeGlobal() bool { + return ctx.Service.Spec.Mode.Global != nil +} + +func (ctx *serviceInspectContext) IsModeReplicated() bool { + return ctx.Service.Spec.Mode.Replicated != nil +} + +func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { + return ctx.Service.Spec.Mode.Replicated.Replicas +} + +func (ctx *serviceInspectContext) HasUpdateStatus() bool { + return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != "" +} + +func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { + return ctx.Service.UpdateStatus.State +} + +func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool { + return ctx.Service.UpdateStatus.StartedAt != nil +} + +func (ctx *serviceInspectContext) UpdateStatusStarted() string { + return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) + " ago" +} + +func (ctx *serviceInspectContext) UpdateIsCompleted() bool { + return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil +} + +func (ctx *serviceInspectContext) UpdateStatusCompleted() string { + return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) + " ago" +} + +func (ctx *serviceInspectContext) UpdateStatusMessage() string { + return ctx.Service.UpdateStatus.Message +} + +func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.Constraints + } + return nil +} + +func (ctx *serviceInspectContext) TaskPlacementPreferences() []string { + if ctx.Service.Spec.TaskTemplate.Placement == nil { + return nil + } + var strings []string + for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + strings = append(strings, "spread="+pref.Spread.SpreadDescriptor) + } + } + return strings +} + +func (ctx *serviceInspectContext) HasUpdateConfig() bool { + return ctx.Service.Spec.UpdateConfig != nil +} + +func (ctx *serviceInspectContext) UpdateParallelism() uint64 { + return ctx.Service.Spec.UpdateConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasUpdateDelay() bool { + return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateDelay() time.Duration { + return ctx.Service.Spec.UpdateConfig.Delay +} + +func (ctx *serviceInspectContext) UpdateOnFailure() string { + return ctx.Service.Spec.UpdateConfig.FailureAction +} + +func (ctx *serviceInspectContext) UpdateOrder() string { + return ctx.Service.Spec.UpdateConfig.Order +} + +func (ctx *serviceInspectContext) HasUpdateMonitor() bool { + return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { + return ctx.Service.Spec.UpdateConfig.Monitor +} + +func (ctx 
*serviceInspectContext) UpdateMaxFailureRatio() float32 { + return ctx.Service.Spec.UpdateConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) HasRollbackConfig() bool { + return ctx.Service.Spec.RollbackConfig != nil +} + +func (ctx *serviceInspectContext) RollbackParallelism() uint64 { + return ctx.Service.Spec.RollbackConfig.Parallelism +} + +func (ctx *serviceInspectContext) HasRollbackDelay() bool { + return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) RollbackDelay() time.Duration { + return ctx.Service.Spec.RollbackConfig.Delay +} + +func (ctx *serviceInspectContext) RollbackOnFailure() string { + return ctx.Service.Spec.RollbackConfig.FailureAction +} + +func (ctx *serviceInspectContext) HasRollbackMonitor() bool { + return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) RollbackMonitor() time.Duration { + return ctx.Service.Spec.RollbackConfig.Monitor +} + +func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 { + return ctx.Service.Spec.RollbackConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) RollbackOrder() string { + return ctx.Service.Spec.RollbackConfig.Order +} + +func (ctx *serviceInspectContext) ContainerImage() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image +} + +func (ctx *serviceInspectContext) ContainerArgs() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args +} + +func (ctx *serviceInspectContext) ContainerEnv() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env +} + +func (ctx *serviceInspectContext) ContainerWorkDir() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir +} + +func (ctx *serviceInspectContext) ContainerUser() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.User +} + +func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts +} + +func (ctx *serviceInspectContext) HasResources() bool { + return ctx.Service.Spec.TaskTemplate.Resources != nil +} + +func (ctx *serviceInspectContext) HasResourceReservations() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { + return float64(0) + } + return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceReservationMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) +} + +func (ctx *serviceInspectContext) HasResourceLimits() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { + return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceLimitMemory() string { 
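+ // An unset (zero) memory limit is rendered as an empty string, which lets the {{- if .ResourceLimitMemory }} guard in the pretty template drop the line entirely.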
+func (ctx *serviceInspectContext) ResourceLimitMemory() string {
+    if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 {
+        return ""
+    }
+    return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
+}
+
+func (ctx *serviceInspectContext) Networks() []string {
+    var out []string
+    for _, n := range ctx.Service.Spec.TaskTemplate.Networks {
+        if name, ok := ctx.networkNames[n.Target]; ok {
+            out = append(out, name)
+        } else {
+            out = append(out, n.Target)
+        }
+    }
+    return out
+}
+
+func (ctx *serviceInspectContext) EndpointMode() string {
+    if ctx.Service.Spec.EndpointSpec == nil {
+        return ""
+    }
+
+    return string(ctx.Service.Spec.EndpointSpec.Mode)
+}
+
+func (ctx *serviceInspectContext) Ports() []swarm.PortConfig {
+    return ctx.Service.Endpoint.Ports
+}
+
+const (
+    defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}"
+
+    serviceIDHeader = "ID"
+    modeHeader      = "MODE"
+    replicasHeader  = "REPLICAS"
+)
+
+// NewServiceListFormat returns a Format for rendering using a service Context
+func NewServiceListFormat(source string, quiet bool) Format {
+    switch source {
+    case TableFormatKey:
+        if quiet {
+            return defaultQuietFormat
+        }
+        return defaultServiceTableFormat
+    case RawFormatKey:
+        if quiet {
+            return `id: {{.ID}}`
+        }
+        return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n`
+    }
+    return Format(source)
+}
+
+// ServiceListInfo stores the information about mode and replicas to be used by the template
+type ServiceListInfo struct {
+    Mode     string
+    Replicas string
+}
+
+// ServiceListWrite writes the context
+func ServiceListWrite(ctx Context, services []swarm.Service, info map[string]ServiceListInfo) error {
+    render := func(format func(subContext subContext) error) error {
+        for _, service := range services {
+            serviceCtx := &serviceContext{service: service, mode: info[service.ID].Mode, replicas: info[service.ID].Replicas}
+            if err := format(serviceCtx); err != nil {
+                return err
+            }
+        }
+        return nil
+    }
+    serviceCtx := serviceContext{}
+    serviceCtx.header = map[string]string{
+        "ID":       serviceIDHeader,
+        "Name":     nameHeader,
+        "Mode":     modeHeader,
+        "Replicas": replicasHeader,
+        "Image":    imageHeader,
+        "Ports":    portsHeader,
+    }
+    return ctx.Write(&serviceCtx, render)
+}
+
+type serviceContext struct {
+    HeaderContext
+    service  swarm.Service
+    mode     string
+    replicas string
+}
+
+func (c *serviceContext) MarshalJSON() ([]byte, error) {
+    return marshalJSON(c)
+}
+
+func (c *serviceContext) ID() string {
+    return stringid.TruncateID(c.service.ID)
+}
+
+func (c *serviceContext) Name() string {
+    return c.service.Spec.Name
+}
+
+func (c *serviceContext) Mode() string {
+    return c.mode
+}
+
+func (c *serviceContext) Replicas() string {
+    return c.replicas
+}
+
+func (c *serviceContext) Image() string {
+    image := c.service.Spec.TaskTemplate.ContainerSpec.Image
+    if ref, err := reference.ParseNormalizedNamed(image); err == nil {
+        // update image string for display (strips any digest)
+        if nt, ok := ref.(reference.NamedTagged); ok {
+            if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
+                image = reference.FamiliarString(namedTagged)
+            }
+        }
+    }
+
+    return image
+}
+
+func (c *serviceContext) Ports() string {
+    if c.service.Spec.EndpointSpec == nil || c.service.Spec.EndpointSpec.Ports == nil {
+        return ""
+    }
+    ports := []string{}
+    for _, pConfig := range c.service.Spec.EndpointSpec.Ports {
+        if pConfig.PublishMode == swarm.PortConfigPublishModeIngress {
+            ports = append(ports, fmt.Sprintf("*:%d->%d/%s",
+                pConfig.PublishedPort,
+                pConfig.TargetPort,
+                pConfig.Protocol,
+            ))
+        }
+    }
+    return strings.Join(ports, ",")
+}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/stack.go b/vendor/github.com/docker/cli/cli/command/formatter/stack.go
new file mode 100644
index 0000000..676bcc0
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/formatter/stack.go
@@ -0,0 +1,67 @@
+package formatter
+
+import (
+    "strconv"
+)
+
+const (
+    defaultStackTableFormat = "table {{.Name}}\t{{.Services}}"
+
+    stackServicesHeader = "SERVICES"
+)
+
+// Stack contains deployed stack information.
+type Stack struct {
+    // Name is the name of the stack
+    Name string
+    // Services is the number of services in the stack
+    Services int
+}
+
+// NewStackFormat returns a format for use with a stack Context
+func NewStackFormat(source string) Format {
+    switch source {
+    case TableFormatKey:
+        return defaultStackTableFormat
+    }
+    return Format(source)
+}
+
+// StackWrite writes formatted stacks using the Context
+func StackWrite(ctx Context, stacks []*Stack) error {
+    render := func(format func(subContext subContext) error) error {
+        for _, stack := range stacks {
+            if err := format(&stackContext{s: stack}); err != nil {
+                return err
+            }
+        }
+        return nil
+    }
+    return ctx.Write(newStackContext(), render)
+}
+
+type stackContext struct {
+    HeaderContext
+    s *Stack
+}
+
+func newStackContext() *stackContext {
+    stackCtx := stackContext{}
+    stackCtx.header = map[string]string{
+        "Name":     nameHeader,
+        "Services": stackServicesHeader,
+    }
+    return &stackCtx
+}
+
+func (s *stackContext) MarshalJSON() ([]byte, error) {
+    return marshalJSON(s)
+}
+
+func (s *stackContext) Name() string {
+    return s.s.Name
+}
+
+func (s *stackContext) Services() string {
+    return strconv.Itoa(s.s.Services)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/stats.go b/vendor/github.com/docker/cli/cli/command/formatter/stats.go
new file mode 100644
index 0000000..f53387e
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/formatter/stats.go
@@ -0,0 +1,218 @@
+package formatter
+
+import (
+    "fmt"
+    "sync"
+
+    units "github.com/docker/go-units"
+)
+
+const (
+    winOSType                  = "windows"
+    defaultStatsTableFormat    = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}"
+    winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}"
+
+    containerHeader = "CONTAINER"
+    cpuPercHeader   = "CPU %"
+    netIOHeader     = "NET I/O"
+    blockIOHeader   = "BLOCK I/O"
+    memPercHeader   = "MEM %"             // Used only on Linux
+    winMemUseHeader = "PRIV WORKING SET"  // Used only on Windows
+    memUseHeader    = "MEM USAGE / LIMIT" // Used only on Linux
+    pidsHeader      = "PIDS"              // Used only on Linux
+)
+
+// StatsEntry represents the statistics data collected from a container
+type StatsEntry struct {
+    Container        string
+    Name             string
+    ID               string
+    CPUPercentage    float64
+    Memory           float64 // On Windows this is the private working set
+    MemoryLimit      float64 // Not used on Windows
+    MemoryPercentage float64 // Not used on Windows
+    NetworkRx        float64
+    NetworkTx        float64
+    BlockRead        float64
+    BlockWrite       float64
+    PidsCurrent      uint64 // Not used on Windows
+    IsInvalid        bool
+}
+
+// ContainerStats represents an entity to store container statistics synchronously
+type ContainerStats struct {
+    mutex sync.Mutex
+    StatsEntry
+    err error
+}
+
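+// The methods below guard StatsEntry with the mutex so that collector
+// goroutines and the rendering side can safely run concurrently.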
+// GetError returns the container statistics error.
+// This is used to determine whether the statistics are valid or not
+func (cs *ContainerStats) GetError() error {
+    cs.mutex.Lock()
+    defer cs.mutex.Unlock()
+    return cs.err
+}
+
+// SetErrorAndReset zeroes all the container statistics and stores the error.
+// It is used when a timeout error is received during statistics collection,
+// to reduce lock overhead
+func (cs *ContainerStats) SetErrorAndReset(err error) {
+    cs.mutex.Lock()
+    defer cs.mutex.Unlock()
+    cs.CPUPercentage = 0
+    cs.Memory = 0
+    cs.MemoryPercentage = 0
+    cs.MemoryLimit = 0
+    cs.NetworkRx = 0
+    cs.NetworkTx = 0
+    cs.BlockRead = 0
+    cs.BlockWrite = 0
+    cs.PidsCurrent = 0
+    cs.err = err
+    cs.IsInvalid = true
+}
+
+// SetError sets the container statistics error
+func (cs *ContainerStats) SetError(err error) {
+    cs.mutex.Lock()
+    defer cs.mutex.Unlock()
+    cs.err = err
+    if err != nil {
+        cs.IsInvalid = true
+    }
+}
+
+// SetStatistics sets the container statistics
+func (cs *ContainerStats) SetStatistics(s StatsEntry) {
+    cs.mutex.Lock()
+    defer cs.mutex.Unlock()
+    s.Container = cs.Container
+    cs.StatsEntry = s
+}
+
+// GetStatistics returns container statistics with other metadata such as the container name
+func (cs *ContainerStats) GetStatistics() StatsEntry {
+    cs.mutex.Lock()
+    defer cs.mutex.Unlock()
+    return cs.StatsEntry
+}
+
+// NewStatsFormat returns a format for rendering a containerStatsContext
+func NewStatsFormat(source, osType string) Format {
+    if source == TableFormatKey {
+        if osType == winOSType {
+            return Format(winDefaultStatsTableFormat)
+        }
+        return Format(defaultStatsTableFormat)
+    }
+    return Format(source)
+}
+
+// NewContainerStats returns a new ContainerStats entity and sets the given container name in it
+func NewContainerStats(container string) *ContainerStats {
+    return &ContainerStats{StatsEntry: StatsEntry{Container: container}}
+}
+
+// ContainerStatsWrite renders the context for a list of container statistics
+func ContainerStatsWrite(ctx Context, containerStats []StatsEntry, osType string) error {
+    render := func(format func(subContext subContext) error) error {
+        for _, cstats := range containerStats {
+            containerStatsCtx := &containerStatsContext{
+                s:  cstats,
+                os: osType,
+            }
+            if err := format(containerStatsCtx); err != nil {
+                return err
+            }
+        }
+        return nil
+    }
+    memUsage := memUseHeader
+    if osType == winOSType {
+        memUsage = winMemUseHeader
+    }
+    containerStatsCtx := containerStatsContext{}
+    containerStatsCtx.header = map[string]string{
+        "Container": containerHeader,
+        "Name":      nameHeader,
+        "ID":        containerIDHeader,
+        "CPUPerc":   cpuPercHeader,
+        "MemUsage":  memUsage,
+        "MemPerc":   memPercHeader,
+        "NetIO":     netIOHeader,
+        "BlockIO":   blockIOHeader,
+        "PIDs":      pidsHeader,
+    }
+    containerStatsCtx.os = osType
+    return ctx.Write(&containerStatsCtx, render)
+}
+
+type containerStatsContext struct {
+    HeaderContext
+    s  StatsEntry
+    os string
+}
+
+func (c *containerStatsContext) MarshalJSON() ([]byte, error) {
+    return marshalJSON(c)
+}
+
+func (c *containerStatsContext) Container() string {
+    return c.s.Container
+}
+
+func (c *containerStatsContext) Name() string {
+    if len(c.s.Name) > 1 {
+        return c.s.Name[1:]
+    }
+    return "--"
+}
+
+func (c *containerStatsContext) ID() string {
+    return c.s.ID
+}
+
+func (c *containerStatsContext) CPUPerc() string {
+    if c.s.IsInvalid {
+        return fmt.Sprintf("--")
+    }
+    return fmt.Sprintf("%.2f%%", c.s.CPUPercentage)
+}
+
+func (c *containerStatsContext) MemUsage() string {
+    if c.s.IsInvalid {
+        return fmt.Sprintf("-- / --")
+    }
+    if c.os == winOSType {
+        return fmt.Sprintf("%s", units.BytesSize(c.s.Memory))
+    }
+    return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit))
+}
+
+func (c *containerStatsContext) MemPerc() string {
+    if c.s.IsInvalid || c.os == winOSType {
+        return fmt.Sprintf("--")
+    }
+    return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage)
+}
+
+func (c *containerStatsContext) NetIO() string {
+    if c.s.IsInvalid {
+        return fmt.Sprintf("--")
+    }
+    return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3))
+}
+
+func (c *containerStatsContext) BlockIO() string {
+    if c.s.IsInvalid {
+        return fmt.Sprintf("--")
+    }
+    return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3))
+}
+
+func (c *containerStatsContext) PIDs() string {
+    if c.s.IsInvalid || c.os == winOSType {
+        return fmt.Sprintf("--")
+    }
+    return fmt.Sprintf("%d", c.s.PidsCurrent)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/task.go b/vendor/github.com/docker/cli/cli/command/formatter/task.go
new file mode 100644
index 0000000..6172b32
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/formatter/task.go
@@ -0,0 +1,150 @@
+package formatter
+
+import (
+    "fmt"
+    "strings"
+    "time"
+
+    "github.com/docker/cli/cli/command"
+    "github.com/docker/distribution/reference"
+    "github.com/docker/docker/api/types/swarm"
+    "github.com/docker/docker/pkg/stringid"
+    "github.com/docker/go-units"
+)
+
+const (
+    defaultTaskTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Image}}\t{{.Node}}\t{{.DesiredState}}\t{{.CurrentState}}\t{{.Error}}\t{{.Ports}}"
+
+    nodeHeader         = "NODE"
+    taskIDHeader       = "ID"
+    desiredStateHeader = "DESIRED STATE"
+    currentStateHeader = "CURRENT STATE"
+    errorHeader        = "ERROR"
+
+    maxErrLength = 30
+)
+
+// NewTaskFormat returns a Format for rendering using a task Context
+func NewTaskFormat(source string, quiet bool) Format {
+    switch source {
+    case TableFormatKey:
+        if quiet {
+            return defaultQuietFormat
+        }
+        return defaultTaskTableFormat
+    case RawFormatKey:
+        if quiet {
+            return `id: {{.ID}}`
+        }
+        return `id: {{.ID}}\nname: {{.Name}}\nimage: {{.Image}}\nnode: {{.Node}}\ndesired_state: {{.DesiredState}}\ncurrent_state: {{.CurrentState}}\nerror: {{.Error}}\nports: {{.Ports}}\n`
+    }
+    return Format(source)
+}
+
+// TaskWrite writes the context
+func TaskWrite(ctx Context, tasks []swarm.Task, names map[string]string, nodes map[string]string) error {
+    render := func(format func(subContext subContext) error) error {
+        for _, task := range tasks {
+            taskCtx := &taskContext{trunc: ctx.Trunc, task: task, name: names[task.ID], node: nodes[task.ID]}
+            if err := format(taskCtx); err != nil {
+                return err
+            }
+        }
+        return nil
+    }
+    taskCtx := taskContext{}
+    taskCtx.header = taskHeaderContext{
+        "ID":           taskIDHeader,
+        "Name":         nameHeader,
+        "Image":        imageHeader,
+        "Node":         nodeHeader,
+        "DesiredState": desiredStateHeader,
+        "CurrentState": currentStateHeader,
+        "Error":        errorHeader,
+        "Ports":        portsHeader,
+    }
+    return ctx.Write(&taskCtx, render)
+}
+
+type taskHeaderContext map[string]string
+
+type taskContext struct {
+    HeaderContext
+    trunc bool
+    task  swarm.Task
+    name  string
+    node  string
+}
+
+func (c *taskContext) MarshalJSON() ([]byte, error) {
+    return marshalJSON(c)
+}
+
+func (c *taskContext) ID() string {
+    if c.trunc {
+        return stringid.TruncateID(c.task.ID)
+    }
+    return c.task.ID
+}
+
+func (c *taskContext) Name() string {
+    return c.name
+}
+
+func (c *taskContext) Image() string {
+    image := c.task.Spec.ContainerSpec.Image
+    if c.trunc {
+        ref, err := reference.ParseNormalizedNamed(image)
+        if err == nil {
+            // update image string for display (strips any digest)
+            if nt, ok := ref.(reference.NamedTagged); ok {
+                if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
+                    image = reference.FamiliarString(namedTagged)
+                }
+            }
+        }
+    }
+    return image
+}
+
+func (c *taskContext) Node() string {
+    return c.node
+}
+
+func (c *taskContext) DesiredState() string {
+    return command.PrettyPrint(c.task.DesiredState)
+}
+
+func (c *taskContext) CurrentState() string {
+    return fmt.Sprintf("%s %s ago",
+        command.PrettyPrint(c.task.Status.State),
+        strings.ToLower(units.HumanDuration(time.Since(c.task.Status.Timestamp))),
+    )
+}
+
+func (c *taskContext) Error() string {
+    // Trim and quote the error message.
+    taskErr := c.task.Status.Err
+    if c.trunc && len(taskErr) > maxErrLength {
+        taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1])
+    }
+    if len(taskErr) > 0 {
+        taskErr = fmt.Sprintf("\"%s\"", taskErr)
+    }
+    return taskErr
+}
+
+func (c *taskContext) Ports() string {
+    if len(c.task.Status.PortStatus.Ports) == 0 {
+        return ""
+    }
+    ports := []string{}
+    for _, pConfig := range c.task.Status.PortStatus.Ports {
+        ports = append(ports, fmt.Sprintf("*:%d->%d/%s",
+            pConfig.PublishedPort,
+            pConfig.TargetPort,
+            pConfig.Protocol,
+        ))
+    }
+    return strings.Join(ports, ",")
+}
diff --git a/vendor/github.com/docker/cli/cli/command/formatter/volume.go b/vendor/github.com/docker/cli/cli/command/formatter/volume.go
new file mode 100644
index 0000000..342f2fb
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/formatter/volume.go
@@ -0,0 +1,131 @@
+package formatter
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/docker/docker/api/types"
+    units "github.com/docker/go-units"
+)
+
+const (
+    defaultVolumeQuietFormat = "{{.Name}}"
+    defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}"
+
+    volumeNameHeader = "VOLUME NAME"
+    mountpointHeader = "MOUNTPOINT"
+    linksHeader      = "LINKS"
+    // Status header ?
+)
+
+// NewVolumeFormat returns a format for use with a volume Context
+func NewVolumeFormat(source string, quiet bool) Format {
+    switch source {
+    case TableFormatKey:
+        if quiet {
+            return defaultVolumeQuietFormat
+        }
+        return defaultVolumeTableFormat
+    case RawFormatKey:
+        if quiet {
+            return `name: {{.Name}}`
+        }
+        return `name: {{.Name}}\ndriver: {{.Driver}}\n`
+    }
+    return Format(source)
+}
+
+// VolumeWrite writes formatted volumes using the Context
+func VolumeWrite(ctx Context, volumes []*types.Volume) error {
+    render := func(format func(subContext subContext) error) error {
+        for _, volume := range volumes {
+            if err := format(&volumeContext{v: *volume}); err != nil {
+                return err
+            }
+        }
+        return nil
+    }
+    return ctx.Write(newVolumeContext(), render)
+}
+
+type volumeHeaderContext map[string]string
+
+func (c volumeHeaderContext) Label(name string) string {
+    n := strings.Split(name, ".")
+    r := strings.NewReplacer("-", " ", "_", " ")
+    h := r.Replace(n[len(n)-1])
+
+    return h
+}
+
+type volumeContext struct {
+    HeaderContext
+    v types.Volume
+}
+
+func newVolumeContext() *volumeContext {
+    volumeCtx := volumeContext{}
+    volumeCtx.header = volumeHeaderContext{
+        "Name":       volumeNameHeader,
+        "Driver":     driverHeader,
+        "Scope":      scopeHeader,
+        "Mountpoint": mountpointHeader,
+        "Labels":     labelsHeader,
+        "Links":      linksHeader,
+        "Size":       sizeHeader,
+    }
+    return &volumeCtx
+}
+
+func (c *volumeContext) MarshalJSON() ([]byte, error) {
+    return marshalJSON(c)
+}
+
+func (c *volumeContext) Name() string {
+    return c.v.Name
+}
+
+func (c *volumeContext) Driver() string {
+    return c.v.Driver
+}
+
+func (c *volumeContext) Scope() string {
+    return c.v.Scope
+}
+
+func (c *volumeContext) Mountpoint() string {
+    return c.v.Mountpoint
+}
+
+func (c *volumeContext) Labels() string {
+    if c.v.Labels == nil {
+        return ""
+    }
+
+    var joinLabels []string
+    for k, v := range c.v.Labels {
+        joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
+    }
+    return strings.Join(joinLabels, ",")
+}
+
+func (c *volumeContext) Label(name string) string {
+    if c.v.Labels == nil {
+        return ""
+    }
+    return c.v.Labels[name]
+}
+
+func (c *volumeContext) Links() string {
+    if c.v.UsageData == nil {
+        return "N/A"
+    }
+    return fmt.Sprintf("%d", c.v.UsageData.RefCount)
+}
+
+func (c *volumeContext) Size() string {
+    if c.v.UsageData == nil {
+        return "N/A"
+    }
+    return units.HumanSize(float64(c.v.UsageData.Size))
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/build.go b/vendor/github.com/docker/cli/cli/command/image/build.go
new file mode 100644
index 0000000..7397703
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/build.go
@@ -0,0 +1,503 @@
+package image
+
+import (
+    "archive/tar"
+    "bufio"
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "regexp"
+    "runtime"
+
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+    "github.com/docker/cli/cli/command/image/build"
+    "github.com/docker/cli/opts"
+    "github.com/docker/distribution/reference"
+    "github.com/docker/docker/api"
+    "github.com/docker/docker/api/types"
+    "github.com/docker/docker/api/types/container"
+    "github.com/docker/docker/pkg/archive"
+    "github.com/docker/docker/pkg/jsonmessage"
+    "github.com/docker/docker/pkg/progress"
+    "github.com/docker/docker/pkg/streamformatter"
+    "github.com/docker/docker/pkg/urlutil"
+    units "github.com/docker/go-units"
+    "github.com/pkg/errors"
+    "github.com/spf13/cobra"
+    "golang.org/x/net/context"
+)
+
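+// buildOptions groups every flag value accepted by `docker build`;
+// NewBuildCommand below wires up the corresponding flags.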
+type buildOptions struct {
+    context        string
+    dockerfileName string
+    tags           opts.ListOpts
+    labels         opts.ListOpts
+    buildArgs      opts.ListOpts
+    extraHosts     opts.ListOpts
+    ulimits        *opts.UlimitOpt
+    memory         opts.MemBytes
+    memorySwap     opts.MemSwapBytes
+    shmSize        opts.MemBytes
+    cpuShares      int64
+    cpuPeriod      int64
+    cpuQuota       int64
+    cpuSetCpus     string
+    cpuSetMems     string
+    cgroupParent   string
+    isolation      string
+    quiet          bool
+    noCache        bool
+    rm             bool
+    forceRm        bool
+    pull           bool
+    cacheFrom      []string
+    compress       bool
+    securityOpt    []string
+    networkMode    string
+    squash         bool
+    target         string
+    imageIDFile    string
+    volumes        opts.ListOpts
+}
+
+// dockerfileFromStdin returns true when the user specified that the Dockerfile
+// should be read from stdin instead of a file
+func (o buildOptions) dockerfileFromStdin() bool {
+    return o.dockerfileName == "-"
+}
+
+// contextFromStdin returns true when the user specified that the build context
+// should be read from stdin
+func (o buildOptions) contextFromStdin() bool {
+    return o.context == "-"
+}
+
+// NewBuildCommand creates a new `docker build` command
+func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
+    ulimits := make(map[string]*units.Ulimit)
+    options := buildOptions{
+        tags:       opts.NewListOpts(validateTag),
+        buildArgs:  opts.NewListOpts(opts.ValidateEnv),
+        ulimits:    opts.NewUlimitOpt(&ulimits),
+        labels:     opts.NewListOpts(opts.ValidateEnv),
+        extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
+        volumes:    opts.NewListOpts(nil),
+    }
+
+    cmd := &cobra.Command{
+        Use:   "build [OPTIONS] PATH | URL | -",
+        Short: "Build an image from a Dockerfile",
+        Args:  cli.ExactArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            options.context = args[0]
+            return runBuild(dockerCli, options)
+        },
+    }
+
+    flags := cmd.Flags()
+
+    flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format")
+    flags.Var(&options.buildArgs, "build-arg", "Set build-time variables")
+    flags.Var(options.ulimits, "ulimit", "Ulimit options")
+    flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
+    flags.VarP(&options.memory, "memory", "m", "Memory limit")
+    flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
+    flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm")
+    flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
+    flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
+    flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
+    flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
+    flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
+    flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
+    flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
+    flags.Var(&options.labels, "label", "Set metadata for an image")
+    flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
+    flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
+    flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
+    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
+    flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
+    flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
+    flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
+    flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
+    flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
+    flags.SetAnnotation("network", "version", []string{"1.25"})
+    flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
+    flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
+    flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
+
+    command.AddTrustVerificationFlags(flags)
+
+    flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
+    flags.SetAnnotation("squash", "experimental", nil)
+    flags.SetAnnotation("squash", "version", []string{"1.25"})
+    flags.VarP(&options.volumes, "volume", "v", "Bind mount a volume")
+
+    return cmd
+}
+
+// lastProgressOutput is the same as progress.Output except that
+// it only outputs the last update. It is used in non-terminal
+// scenarios to suppress verbose messages
+type lastProgressOutput struct {
+    output progress.Output
+}
+
+// WriteProgress formats progress information from a ProgressReader.
+func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
+    if !prog.LastUpdate {
+        return nil
+    }
+
+    return out.output.WriteProgress(prog)
+}
+
+// nolint: gocyclo
+func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
+    var (
+        buildCtx      io.ReadCloser
+        dockerfileCtx io.ReadCloser
+        err           error
+        contextDir    string
+        tempDir       string
+        relDockerfile string
+        progBuff      io.Writer
+        buildBuff     io.Writer
+    )
+
+    if options.dockerfileFromStdin() {
+        if options.contextFromStdin() {
+            return errors.New("invalid argument: can't use stdin for both build context and dockerfile")
+        }
+        dockerfileCtx = dockerCli.In()
+    }
+
+    specifiedContext := options.context
+    progBuff = dockerCli.Out()
+    buildBuff = dockerCli.Out()
+    if options.quiet {
+        progBuff = bytes.NewBuffer(nil)
+        buildBuff = bytes.NewBuffer(nil)
+    }
+    if options.imageIDFile != "" {
+        // Avoid leaving a stale file if we eventually fail
+        if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
+            return errors.Wrap(err, "Removing image ID file")
+        }
+    }
+
+    switch {
+    case options.contextFromStdin():
+        buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName)
+    case isLocalDir(specifiedContext):
+        contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
+    case urlutil.IsGitURL(specifiedContext):
+        tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName)
+    case urlutil.IsURL(specifiedContext):
+        buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
+    default:
+        return errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
+    }
+
+    if err != nil {
+        if options.quiet && urlutil.IsURL(specifiedContext) {
+            fmt.Fprintln(dockerCli.Err(), progBuff)
+        }
+        return errors.Errorf("unable to prepare context: %s", err)
+    }
+
+    if tempDir != "" {
+        defer os.RemoveAll(tempDir)
+        contextDir = tempDir
+    }
+
+    if buildCtx == nil {
+        excludes, err := build.ReadDockerignore(contextDir)
+        if err != nil {
+            return err
+        }
+
+        if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
+            return errors.Errorf("error checking context: '%s'.", err)
+        }
+
+        // Canonicalize the Dockerfile name to a platform-independent one
+        relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
+        if err != nil {
+            return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
+        }
+
+        excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin())
+
+        compression := archive.Uncompressed
+        if options.compress {
+            compression = archive.Gzip
+        }
+        buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
+            Compression:     compression,
+            ExcludePatterns: excludes,
+        })
+        if err != nil {
+            return err
+        }
+    }
+
+    // replace Dockerfile if added dynamically
+    if dockerfileCtx != nil {
+        buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
+        if err != nil {
+            return err
+        }
+    }
+
+    ctx := context.Background()
+
+    var resolvedTags []*resolvedTag
+    if command.IsTrusted() {
+        translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
+            return TrustedReference(ctx, dockerCli, ref, nil)
+        }
+        // Wrap the tar archive to replace the Dockerfile entry with the rewritten
+        // Dockerfile which uses trusted pulls.
+        buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
+    }
+
+    // Set up an upload progress bar
+    progressOutput := streamformatter.NewProgressOutput(progBuff)
+    if !dockerCli.Out().IsTerminal() {
+        progressOutput = &lastProgressOutput{output: progressOutput}
+    }
+
+    var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
+
+    authConfigs, _ := dockerCli.GetAllCredentials()
+    buildOptions := types.ImageBuildOptions{
+        Memory:         options.memory.Value(),
+        MemorySwap:     options.memorySwap.Value(),
+        Tags:           options.tags.GetAll(),
+        SuppressOutput: options.quiet,
+        NoCache:        options.noCache,
+        Remove:         options.rm,
+        ForceRemove:    options.forceRm,
+        PullParent:     options.pull,
+        Isolation:      container.Isolation(options.isolation),
+        CPUSetCPUs:     options.cpuSetCpus,
+        CPUSetMems:     options.cpuSetMems,
+        CPUShares:      options.cpuShares,
+        CPUQuota:       options.cpuQuota,
+        CPUPeriod:      options.cpuPeriod,
+        CgroupParent:   options.cgroupParent,
+        Dockerfile:     relDockerfile,
+        ShmSize:        options.shmSize.Value(),
+        Ulimits:        options.ulimits.GetList(),
+        BuildArgs:      dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()),
+        AuthConfigs:    authConfigs,
+        Labels:         opts.ConvertKVStringsToMap(options.labels.GetAll()),
+        CacheFrom:      options.cacheFrom,
+        SecurityOpt:    options.securityOpt,
+        NetworkMode:    options.networkMode,
+        Squash:         options.squash,
+        ExtraHosts:     options.extraHosts.GetAll(),
+        Target:         options.target,
+        Volumes:        options.volumes.GetAll(),
+    }
+
+    response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
+    if err != nil {
+        if options.quiet {
+            fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
+        }
+        return err
+    }
+    defer response.Body.Close()
+
+    imageID := ""
+    aux := func(auxJSON *json.RawMessage) {
+        var result types.BuildResult
+        if err := json.Unmarshal(*auxJSON, &result); err != nil {
+            fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
+        } else {
+            imageID = result.ID
+        }
+    }
+
+    err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux)
+    if err != nil {
+        if jerr, ok := err.(*jsonmessage.JSONError); ok {
+            // If no error code is set, default to 1
+            if jerr.Code == 0 {
+                jerr.Code = 1
+            }
+            if options.quiet {
+                fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
+            }
+            return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
+        }
+        return err
+    }
+
+    // Windows: show a warning about modified file permissions when building
+    // from Windows against a daemon that isn't running Windows.
+    if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
+        fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+
+            "image from Windows against a non-Windows Docker host. All files and "+
+            "directories added to build context will have '-rwxr-xr-x' permissions. "+
+            "It is recommended to double check and reset permissions for sensitive "+
+            "files and directories.")
+    }
+
+    // Everything worked so if -q was provided the output from the daemon
+    // should be just the image ID and we'll print that to stdout.
+    if options.quiet {
+        imageID = fmt.Sprintf("%s", buildBuff)
+        fmt.Fprintf(dockerCli.Out(), imageID)
+    }
+
+    if options.imageIDFile != "" {
+        if imageID == "" {
+            return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile)
+        }
+        if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
+            return err
+        }
+    }
+    if command.IsTrusted() {
+        // Since the build was successful, now we must tag any of the resolved
+        // images from the above Dockerfile rewrite.
+        for _, resolved := range resolvedTags {
+            if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
+                return err
+            }
+        }
+    }
+
+    return nil
+}
+
+func isLocalDir(c string) bool {
+    _, err := os.Stat(c)
+    return err == nil
+}
+
+type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error)
+
+// validateTag checks if the given image name can be resolved.
+func validateTag(rawRepo string) (string, error) {
+    _, err := reference.ParseNormalizedNamed(rawRepo)
+    if err != nil {
+        return "", err
+    }
+
+    return rawRepo, nil
+}
+
+var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
+
+// resolvedTag records the repository, tag, and resolved digest reference
+// from a Dockerfile rewrite.
+type resolvedTag struct {
+    digestRef reference.Canonical
+    tagRef    reference.NamedTagged
+}
+
+// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
+// "FROM <image>" instructions to a digest reference. `translator` is a
+// function that takes a repository name and tag reference and returns a
+// trusted digest reference.
+func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
+    scanner := bufio.NewScanner(dockerfile)
+    buf := bytes.NewBuffer(nil)
+
+    // Scan the lines of the Dockerfile, looking for a "FROM" line.
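+    // Each image reference that matches is passed through the translator
+    // and the line is rewritten to a pinned digest reference.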
+    for scanner.Scan() {
+        line := scanner.Text()
+
+        matches := dockerfileFromLinePattern.FindStringSubmatch(line)
+        if matches != nil && matches[1] != api.NoBaseImageSpecifier {
+            // Replace the line with a resolved "FROM repo@digest"
+            var ref reference.Named
+            ref, err = reference.ParseNormalizedNamed(matches[1])
+            if err != nil {
+                return nil, nil, err
+            }
+            ref = reference.TagNameOnly(ref)
+            if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() {
+                trustedRef, err := translator(ctx, ref)
+                if err != nil {
+                    return nil, nil, err
+                }
+
+                line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef)))
+                resolvedTags = append(resolvedTags, &resolvedTag{
+                    digestRef: trustedRef,
+                    tagRef:    ref,
+                })
+            }
+        }
+
+        _, err := fmt.Fprintln(buf, line)
+        if err != nil {
+            return nil, nil, err
+        }
+    }
+
+    return buf.Bytes(), resolvedTags, scanner.Err()
+}
+
+// replaceDockerfileTarWrapper wraps the given input tar archive stream and
+// replaces the entry with the given Dockerfile name with the contents of the
+// new Dockerfile. Returns a new tar archive stream with the replaced
+// Dockerfile.
+func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
+    pipeReader, pipeWriter := io.Pipe()
+    go func() {
+        tarReader := tar.NewReader(inputTarStream)
+        tarWriter := tar.NewWriter(pipeWriter)
+
+        defer inputTarStream.Close()
+
+        for {
+            hdr, err := tarReader.Next()
+            if err == io.EOF {
+                // Signals end of archive.
+                tarWriter.Close()
+                pipeWriter.Close()
+                return
+            }
+            if err != nil {
+                pipeWriter.CloseWithError(err)
+                return
+            }
+
+            content := io.Reader(tarReader)
+            if hdr.Name == dockerfileName {
+                // This entry is the Dockerfile. Since the tar archive was
+                // generated from a directory on the local filesystem, the
+                // Dockerfile will only appear once in the archive.
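+                // Rewrite it and patch the tar header size so it matches
+                // the new contents.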
+                var newDockerfile []byte
+                newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator)
+                if err != nil {
+                    pipeWriter.CloseWithError(err)
+                    return
+                }
+                hdr.Size = int64(len(newDockerfile))
+                content = bytes.NewBuffer(newDockerfile)
+            }
+
+            if err := tarWriter.WriteHeader(hdr); err != nil {
+                pipeWriter.CloseWithError(err)
+                return
+            }
+
+            if _, err := io.Copy(tarWriter, content); err != nil {
+                pipeWriter.CloseWithError(err)
+                return
+            }
+        }
+    }()
+
+    return pipeReader
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/build/context.go b/vendor/github.com/docker/cli/cli/command/image/build/context.go
new file mode 100644
index 0000000..b1457bd
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/build/context.go
@@ -0,0 +1,377 @@
+package build
+
+import (
+    "archive/tar"
+    "bufio"
+    "bytes"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "runtime"
+    "strings"
+    "time"
+
+    "github.com/docker/docker/builder/remotecontext/git"
+    "github.com/docker/docker/pkg/archive"
+    "github.com/docker/docker/pkg/fileutils"
+    "github.com/docker/docker/pkg/ioutils"
+    "github.com/docker/docker/pkg/progress"
+    "github.com/docker/docker/pkg/streamformatter"
+    "github.com/docker/docker/pkg/stringid"
+    "github.com/pkg/errors"
+)
+
+const (
+    // DefaultDockerfileName is the default filename with Docker commands, read by docker build
+    DefaultDockerfileName string = "Dockerfile"
+    // archiveHeaderSize is the number of bytes in an archive header
+    archiveHeaderSize = 512
+)
+
+// ValidateContextDirectory checks if all the contents of the directory
+// can be read, and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error.
+func ValidateContextDirectory(srcPath string, excludes []string) error {
+    contextRoot, err := getContextRoot(srcPath)
+    if err != nil {
+        return err
+    }
+    return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
+        if err != nil {
+            if os.IsPermission(err) {
+                return errors.Errorf("can't stat '%s'", filePath)
+            }
+            if os.IsNotExist(err) {
+                return nil
+            }
+            return err
+        }
+
+        // skip this directory/file if it's not in the path; it won't get added to the context
+        if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
+            return err
+        } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+            return err
+        } else if skip {
+            if f.IsDir() {
+                return filepath.SkipDir
+            }
+            return nil
+        }
+
+        // skip checking if symlinks point to non-existing files; such symlinks can be useful.
+        // also skip named pipes, because they hang on open
+        if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+            return nil
+        }
+
+        if !f.IsDir() {
+            currentFile, err := os.Open(filePath)
+            if err != nil && os.IsPermission(err) {
+                return errors.Errorf("no permission to read from '%s'", filePath)
+            }
+            currentFile.Close()
+        }
+        return nil
+    })
+}
+
+// GetContextFromReader will read the contents of the given reader as either a
+// Dockerfile or tar archive. Returns a tar archive used as a context and a
+// path to the Dockerfile inside the tar.
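+// A plain Dockerfile on the reader is written to a temporary directory
+// and wrapped in a fresh single-file tar context.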
+func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
+    buf := bufio.NewReader(r)
+
+    magic, err := buf.Peek(archiveHeaderSize)
+    if err != nil && err != io.EOF {
+        return nil, "", errors.Errorf("failed to peek context header from STDIN: %v", err)
+    }
+
+    if IsArchive(magic) {
+        return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
+    }
+
+    if dockerfileName == "-" {
+        return nil, "", errors.New("build context is not an archive")
+    }
+
+    // Input should be read as a Dockerfile.
+    tmpDir, err := ioutil.TempDir("", "docker-build-context-")
+    if err != nil {
+        return nil, "", errors.Errorf("unable to create temporary context directory: %v", err)
+    }
+
+    f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName))
+    if err != nil {
+        return nil, "", err
+    }
+    _, err = io.Copy(f, buf)
+    if err != nil {
+        f.Close()
+        return nil, "", err
+    }
+
+    if err := f.Close(); err != nil {
+        return nil, "", err
+    }
+    if err := r.Close(); err != nil {
+        return nil, "", err
+    }
+
+    tar, err := archive.Tar(tmpDir, archive.Uncompressed)
+    if err != nil {
+        return nil, "", err
+    }
+
+    return ioutils.NewReadCloserWrapper(tar, func() error {
+        err := tar.Close()
+        os.RemoveAll(tmpDir)
+        return err
+    }), DefaultDockerfileName, nil
+
+}
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
+func IsArchive(header []byte) bool {
+    compression := archive.DetectCompression(header)
+    if compression != archive.Uncompressed {
+        return true
+    }
+    r := tar.NewReader(bytes.NewBuffer(header))
+    _, err := r.Next()
+    return err == nil
+}
+
+// GetContextFromGitURL uses a Git URL as context for a `docker build`. The
+// git repo is cloned into a temporary directory used as the context directory.
+// Returns the absolute path to the temporary context directory, the relative
+// path of the dockerfile in that context directory, and a nil error on
+// success.
+func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error) {
+    if _, err := exec.LookPath("git"); err != nil {
+        return "", "", errors.Wrapf(err, "unable to find 'git'")
+    }
+    absContextDir, err := git.Clone(gitURL)
+    if err != nil {
+        return "", "", errors.Wrapf(err, "unable to 'git clone' to temporary context directory")
+    }
+
+    absContextDir, err = ResolveAndValidateContextPath(absContextDir)
+    if err != nil {
+        return "", "", err
+    }
+    relDockerfile, err := getDockerfileRelPath(absContextDir, dockerfileName)
+    return absContextDir, relDockerfile, err
+}
+
+// GetContextFromURL uses a remote URL as context for a `docker build`. The
+// remote resource is downloaded as either a Dockerfile or a tar archive.
+// Returns the tar archive used for the context and the path of the
+// Dockerfile inside the tar.
+func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
+    response, err := getWithStatusError(remoteURL)
+    if err != nil {
+        return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err)
+    }
+    progressOutput := streamformatter.NewProgressOutput(out)
+
+    // Pass the response body through a progress reader.
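+    // The progress reader echoes download progress to the supplied writer.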
+    progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
+
+    return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
+}
+
+// getWithStatusError does an http.Get() and returns an error if the
+// status code is 4xx or 5xx.
+func getWithStatusError(url string) (resp *http.Response, err error) {
+    if resp, err = http.Get(url); err != nil {
+        return nil, err
+    }
+    if resp.StatusCode < 400 {
+        return resp, nil
+    }
+    msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status)
+    body, err := ioutil.ReadAll(resp.Body)
+    resp.Body.Close()
+    if err != nil {
+        return nil, errors.Wrapf(err, msg+": error reading body")
+    }
+    return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body))
+}
+
+// GetContextFromLocalDir uses the given local directory as context for a
+// `docker build`. Returns the absolute path to the local context directory,
+// the relative path of the dockerfile in that context directory, and a nil
+// error on success.
+func GetContextFromLocalDir(localDir, dockerfileName string) (string, string, error) {
+    localDir, err := ResolveAndValidateContextPath(localDir)
+    if err != nil {
+        return "", "", err
+    }
+
+    // When using a local context directory, if the Dockerfile is specified
+    // with the `-f/--file` option then it is considered relative to the
+    // current directory and not the context directory.
+    if dockerfileName != "" && dockerfileName != "-" {
+        if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
+            return "", "", errors.Errorf("unable to get absolute path to Dockerfile: %v", err)
+        }
+    }
+
+    relDockerfile, err := getDockerfileRelPath(localDir, dockerfileName)
+    return localDir, relDockerfile, err
+}
+
+// ResolveAndValidateContextPath uses the given context directory for a `docker build`
+// and returns the absolute path to the context directory.
+func ResolveAndValidateContextPath(givenContextDir string) (string, error) {
+    absContextDir, err := filepath.Abs(givenContextDir)
+    if err != nil {
+        return "", errors.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err)
+    }
+
+    // The context dir might be a symbolic link, so follow it to the actual
+    // target directory.
+    //
+    // FIXME. We use isUNC (always false on non-Windows platforms) to work around
+    // an issue in golang. On Windows, EvalSymLinks does not work on UNC file
+    // paths (those starting with \\). This hack means that when using links
+    // on UNC paths, they will not be followed.
+    if !isUNC(absContextDir) {
+        absContextDir, err = filepath.EvalSymlinks(absContextDir)
+        if err != nil {
+            return "", errors.Errorf("unable to evaluate symlinks in context path: %v", err)
+        }
+    }
+
+    stat, err := os.Lstat(absContextDir)
+    if err != nil {
+        return "", errors.Errorf("unable to stat context directory %q: %v", absContextDir, err)
+    }
+
+    if !stat.IsDir() {
+        return "", errors.Errorf("context must be a directory: %s", absContextDir)
+    }
+    return absContextDir, err
+}
+
+// getDockerfileRelPath returns the dockerfile path relative to the context
+// directory
+func getDockerfileRelPath(absContextDir, givenDockerfile string) (string, error) {
+    var err error
+
+    if givenDockerfile == "-" {
+        return givenDockerfile, nil
+    }
+
+    absDockerfile := givenDockerfile
+    if absDockerfile == "" {
+        // No -f/--file was specified so use the default relative to the
+        // context directory.
+        absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName)
+
+        // Just to be nice ;-) look for 'dockerfile' too but only
+        // use it if we found it, otherwise ignore this check
+        if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
+            altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName))
+            if _, err = os.Lstat(altPath); err == nil {
+                absDockerfile = altPath
+            }
+        }
+    }
+
+    // If not already an absolute path, the Dockerfile path should be joined to
+    // the base directory.
+    if !filepath.IsAbs(absDockerfile) {
+        absDockerfile = filepath.Join(absContextDir, absDockerfile)
+    }
+
+    // Evaluate symlinks in the path to the Dockerfile too.
+    //
+    // FIXME. We use isUNC (always false on non-Windows platforms) to work around
+    // an issue in golang. On Windows, EvalSymLinks does not work on UNC file
+    // paths (those starting with \\). This hack means that when using links
+    // on UNC paths, they will not be followed.
+    if !isUNC(absDockerfile) {
+        absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
+        if err != nil {
+            return "", errors.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
+
+        }
+    }
+
+    if _, err := os.Lstat(absDockerfile); err != nil {
+        if os.IsNotExist(err) {
+            return "", errors.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
+        }
+        return "", errors.Errorf("unable to stat Dockerfile: %v", err)
+    }
+
+    relDockerfile, err := filepath.Rel(absContextDir, absDockerfile)
+    if err != nil {
+        return "", errors.Errorf("unable to get relative Dockerfile path: %v", err)
+    }
+
+    if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
+        return "", errors.Errorf("the Dockerfile (%s) must be within the build context", givenDockerfile)
+    }
+
+    return relDockerfile, nil
+}
+
+// isUNC returns true if the path is UNC (one starting with \\). It always
+// returns false on Linux.
+func isUNC(path string) bool {
+    return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`)
+}
+
+// AddDockerfileToBuildContext adds a Dockerfile from a ReadCloser to the build
+// context, and returns a new archive and the relative path to the dockerfile
+// in the context.
+func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) {
+    file, err := ioutil.ReadAll(dockerfileCtx)
+    dockerfileCtx.Close()
+    if err != nil {
+        return nil, "", err
+    }
+    now := time.Now()
+    hdrTmpl := &tar.Header{
+        Mode:       0600,
+        Uid:        0,
+        Gid:        0,
+        ModTime:    now,
+        Typeflag:   tar.TypeReg,
+        AccessTime: now,
+        ChangeTime: now,
+    }
+    randomName := ".dockerfile." + stringid.GenerateRandomID()[:20]
+
+    buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{
+        // Add the dockerfile with a random filename
+        randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
+            return hdrTmpl, file, nil
+        },
+        // Update .dockerignore to include the random filename
+        ".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
+            if h == nil {
+                h = hdrTmpl
+            }
+
+            b := &bytes.Buffer{}
+            if content != nil {
+                if _, err := b.ReadFrom(content); err != nil {
+                    return nil, nil, err
+                }
+            } else {
+                b.WriteString(".dockerignore")
+            }
+            b.WriteString("\n" + randomName + "\n")
+            return h, b.Bytes(), nil
+        },
+    })
+    return buildCtx, randomName, nil
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/build/context_unix.go b/vendor/github.com/docker/cli/cli/command/image/build/context_unix.go
new file mode 100644
index 0000000..cb2634f
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/build/context_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package build
+
+import (
+    "path/filepath"
+)
+
+func getContextRoot(srcPath string) (string, error) {
+    return filepath.Join(srcPath, "."), nil
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/build/context_windows.go b/vendor/github.com/docker/cli/cli/command/image/build/context_windows.go
new file mode 100644
index 0000000..c577cfa
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/build/context_windows.go
@@ -0,0 +1,17 @@
+// +build windows
+
+package build
+
+import (
+    "path/filepath"
+
+    "github.com/docker/docker/pkg/longpath"
+)
+
+func getContextRoot(srcPath string) (string, error) {
+    cr, err := filepath.Abs(srcPath)
+    if err != nil {
+        return "", err
+    }
+    return longpath.AddPrefix(cr), nil
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/build/dockerignore.go b/vendor/github.com/docker/cli/cli/command/image/build/dockerignore.go
new file mode 100644
index 0000000..497c3f2
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/build/dockerignore.go
@@ -0,0 +1,39 @@
+package build
+
+import (
+    "os"
+    "path/filepath"
+
+    "github.com/docker/docker/builder/dockerignore"
+    "github.com/docker/docker/pkg/fileutils"
+)
+
+// ReadDockerignore reads the .dockerignore file in the context directory and
+// returns the list of paths to exclude
+func ReadDockerignore(contextDir string) ([]string, error) {
+    var excludes []string
+
+    f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
+    switch {
+    case os.IsNotExist(err):
+        return excludes, nil
+    case err != nil:
+        return nil, err
+    }
+    defer f.Close()
+
+    return dockerignore.ReadAll(f)
+}
+
+// TrimBuildFilesFromExcludes removes the named Dockerfile and .dockerignore from
+// the list of excluded files. The daemon will remove them from the final context
+// but they must be available in the context when passed to the API.
+func TrimBuildFilesFromExcludes(excludes []string, dockerfile string, dockerfileFromStdin bool) []string {
+    if keep, _ := fileutils.Matches(".dockerignore", excludes); keep {
+        excludes = append(excludes, "!.dockerignore")
+    }
+    if keep, _ := fileutils.Matches(dockerfile, excludes); keep && !dockerfileFromStdin {
+        excludes = append(excludes, "!"+dockerfile)
+    }
+    return excludes
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/cmd.go b/vendor/github.com/docker/cli/cli/command/image/cmd.go
new file mode 100644
index 0000000..0c68af1
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/cmd.go
@@ -0,0 +1,35 @@
+package image
+
+import (
+    "github.com/spf13/cobra"
+
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+)
+
+// NewImageCommand returns a cobra command for `image` subcommands
+// nolint: interfacer
+func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "image",
+        Short: "Manage images",
+        Args:  cli.NoArgs,
+        RunE:  command.ShowHelp(dockerCli.Err()),
+    }
+    cmd.AddCommand(
+        NewBuildCommand(dockerCli),
+        NewHistoryCommand(dockerCli),
+        NewDeltaCommand(dockerCli),
+        NewImportCommand(dockerCli),
+        NewLoadCommand(dockerCli),
+        NewPullCommand(dockerCli),
+        NewPushCommand(dockerCli),
+        NewSaveCommand(dockerCli),
+        NewTagCommand(dockerCli),
+        newListCommand(dockerCli),
+        newRemoveCommand(dockerCli),
+        newInspectCommand(dockerCli),
+        NewPruneCommand(dockerCli),
+    )
+    return cmd
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/delta.go b/vendor/github.com/docker/cli/cli/command/image/delta.go
new file mode 100644
index 0000000..a2e9b1b
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/delta.go
@@ -0,0 +1,44 @@
+package image
+
+import (
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+    "github.com/docker/docker/pkg/jsonmessage"
+    "github.com/spf13/cobra"
+    "golang.org/x/net/context"
+)
+
+type deltaOptions struct {
+    src  string
+    dest string
+}
+
+// NewDeltaCommand creates a new `docker delta` command
+func NewDeltaCommand(dockerCli command.Cli) *cobra.Command {
+    var options deltaOptions
+
+    cmd := &cobra.Command{
+        Use:   "delta [OPTIONS] SRC_IMAGE DEST_IMAGE",
+        Short: "Create a binary delta between SRC_IMAGE and DEST_IMAGE",
+        Args:  cli.ExactArgs(2),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            options.src = args[0]
+            options.dest = args[1]
+            return runDelta(dockerCli, options)
+        },
+    }
+
+    return cmd
+}
+
+func runDelta(dockerCli command.Cli, options deltaOptions) error {
+    clnt := dockerCli.Client()
+
+    responseBody, err := clnt.ImageDelta(context.Background(), options.src, options.dest)
+    if err != nil {
+        return err
+    }
+    defer responseBody.Close()
+
+    return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/history.go b/vendor/github.com/docker/cli/cli/command/image/history.go
new file mode 100644
index 0000000..27782d1
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/history.go
@@ -0,0 +1,64 @@
+package image
+
+import (
+    "golang.org/x/net/context"
+
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+    "github.com/docker/cli/cli/command/formatter"
+    "github.com/spf13/cobra"
+)
+
+type historyOptions struct {
+    image string
+
+    human   bool
+    quiet   bool
+    noTrunc bool
+    format  string
+}
+
+// NewHistoryCommand creates a new `docker history` command
+func NewHistoryCommand(dockerCli command.Cli) *cobra.Command {
+    var opts historyOptions
+
+    cmd := &cobra.Command{
+        Use:   "history [OPTIONS] IMAGE",
+        Short: "Show the history of an image",
+        Args:  cli.ExactArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            opts.image = args[0]
+            return runHistory(dockerCli, opts)
+        },
+    }
+
+    flags := cmd.Flags()
+
+    flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format")
+    flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs")
+    flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
+    flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template")
+
+    return cmd
+}
+
+func runHistory(dockerCli command.Cli, opts historyOptions) error {
+    ctx := context.Background()
+
+    history, err := dockerCli.Client().ImageHistory(ctx, opts.image)
+    if err != nil {
+        return err
+    }
+
+    format := opts.format
+    if len(format) == 0 {
+        format = formatter.TableFormatKey
+    }
+
+    historyCtx := formatter.Context{
+        Output: dockerCli.Out(),
+        Format: formatter.NewHistoryFormat(format, opts.quiet, opts.human),
+        Trunc:  !opts.noTrunc,
+    }
+    return formatter.HistoryWrite(historyCtx, opts.human, history)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/import.go b/vendor/github.com/docker/cli/cli/command/image/import.go
new file mode 100644
index 0000000..1f7189a
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/import.go
@@ -0,0 +1,87 @@
+package image
+
+import (
+    "io"
+    "os"
+
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+    dockeropts "github.com/docker/cli/opts"
+    "github.com/docker/docker/api/types"
+    "github.com/docker/docker/pkg/jsonmessage"
+    "github.com/docker/docker/pkg/urlutil"
+    "github.com/spf13/cobra"
+    "golang.org/x/net/context"
+)
+
+type importOptions struct {
+    source    string
+    reference string
+    changes   dockeropts.ListOpts
+    message   string
+}
+
+// NewImportCommand creates a new `docker import` command
+func NewImportCommand(dockerCli command.Cli) *cobra.Command {
+    var options importOptions
+
+    cmd := &cobra.Command{
+        Use:   "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]",
+        Short: "Import the contents from a tarball to create a filesystem image",
+        Args:  cli.RequiresMinArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            options.source = args[0]
+            if len(args) > 1 {
+                options.reference = args[1]
+            }
+            return runImport(dockerCli, options)
+        },
+    }
+
+    flags := cmd.Flags()
+
+    options.changes = dockeropts.NewListOpts(nil)
+    flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
+    flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image")
+
+    return cmd
+}
+
+func runImport(dockerCli command.Cli, options importOptions) error {
+    var (
+        in      io.Reader
+        srcName = options.source
+    )
+
+    if options.source == "-" {
+        in = dockerCli.In()
+    } else if !urlutil.IsURL(options.source) {
+        srcName = "-"
+        file, err := os.Open(options.source)
+        if err != nil {
+            return err
+        }
+        defer file.Close()
+        in = file
+    }
+
+    source := types.ImageImportSource{
+        Source:     in,
+        SourceName: srcName,
+    }
+
+    importOptions := types.ImageImportOptions{
+        Message: options.message,
+        Changes: options.changes.GetAll(),
+    }
+
+    clnt := dockerCli.Client()
+
+    responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions)
+    if err != nil {
+        return err
+    }
+    defer responseBody.Close()
+
+    return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/inspect.go b/vendor/github.com/docker/cli/cli/command/image/inspect.go
new file mode 100644
index 0000000..a510e30
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/inspect.go
@@ -0,0 +1,44 @@
+package image
+
+import (
+    "golang.org/x/net/context"
+
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+    "github.com/docker/cli/cli/command/inspect"
+    "github.com/spf13/cobra"
+)
+
+type inspectOptions struct {
+    format string
+    refs   []string
+}
+
+// newInspectCommand creates a new cobra.Command for `docker image inspect`
+func newInspectCommand(dockerCli command.Cli) *cobra.Command {
+    var opts inspectOptions
+
+    cmd := &cobra.Command{
+        Use:   "inspect [OPTIONS] IMAGE [IMAGE...]",
+        Short: "Display detailed information on one or more images",
+        Args:  cli.RequiresMinArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            opts.refs = args
+            return runInspect(dockerCli, opts)
+        },
+    }
+
+    flags := cmd.Flags()
+    flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
+    return cmd
+}
+
+func runInspect(dockerCli command.Cli, opts inspectOptions) error {
+    client := dockerCli.Client()
+    ctx := context.Background()
+
+    getRefFunc := func(ref string) (interface{}, []byte, error) {
+        return client.ImageInspectWithRaw(ctx, ref)
+    }
+    return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/image/list.go b/vendor/github.com/docker/cli/cli/command/image/list.go
new file mode 100644
index 0000000..6dada82
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/image/list.go
@@ -0,0 +1,95 @@
+package image
+
+import (
+    "github.com/docker/cli/cli"
+    "github.com/docker/cli/cli/command"
+    "github.com/docker/cli/cli/command/formatter"
+    "github.com/docker/cli/opts"
+    "github.com/docker/docker/api/types"
+    "github.com/spf13/cobra"
+    "golang.org/x/net/context"
+)
+
+type imagesOptions struct {
+    matchName string
+
+    quiet       bool
+    all         bool
+    noTrunc     bool
+    showDigests bool
+    format      string
+    filter      opts.FilterOpt
+}
+
+// NewImagesCommand creates a new `docker images` command
+func NewImagesCommand(dockerCli command.Cli) *cobra.Command {
+    options := imagesOptions{filter: opts.NewFilterOpt()}
+
+    cmd := &cobra.Command{
+        Use:   "images [OPTIONS] [REPOSITORY[:TAG]]",
+        Short: "List images",
+        Args:  cli.RequiresMaxArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            if len(args) > 0 {
+                options.matchName = args[0]
+            }
+            return runImages(dockerCli, options)
+        },
+    }
+
+    flags := cmd.Flags()
+
+    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs")
+    flags.BoolVarP(&options.all, "all", "a", false, "Show all images (default hides intermediate images)")
+    flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
+    flags.BoolVar(&options.showDigests, "digests", false, "Show digests")
+    flags.StringVar(&options.format, "format", "", "Pretty-print images using a Go template")
+    flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided")
+
+    return cmd
+}
+
+func newListCommand(dockerCli command.Cli) *cobra.Command {
+    cmd := *NewImagesCommand(dockerCli)
+    cmd.Aliases = []string{"images", "list"}
+    cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]"
+    return &cmd
+}
+
+func runImages(dockerCli command.Cli, options imagesOptions) error {
+    ctx := context.Background()
+
+    filters := options.filter.Value()
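+    // A positional REPOSITORY[:TAG] argument becomes a reference filter.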
+ if options.matchName != "" { + filters.Add("reference", options.matchName) + } + + listOptions := types.ImageListOptions{ + All: options.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, options.quiet, options.showDigests), + Trunc: !options.noTrunc, + }, + Digest: options.showDigests, + } + return formatter.ImageWrite(imageCtx, images) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/load.go b/vendor/github.com/docker/cli/cli/command/image/load.go new file mode 100644 index 0000000..6708599 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/load.go @@ -0,0 +1,77 @@ +package image + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli command.Cli) *cobra.Command { + var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli command.Cli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list unnecessarily. On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar archive is supplied either via + // the --input flag or through stdin; if not, display an error message and exit.
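+ // (IsTerminal reports false when stdin has been redirected from a file or pipe.)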
+ if opts.input == "" && dockerCli.In().IsTerminal() { + return errors.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/vendor/github.com/docker/cli/cli/command/image/prune.go b/vendor/github.com/docker/cli/cli/command/image/prune.go new file mode 100644 index 0000000..8e521b6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/prune.go @@ -0,0 +1,95 @@ +package image + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated with them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images.
+Are you sure you want to continue?` +) + +func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := options.filter.Value() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all)) + pruneFilters = command.PruneFilters(dockerCli, pruneFilters) + + warning := danglingWarning + if options.all { + warning = allImageWarning + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return + } + + if len(report.ImagesDeleted) > 0 { + output = "Deleted Images:\n" + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + output += fmt.Sprintln("untagged:", st.Untagged) + } else { + output += fmt.Sprintln("deleted:", st.Deleted) + } + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/pull.go b/vendor/github.com/docker/cli/cli/command/image/pull.go new file mode 100644 index 0000000..e60e5a4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/pull.go @@ -0,0 +1,85 @@ +package image + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type pullOptions struct { + remote string + all bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli command.Cli) *cobra.Command { + var opts pullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + command.AddTrustVerificationFlags(flags) + + return cmd +} + +func runPull(dockerCli command.Cli, opts pullOptions) error { + distributionRef, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return err + } + if opts.all && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !opts.all && reference.IsNameOnly(distributionRef) { + distributionRef = reference.TagNameOnly(distributionRef) + if tagged, ok := distributionRef.(reference.Tagged); ok { + fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", tagged.Tag()) + } + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if command.IsTrusted() && !isCanonical { + err = trustedPull(ctx, 
dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) + } else { + err = imagePullPrivileged(ctx, dockerCli, authConfig, reference.FamiliarString(distributionRef), requestPrivilege, opts.all) + } + if err != nil { + if strings.Contains(err.Error(), "when fetching 'plugin'") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/image/push.go b/vendor/github.com/docker/cli/cli/command/image/push.go new file mode 100644 index 0000000..cc95897 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/push.go @@ -0,0 +1,61 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustSigningFlags(flags) + + return cmd +} + +func runPush(dockerCli command.Cli, remote string) error { + ref, err := reference.ParseNormalizedNamed(remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if command.IsTrusted() { + return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/remove.go b/vendor/github.com/docker/cli/cli/command/image/remove.go new file mode 100644 index 0000000..894d305 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/remove.go @@ -0,0 +1,82 @@ +package image + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli command.Cli) 
*cobra.Command { + cmd := *NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + for _, img := range images { + dels, err := client.ImageRemove(ctx, img, options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + msg := strings.Join(errs, "\n") + if !opts.force { + return errors.New(msg) + } + // Fprintln avoids interpreting the joined error text as a format string + fmt.Fprintln(dockerCli.Err(), msg) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/image/save.go b/vendor/github.com/docker/cli/cli/command/image/save.go new file mode 100644 index 0000000..ba666d2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/save.go @@ -0,0 +1,56 @@ +package image + +import ( + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli command.Cli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return runSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runSave(dockerCli command.Cli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal.
Use the -o flag or redirect") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/tag.go b/vendor/github.com/docker/cli/cli/command/image/tag.go new file mode 100644 index 0000000..2a50c12 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli command.Cli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli command.Cli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/trust.go b/vendor/github.com/docker/cli/cli/command/image/trust.go new file mode 100644 index 0000000..dd0d12c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/trust.go @@ -0,0 +1,384 @@ +package image + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "io" + "path" + "sort" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/tuf/data" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// trustedPush handles content trust pushing of an image +func trustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +// nolint: gocyclo +func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which matches the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the number of times handleTarget is called; + // more than one call should be treated as an error in a trusted push.
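+ // handleTarget is passed to DisplayJSONMessagesToStream below as the aux-message callback.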
+ cnt := 0 + handleTarget := func(aux *json.RawMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called once. This will be treated as an error. + return + } + + var pushResult types.PushResult + err := json.Unmarshal(*aux, &pushResult) + if err == nil && pushResult.Tag != "" { + if dgst, err := digest.Parse(pushResult.Digest); err == nil { + h, err := hex.DecodeString(dgst.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(dgst.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil { + return err + } + fmt.Fprintln(streams.Out(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return errors.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + fmt.Fprintln(streams.Out(), "No targets found, please provide a specific tag in order to sign it") + return nil + } + + fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(streams, repoInfo, authConfig, "push", "pull") + if err != nil { + fmt.Fprintf(streams.Out(), "Error establishing connection to notary repository: %s\n", err) + return err + } + + // get the latest repository metadata so we can figure out which roles to sign + err = repo.Update(false) + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.Name.Name(), err) + } + fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = addTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + fmt.Fprintf(streams.Out(), "Failed to sign %q:%s - %s\n", repoInfo.Name.Name(), tag, err.Error()) + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + fmt.Fprintf(streams.Out(), "Successfully signed %q:%s\n", repoInfo.Name.Name(), tag) + return nil +} + +// Attempt to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role.
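+// For example, with delegation roles targets/releases and targets/qa but a signing key only for targets/releases, the target is signed into targets/releases alone.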
+func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { + var signableRoles []string + + // translate the full key names, which include the GUN, into just the key IDs + allCanonicalKeyIDs := make(map[string]struct{}) + for fullKeyID := range repo.CryptoService.ListAllKeys() { + allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} + } + + allDelegationRoles, err := repo.GetDelegationRoles() + if err != nil { + return err + } + + // if there are no delegation roles, then just try to sign it into the targets role + if len(allDelegationRoles) == 0 { + return repo.AddTarget(target, data.CanonicalTargetsRole) + } + + // there are delegation roles, find every delegation role we have a key for, and + // attempt to sign into all those roles. + for _, delegationRole := range allDelegationRoles { + // We do not support signing any delegation role that isn't a direct child of the targets role. + // Also don't bother checking the keys if we can't add the target + // to this role due to path restrictions + if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { + continue + } + + for _, canonicalKeyID := range delegationRole.KeyIDs { + if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { + signableRoles = append(signableRoles, delegationRole.Name) + break + } + } + } + + if len(signableRoles) == 0 { + return errors.Errorf("no valid signing keys for delegation roles") + } + + return repo.AddTarget(target, signableRoles...) +} + +// imagePushPrivileged pushes the image +func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + var refs []target + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return err + } + + if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(ref.Name(), err) + } + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Out(), "Skipping target for %q\n", reference.FamiliarName(ref)) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) + } + } else { + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(ref.Name(), err) + } + // Only get the tag if it's in the top level
targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + r, err := convertTarget(t.Target) + if err != nil { + return err + + } + refs = append(refs, r) + } + + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest) + + trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, authConfig, reference.FamiliarString(trustedRef), requestPrivilege, false); err != nil { + return err + } + + tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name) + if err != nil { + return err + } + + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: all, + } + + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + var ( + repoInfo *registry.RepositoryInfo + err error + ) + if rs != nil { + repoInfo, err = rs.ResolveRepository(ref) + } else { + repoInfo, err = registry.ParseRepositoryInfo(ref) + } + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.Name.Name(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted 
tags a trusted ref +// nolint: interfacer +func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + // Use familiar references when interacting with client and output + familiarRef := reference.FamiliarString(ref) + trustedFamiliarRef := reference.FamiliarString(trustedRef) + + fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef) + + return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef) +} diff --git a/vendor/github.com/docker/cli/cli/command/in.go b/vendor/github.com/docker/cli/cli/command/in.go new file mode 100644 index 0000000..54855c6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/in.go @@ -0,0 +1,56 @@ +package command + +import ( + "errors" + "io" + "os" + "runtime" + + "github.com/docker/docker/pkg/term" +) + +// InStream is an input stream used by the DockerCli to read user input +type InStream struct { + CommonStream + in io.ReadCloser +} + +func (i *InStream) Read(p []byte) (int, error) { + return i.in.Read(p) +} + +// Close implements the Closer interface +func (i *InStream) Close() error { + return i.in.Close() +} + +// SetRawTerminal sets raw mode on the input terminal +func (i *InStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal { + return nil + } + i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd) + return err +} + +// CheckTty checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !i.isTerminal { + eText := "the input device is not a TTY" + if runtime.GOOS == "windows" { + return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") + } + return errors.New(eText) + } + return nil +} + +// NewInStream returns a new InStream object from a ReadCloser +func NewInStream(in io.ReadCloser) *InStream { + fd, isTerminal := term.GetFdInfo(in) + return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in} +} diff --git a/vendor/github.com/docker/cli/cli/command/inspect/inspector.go b/vendor/github.com/docker/cli/cli/command/inspect/inspector.go new file mode 100644 index 0000000..054381f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/inspect/inspector.go @@ -0,0 +1,199 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "io" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli" + "github.com/docker/docker/pkg/templates" + "github.com/pkg/errors" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. 
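+// The template is executed once per inspected element and the results are buffered until Flush is called.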
+func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// NewTemplateInspectorFromString creates a new TemplateInspector from a string +// which is compiled into a template. +func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { + if tmplStr == "" { + return NewIndentedInspector(out), nil + } + + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, errors.Errorf("Template parsing error: %s", err) + } + return NewTemplateInspector(out, tmpl), nil +} + +// GetRefFunc is a function which is used by Inspect to fetch an object from a +// reference +type GetRefFunc func(ref string) (interface{}, []byte, error) + +// Inspect fetches objects by reference using GetRefFunc and writes the json +// representation to the output writer. +func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { + inspector, err := NewTemplateInspectorFromString(out, tmplStr) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErrs []string + for _, ref := range references { + element, raw, err := getRef(ref) + if err != nil { + inspectErrs = append(inspectErrs, err.Error()) + continue + } + + if err := inspector.Inspect(element, raw); err != nil { + inspectErrs = append(inspectErrs, err.Error()) + } + } + + if err := inspector.Flush(); err != nil { + logrus.Errorf("%s\n", err) + } + + if len(inspectErrs) != 0 { + return cli.StatusError{ + StatusCode: 1, + Status: strings.Join(inspectErrs, "\n"), + } + } + return nil +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return errors.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// tryRawInspectFallback executes the inspect template with a raw interface. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + dec.UseNumber() + + if rawErr := dec.Decode(&raw); rawErr != nil { + return errors.Errorf("unable to read inspect data: %v", rawErr) + } + + tmplMissingKey := i.tmpl.Option("missingkey=error") + if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { + return errors.Errorf("Template parsing error: %v", rawErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *TemplateInspector) Flush() error { + if i.buffer.Len() == 0 { + _, err := io.WriteString(i.outputStream, "\n") + return err + } + _, err := io.Copy(i.outputStream, i.buffer) + return err +} + +// IndentedInspector uses a buffer to store the indented representation of an element.
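+// On Flush, raw elements are joined into a single JSON array and re-indented; typed elements fall back to json.MarshalIndent.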
+type IndentedInspector struct { + outputStream io.Writer + elements []interface{} + rawElements [][]byte +} + +// NewIndentedInspector generates a new IndentedInspector. +func NewIndentedInspector(outputStream io.Writer) Inspector { + return &IndentedInspector{ + outputStream: outputStream, + } +} + +// Inspect writes the raw element with an indented json format. +func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { + if rawElement != nil { + i.rawElements = append(i.rawElements, rawElement) + } else { + i.elements = append(i.elements, typedElement) + } + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/vendor/github.com/docker/cli/cli/command/network/cmd.go b/vendor/github.com/docker/cli/cli/command/network/cmd.go new file mode 100644 index 0000000..97bcca2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/cmd.go @@ -0,0 +1,29 @@ +package network + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewNetworkCommand returns a cobra command for `network` subcommands +// nolint: interfacer +func NewNetworkCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage networks", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + newConnectCommand(dockerCli), + newCreateCommand(dockerCli), + newDisconnectCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/network/connect.go b/vendor/github.com/docker/cli/cli/command/network/connect.go new file mode 100644 index 0000000..19bd3c5 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/connect.go @@ -0,0 +1,62 @@ +package network + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types/network" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type connectOptions struct { + network string + container string + ipaddress string + ipv6address string + links opts.ListOpts + aliases []string + linklocalips []string +} + +func newConnectCommand(dockerCli *command.DockerCli) *cobra.Command { + options := connectOptions{ + links: opts.NewListOpts(opts.ValidateLink), + } + + cmd := &cobra.Command{ + Use: "connect [OPTIONS] NETWORK CONTAINER", + Short: "Connect a container to a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args 
[]string) error { + options.network = args[0] + options.container = args[1] + return runConnect(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.ipaddress, "ip", "", "IPv4 address (e.g., 172.30.100.104)") + flags.StringVar(&options.ipv6address, "ip6", "", "IPv6 address (e.g., 2001:db8::33)") + flags.Var(&options.links, "link", "Add link to another container") + flags.StringSliceVar(&options.aliases, "alias", []string{}, "Add network-scoped alias for the container") + flags.StringSliceVar(&options.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") + + return cmd +} + +func runConnect(dockerCli *command.DockerCli, options connectOptions) error { + client := dockerCli.Client() + + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: options.ipaddress, + IPv6Address: options.ipv6address, + LinkLocalIPs: options.linklocalips, + }, + Links: options.links.GetAll(), + Aliases: options.aliases, + } + + return client.NetworkConnect(context.Background(), options.network, options.container, epConfig) +} diff --git a/vendor/github.com/docker/cli/cli/command/network/create.go b/vendor/github.com/docker/cli/cli/command/network/create.go new file mode 100644 index 0000000..ed6d2de --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/create.go @@ -0,0 +1,248 @@ +package network + +import ( + "fmt" + "net" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type createOptions struct { + name string + scope string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts + internal bool + ipv6 bool + attachable bool + ingress bool + configOnly bool + configFrom string + + ipamDriver string + ipamSubnet []string + ipamIPRange []string + ipamGateway []string + ipamAux opts.MapOpts + ipamOpt opts.MapOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + options := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(opts.ValidateEnv), + ipamAux: *opts.NewMapOpts(nil, nil), + ipamOpt: *opts.NewMapOpts(nil, nil), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] NETWORK", + Short: "Create a network", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.name = args[0] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&options.driver, "driver", "d", "bridge", "Driver to manage the Network") + flags.VarP(&options.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&options.labels, "label", "Set metadata on a network") + flags.BoolVar(&options.internal, "internal", false, "Restrict external access to the network") + flags.BoolVar(&options.ipv6, "ipv6", false, "Enable IPv6 networking") + flags.BoolVar(&options.attachable, "attachable", false, "Enable manual container attachment") + flags.SetAnnotation("attachable", "version", []string{"1.25"}) + flags.BoolVar(&options.ingress, "ingress", false, "Create swarm routing-mesh network") + flags.SetAnnotation("ingress", "version", []string{"1.29"}) + flags.StringVar(&options.scope, "scope", "", "Control the network's scope") + flags.SetAnnotation("scope", "version", []string{"1.30"}) + flags.BoolVar(&options.configOnly, "config-only", false, "Create a 
configuration-only network") + flags.SetAnnotation("config-only", "version", []string{"1.30"}) + flags.StringVar(&options.configFrom, "config-from", "", "The network from which to copy the configuration") + flags.SetAnnotation("config-from", "version", []string{"1.30"}) + + flags.StringVar(&options.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") + flags.StringSliceVar(&options.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") + flags.StringSliceVar(&options.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") + flags.StringSliceVar(&options.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") + + flags.Var(&options.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") + flags.Var(&options.ipamOpt, "ipam-opt", "Set IPAM driver specific options") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, options createOptions) error { + client := dockerCli.Client() + + ipamCfg, err := consolidateIpam(options.ipamSubnet, options.ipamIPRange, options.ipamGateway, options.ipamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Driver: options.driver, + Options: options.driverOpts.GetAll(), + IPAM: &network.IPAM{ + Driver: options.ipamDriver, + Config: ipamCfg, + Options: options.ipamOpt.GetAll(), + }, + CheckDuplicate: true, + Internal: options.internal, + EnableIPv6: options.ipv6, + Attachable: options.attachable, + Ingress: options.ingress, + Scope: options.scope, + ConfigOnly: options.configOnly, + Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), + } + + if from := options.configFrom; from != "" { + nc.ConfigFrom = &network.ConfigReference{ + Network: from, + } + } + + resp, err := client.NetworkCreate(context.Background(), options.name, nc) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) + return nil +} + +// consolidateIpam consolidates the IPAM configuration as a group from the different +// related flags. A user can configure a network with multiple non-overlapping subnets, +// and hence it is possible to correlate the various related parameters and consolidate +// them: subnets, ip-ranges, gateways and auxiliary addresses are merged into +// structured ipam data.
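+// For example, --subnet 10.0.0.0/16 --ip-range 10.0.1.0/24 --gateway 10.0.0.1 yields a single IPAMConfig entry with all three fields set.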
+// nolint: gocyclo +func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, errors.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, errors.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, errors.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, errors.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, errors.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + // report the original input; s is nil when parsing fails + return false, errors.Errorf("Invalid subnet %s : %v", subnet, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, errors.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} diff --git a/vendor/github.com/docker/cli/cli/command/network/disconnect.go b/vendor/github.com/docker/cli/cli/command/network/disconnect.go new file mode 100644 index 0000000..0f7d7c2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/disconnect.go @@ -0,0 +1,41 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type disconnectOptions struct { + network string + container string + force bool +} + +func newDisconnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := disconnectOptions{} + + cmd := &cobra.Command{ + Use: "disconnect [OPTIONS] NETWORK CONTAINER", + Short: "Disconnect a container from a network", + Args: cli.ExactArgs(2), + RunE: func(cmd
*cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runDisconnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") + + return cmd +} + +func runDisconnect(dockerCli *command.DockerCli, opts disconnectOptions) error { + client := dockerCli.Client() + + return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) +} diff --git a/vendor/github.com/docker/cli/cli/command/network/inspect.go b/vendor/github.com/docker/cli/cli/command/network/inspect.go new file mode 100644 index 0000000..9856c04 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/inspect.go @@ -0,0 +1,48 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string + verbose bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NETWORK [NETWORK...]", + Short: "Display detailed information on one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + cmd.Flags().BoolVarP(&opts.verbose, "verbose", "v", false, "Verbose output for diagnostics") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getNetFunc := func(name string) (interface{}, []byte, error) { + return client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{Verbose: opts.verbose}) + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) +} diff --git a/vendor/github.com/docker/cli/cli/command/network/list.go b/vendor/github.com/docker/cli/cli/command/network/list.go new file mode 100644 index 0000000..79a860f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/list.go @@ -0,0 +1,75 @@ +package network + +import ( + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display network IDs") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate the 
output") + flags.StringVar(&options.format, "format", "", "Pretty-print networks using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 'driver=bridge')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, options listOptions) error { + client := dockerCli.Client() + listOptions := types.NetworkListOptions{Filters: options.filter.Value()} + networkResources, err := client.NetworkList(context.Background(), listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().NetworksFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byNetworkName(networkResources)) + + networksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNetworkFormat(format, options.quiet), + Trunc: !options.noTrunc, + } + return formatter.NetworkWrite(networksCtx, networkResources) +} diff --git a/vendor/github.com/docker/cli/cli/command/network/prune.go b/vendor/github.com/docker/cli/cli/command/network/prune.go new file mode 100644 index 0000000..b31db99 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/prune.go @@ -0,0 +1,76 @@ +package network + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for networks +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const warning = `WARNING! This will remove all networks not used by at least one container. 
+Are you sure you want to continue?` + +func runPrune(dockerCli command.Cli, options pruneOptions) (output string, err error) { + pruneFilters := command.PruneFilters(dockerCli, options.filter.Value()) + + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().NetworksPrune(context.Background(), pruneFilters) + if err != nil { + return + } + + if len(report.NetworksDeleted) > 0 { + output = "Deleted Networks:\n" + for _, id := range report.NetworksDeleted { + output += id + "\n" + } + } + + return +} + +// RunPrune calls the Network Prune API +// This returns the amount of space reclaimed (always zero for networks) and a detailed output string +func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter}) + return 0, output, err +} diff --git a/vendor/github.com/docker/cli/cli/command/network/remove.go b/vendor/github.com/docker/cli/cli/command/network/remove.go new file mode 100644 index 0000000..7dfe8da --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/network/remove.go @@ -0,0 +1,54 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm NETWORK [NETWORK...]", + Aliases: []string{"remove"}, + Short: "Remove one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } +} + +const ingressWarning = "WARNING! Before removing the routing-mesh network, " + + "make sure all the nodes in your swarm run the same docker engine version. " + + "Otherwise, removal may not be effective and functionality of newly created " + + "ingress networks will be impaired.\nAre you sure you want to continue?" + +func runRemove(dockerCli *command.DockerCli, networks []string) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range networks { + if nw, _, err := client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{}); err == nil && + nw.Ingress && + !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), ingressWarning) { + continue + } + if err := client.NetworkRemove(ctx, name); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/out.go b/vendor/github.com/docker/cli/cli/command/out.go new file mode 100644 index 0000000..27b44c2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/out.go @@ -0,0 +1,50 @@ +package command + +import ( + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term" +) + +// OutStream is an output stream used by the DockerCli to write normal program +// output.
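+// It wraps an io.Writer and records the underlying file descriptor and whether it refers to a terminal.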
+type OutStream struct { + CommonStream + out io.Writer +} + +func (o *OutStream) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// SetRawTerminal sets raw mode on the input terminal +func (o *OutStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal { + return nil + } + o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd) + return err +} + +// GetTtySize returns the height and width in characters of the tty +func (o *OutStream) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOutStream returns a new OutStream object from a Writer +func NewOutStream(out io.Writer) *OutStream { + fd, isTerminal := term.GetFdInfo(out) + return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out} +} diff --git a/vendor/github.com/docker/cli/cli/command/prune/prune.go b/vendor/github.com/docker/cli/cli/command/prune/prune.go new file mode 100644 index 0000000..e916a82 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/prune/prune.go @@ -0,0 +1,51 @@ +package prune + +import ( + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/container" + "github.com/docker/cli/cli/command/image" + "github.com/docker/cli/cli/command/network" + "github.com/docker/cli/cli/command/volume" + "github.com/docker/cli/opts" + "github.com/spf13/cobra" +) + +// NewContainerPruneCommand returns a cobra prune command for containers +func NewContainerPruneCommand(dockerCli command.Cli) *cobra.Command { + return container.NewPruneCommand(dockerCli) +} + +// NewVolumePruneCommand returns a cobra prune command for volumes +func NewVolumePruneCommand(dockerCli command.Cli) *cobra.Command { + return volume.NewPruneCommand(dockerCli) +} + +// NewImagePruneCommand returns a cobra prune command for images +func NewImagePruneCommand(dockerCli command.Cli) *cobra.Command { + return image.NewPruneCommand(dockerCli) +} + +// NewNetworkPruneCommand returns a cobra prune command for Networks +func NewNetworkPruneCommand(dockerCli command.Cli) *cobra.Command { + return network.NewPruneCommand(dockerCli) +} + +// RunContainerPrune executes a prune command for containers +func RunContainerPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + return container.RunPrune(dockerCli, filter) +} + +// RunVolumePrune executes a prune command for volumes +func RunVolumePrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + return volume.RunPrune(dockerCli, filter) +} + +// RunImagePrune executes a prune command for images +func RunImagePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + return image.RunPrune(dockerCli, all, filter) +} + +// RunNetworkPrune executes a prune command for networks +func RunNetworkPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) { + return network.RunPrune(dockerCli, filter) +} diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go new file mode 100644 index 0000000..802b3a4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -0,0 +1,189 @@ +package command + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" 
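+	// (the pre-Go-1.7 context path; this vendored tree has not yet moved
+	// to the standard library "context" package)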
+ + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli Cli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.Client().Info(ctx); err != nil { + fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else if info.IndexServerAddress == "" { + fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.CredentialsStore(configKey).Get(configKey) + return a +} + +// ConfigureAuth returns an AuthConfig from the specified user, password and server. +func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.SetIn(NewInStream(os.Stdin)) + } + + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + + authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) + if err != nil { + return authconfig, err + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. 
+ if flPassword == "" && !cli.In().IsTerminal() { + return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return authconfig, errors.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return authconfig, errors.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNormalizedNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/vendor/github.com/docker/cli/cli/command/registry/login.go b/vendor/github.com/docker/cli/cli/command/registry/login.go new file mode 100644 index 0000000..ba1b133 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/registry/login.go @@ -0,0 +1,82 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type loginOptions struct { + serverAddress string + user string + password string +} + +// NewLoginCommand creates a new `docker login` command +func NewLoginCommand(dockerCli command.Cli) 
*cobra.Command { + var opts loginOptions + + cmd := &cobra.Command{ + Use: "login [OPTIONS] [SERVER]", + Short: "Log in to a Docker registry", + Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.serverAddress = args[0] + } + return runLogin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.user, "username", "u", "", "Username") + flags.StringVarP(&opts.password, "password", "p", "", "Password") + + return cmd +} + +func runLogin(dockerCli command.Cli, opts loginOptions) error { + ctx := context.Background() + clnt := dockerCli.Client() + + var ( + serverAddress string + authServer = command.ElectAuthServer(ctx, dockerCli) + ) + if opts.serverAddress != "" && opts.serverAddress != registry.DefaultNamespace { + serverAddress = opts.serverAddress + } else { + serverAddress = authServer + } + + isDefaultRegistry := serverAddress == authServer + + authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + response, err := clnt.RegistryLogin(ctx, authConfig) + if err != nil { + return err + } + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil { + return errors.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(dockerCli.Out(), response.Status) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/registry/logout.go b/vendor/github.com/docker/cli/cli/command/registry/logout.go new file mode 100644 index 0000000..d241df4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/registry/logout.go @@ -0,0 +1,77 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewLogoutCommand creates a new `docker logout` command +func NewLogoutCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout [SERVER]", + Short: "Log out from a Docker registry", + Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var serverAddress string + if len(args) > 0 { + serverAddress = args[0] + } + return runLogout(dockerCli, serverAddress) + }, + } + + return cmd +} + +func runLogout(dockerCli command.Cli, serverAddress string) error { + ctx := context.Background() + var isDefaultRegistry bool + + if serverAddress == "" { + serverAddress = command.ElectAuthServer(ctx, dockerCli) + isDefaultRegistry = true + } + + var ( + loggedIn bool + regsToLogout []string + hostnameAddress = serverAddress + regsToTry = []string{serverAddress} + ) + if !isDefaultRegistry { + hostnameAddress = registry.ConvertToHostname(serverAddress) + // the tries below are kept for backward compatibility where a user could have + // saved the registry in one of the following format. 
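+		// e.g. an entry for "registry.example.com:5000" (an illustrative
+		// address) may have been stored bare, with an "http://" prefix, or
+		// with an "https://" prefix, so all three spellings are checked.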
+ regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + for _, s := range regsToTry { + if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { + loggedIn = true + regsToLogout = append(regsToLogout, s) + } + } + + if !loggedIn { + fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) + return nil + } + + fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) + for _, r := range regsToLogout { + if err := dockerCli.CredentialsStore(r).Erase(r); err != nil { + fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/registry/search.go b/vendor/github.com/docker/cli/cli/command/registry/search.go new file mode 100644 index 0000000..443c9a3 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/registry/search.go @@ -0,0 +1,125 @@ +package registry + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type searchOptions struct { + term string + noTrunc bool + limit int + filter opts.FilterOpt + + // Deprecated + stars uint + automated bool +} + +// NewSearchCommand creates a new `docker search` command +func NewSearchCommand(dockerCli command.Cli) *cobra.Command { + options := searchOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "search [OPTIONS] TERM", + Short: "Search the Docker Hub for images", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.term = args[0] + return runSearch(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.IntVar(&options.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") + + flags.BoolVar(&options.automated, "automated", false, "Only show automated builds") + flags.UintVarP(&options.stars, "stars", "s", 0, "Only displays with at least x stars") + + flags.MarkDeprecated("automated", "use --filter=is-automated=true instead") + flags.MarkDeprecated("stars", "use --filter=stars=3 instead") + + return cmd +} + +func runSearch(dockerCli command.Cli, options searchOptions) error { + indexInfo, err := registry.ParseSearchIndexInfo(options.term) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + searchOptions := types.ImageSearchOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + Filters: options.filter.Value(), + Limit: options.limit, + } + + clnt := dockerCli.Client() + + unorderedResults, err := clnt.ImageSearch(ctx, options.term, searchOptions) + if err != nil { + return err + } + + results := 
searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 + if (options.automated && !res.IsAutomated) || (int(options.stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !options.noTrunc { + desc = stringutils.Ellipsis(desc, 45) + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// searchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/cli/cli/command/stream.go b/vendor/github.com/docker/cli/cli/command/stream.go new file mode 100644 index 0000000..71a43fa --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stream.go @@ -0,0 +1,34 @@ +package command + +import ( + "github.com/docker/docker/pkg/term" +) + +// CommonStream is an input stream used by the DockerCli to read user input +type CommonStream struct { + fd uintptr + isTerminal bool + state *term.State +} + +// FD returns the file descriptor number for this stream +func (s *CommonStream) FD() uintptr { + return s.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (s *CommonStream) IsTerminal() bool { + return s.isTerminal +} + +// RestoreTerminal restores normal mode to the terminal +func (s *CommonStream) RestoreTerminal() { + if s.state != nil { + term.RestoreTerminal(s.fd, s.state) + } +} + +// SetIsTerminal sets the boolean used for isTerminal +func (s *CommonStream) SetIsTerminal(isTerminal bool) { + s.isTerminal = isTerminal +} diff --git a/vendor/github.com/docker/cli/cli/command/system/cmd.go b/vendor/github.com/docker/cli/cli/command/system/cmd.go new file mode 100644 index 0000000..455387c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/cmd.go @@ -0,0 +1,26 @@ +package system + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +// NewSystemCommand returns a cobra command for `system` subcommands +// nolint: interfacer +func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "Manage Docker", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewEventsCommand(dockerCli), + NewInfoCommand(dockerCli), + NewDiskUsageCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/system/df.go b/vendor/github.com/docker/cli/cli/command/system/df.go new file mode 100644 index 0000000..44c662e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/df.go @@ -0,0 +1,68 @@ +package system + +import ( + "errors" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" + "golang.org/x/net/context" 
+) + +type diskUsageOptions struct { + verbose bool + format string +} + +// NewDiskUsageCommand creates a new cobra.Command for `docker df` +func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diskUsageOptions + + cmd := &cobra.Command{ + Use: "df [OPTIONS]", + Short: "Show docker disk usage", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runDiskUsage(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + + return cmd +} + +func runDiskUsage(dockerCli *command.DockerCli, opts diskUsageOptions) error { + if opts.verbose && len(opts.format) != 0 { + return errors.New("the verbose and the format options conflict") + } + + du, err := dockerCli.Client().DiskUsage(context.Background()) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + duCtx := formatter.DiskUsageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewDiskUsageFormat(format), + }, + LayersSize: du.LayersSize, + Images: du.Images, + Containers: du.Containers, + Volumes: du.Volumes, + Verbose: opts.verbose, + } + + return duCtx.Write() +} diff --git a/vendor/github.com/docker/cli/cli/command/system/events.go b/vendor/github.com/docker/cli/cli/command/system/events.go new file mode 100644 index 0000000..ba83d26 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/events.go @@ -0,0 +1,139 @@ +package system + +import ( + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "text/template" + "time" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/templates" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type eventsOptions struct { + since string + until string + filter opts.FilterOpt + format string +} + +// NewEventsCommand creates a new cobra.Command for `docker events` +func NewEventsCommand(dockerCli *command.DockerCli) *cobra.Command { + options := eventsOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "events [OPTIONS]", + Short: "Get real time events from the server", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runEvents(dockerCli, &options) + }, + } + + flags := cmd.Flags() + flags.StringVar(&options.since, "since", "", "Show all events created since timestamp") + flags.StringVar(&options.until, "until", "", "Stream events until this timestamp") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + flags.StringVar(&options.format, "format", "", "Format the output using the given Go template") + + return cmd +} + +func runEvents(dockerCli *command.DockerCli, options *eventsOptions) error { + tmpl, err := makeTemplate(options.format) + if err != nil { + return cli.StatusError{ + StatusCode: 64, + Status: "Error parsing format: " + err.Error()} + } + eventOptions := types.EventsOptions{ + Since: options.since, + Until: options.until, + Filters: options.filter.Value(), + } + + ctx, cancel := context.WithCancel(context.Background()) + events, errs := 
dockerCli.Client().Events(ctx, eventOptions) + defer cancel() + + out := dockerCli.Out() + + for { + select { + case event := <-events: + if err := handleEvent(out, event, tmpl); err != nil { + return err + } + case err := <-errs: + if err == io.EOF { + return nil + } + return err + } + } +} + +func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + if tmpl == nil { + return prettyPrintEvent(out, event) + } + + return formatEvent(out, event, tmpl) +} + +func makeTemplate(format string) (*template.Template, error) { + if format == "" { + return nil, nil + } + tmpl, err := templates.Parse(format) + if err != nil { + return tmpl, err + } + // we execute the template for an empty message, so as to validate + // a bad template like "{{.badFieldString}}" + return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) +} + +// prettyPrintEvent prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { + if event.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(out, "\n") + return nil +} + +func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + defer out.Write([]byte{'\n'}) + return tmpl.Execute(out, event) +} diff --git a/vendor/github.com/docker/cli/cli/command/system/info.go b/vendor/github.com/docker/cli/cli/command/system/info.go new file mode 100644 index 0000000..f2a404e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/info.go @@ -0,0 +1,381 @@ +package system + +import ( + "fmt" + "io" + "sort" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/debug" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/templates" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type infoOptions struct { + format string +} + +// NewInfoCommand creates a new cobra.Command for `docker info` +func NewInfoCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts infoOptions + + cmd := &cobra.Command{ + Use: "info [OPTIONS]", + Short: "Display system-wide information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInfo(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error { + ctx := context.Background() + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if opts.format == "" { + return prettyPrintInfo(dockerCli, info) + } + return formatInfo(dockerCli, info, opts.format) +} + +// nolint: gocyclo +func 
prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error { + fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) + fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) + fprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) + fprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) + } + } + fprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) + fprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(dockerCli.Out(), "Plugins: \n") + fmt.Fprintf(dockerCli.Out(), " Volume:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), " Network:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(dockerCli.Out(), " Authorization:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + } + + fmt.Fprintf(dockerCli.Out(), " Log:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Log, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + + fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) + if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked { + fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) + if info.Swarm.Error != "" { + fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) + } + fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) + if info.Swarm.Cluster != nil && info.Swarm.ControlAvailable && info.Swarm.Error == "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateError { + fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID) + fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) + fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) + fmt.Fprintf(dockerCli.Out(), " Orchestration:\n") + taskHistoryRetentionLimit := int64(0) + if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { + taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", taskHistoryRetentionLimit) + fmt.Fprintf(dockerCli.Out(), " Raft:\n") + fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) + if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { + fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) + } + fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) + fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) + fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n") + fmt.Fprintf(dockerCli.Out(), " 
Heartbeat Period: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod)) + fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n") + fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) + fmt.Fprintf(dockerCli.Out(), " Force Rotate: %d\n", info.Swarm.Cluster.Spec.CAConfig.ForceRotate) + fprintfIfNotEmpty(dockerCli.Out(), " Signing CA Certificate: \n%s\n\n", strings.TrimSpace(info.Swarm.Cluster.Spec.CAConfig.SigningCACert)) + if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintf(dockerCli.Out(), " External CAs:\n") + for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) + } + } + fmt.Fprintf(dockerCli.Out(), " Root Rotation In Progress: %v\n", info.Swarm.Cluster.RootRotationInProgress) + } + fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr) + managers := []string{} + for _, entry := range info.Swarm.RemoteManagers { + managers = append(managers, entry.Addr) + } + if len(managers) > 0 { + sort.Strings(managers) + fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n") + for _, entry := range managers { + fmt.Fprintf(dockerCli.Out(), " %s\n", entry) + } + } + } + + if len(info.Runtimes) > 0 { + fmt.Fprintf(dockerCli.Out(), "Runtimes:") + for name := range info.Runtimes { + fmt.Fprintf(dockerCli.Out(), " %s", name) + } + fmt.Fprint(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) + } + + if info.OSType == "linux" { + fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", info.InitBinary) + + for _, ci := range []struct { + Name string + Commit types.Commit + }{ + {"containerd", info.ContainerdCommit}, + {"runc", info.RuncCommit}, + {"init", info.InitCommit}, + } { + fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) + if ci.Commit.ID != ci.Commit.Expected { + fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) + } + fmt.Fprintf(dockerCli.Out(), "\n") + } + if len(info.SecurityOptions) != 0 { + kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Security Options:\n") + for _, so := range kvs { + fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name) + for _, o := range so.Options { + switch o.Key { + case "profile": + if o.Value != "default" { + fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n") + } + fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value) + } + } + } + } + } + + // Isolation only has meaning on a Windows daemon. 
+ if info.OSType == "windows" { + fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation) + } + + fprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) + fprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) + fprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) + fprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) + fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) + fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + fprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) + fprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) + fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", debug.IsEnabled()) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) + fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) + } + + fprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) + fprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) + fprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) + } + fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) + } + + if info.Labels != nil { + fmt.Fprintln(dockerCli.Out(), "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) + } + // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out + // after 3 release cycles (17.12). For now, a WARNING will be generated. The following will + // be removed eventually. 
+ labelMap := map[string]string{} + for _, label := range info.Labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will throw out a warning + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + fmt.Fprintln(dockerCli.Err(), "WARNING: labels with duplicate keys and conflicting values have been deprecated") + break + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + } + + fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) + } + + if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { + fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") + for _, registry := range info.RegistryConfig.IndexConfigs { + if !registry.Secure { + fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) + } + } + + for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { + mask, _ := registry.Mask.Size() + fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) + } + } + + if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { + fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") + for _, mirror := range info.RegistryConfig.Mirrors { + fmt.Fprintf(dockerCli.Out(), " %s\n", mirror) + } + } + + fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n\n", info.LiveRestoreEnabled) + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + printStorageDriverWarnings(dockerCli, info) + + if !info.MemoryLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + return nil +} + +func printStorageDriverWarnings(dockerCli *command.DockerCli, info types.Info) { + if info.DriverStatus == nil { + return + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Data loop file" { + fmt.Fprintf(dockerCli.Err(), "WARNING: %s: usage of loopback devices is "+ + "strongly discouraged for production use.\n "+ + "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.\n", info.Driver) + } + if pair[0] == "Supports d_type" && pair[1] == "false" { + backingFs := getBackingFs(info) + + msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads 
to incorrect behavior.\n", info.Driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" + } + msg += " Running without d_type support will not be supported in future releases." + fmt.Fprintln(dockerCli.Err(), msg) + } + } +} + +func getBackingFs(info types.Info) string { + if info.DriverStatus == nil { + return "" + } + + for _, pair := range info.DriverStatus { + if pair[0] == "Backing Filesystem" { + return pair[1] + } + } + return "" +} + +func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) error { + tmpl, err := templates.Parse(format) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + err = tmpl.Execute(dockerCli.Out(), info) + dockerCli.Out().Write([]byte{'\n'}) + return err +} + +func fprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} diff --git a/vendor/github.com/docker/cli/cli/command/system/inspect.go b/vendor/github.com/docker/cli/cli/command/system/inspect.go new file mode 100644 index 0000000..6d079cd --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/inspect.go @@ -0,0 +1,216 @@ +package system + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/docker/docker/api/types" + apiclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + format string + inspectType string + size bool + ids []string +} + +// NewInspectCommand creates a new cobra.Command for `docker inspect` +func NewInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", + Short: "Return low-level information on Docker objects", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.ids = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + var elementSearcher inspect.GetRefFunc + switch opts.inspectType { + case "", "container", "image", "node", "network", "service", "volume", "task", "plugin", "secret": + elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) + default: + return errors.Errorf("%q is not a valid value for --type", opts.inspectType) + } + return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) +} + +func inspectContainers(ctx context.Context, dockerCli *command.DockerCli, getSize bool) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) + } +} + +func inspectImages(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ImageInspectWithRaw(ctx, ref) + } +} + +func inspectNetwork(ctx context.Context, dockerCli 
*command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NetworkInspectWithRaw(ctx, ref, types.NetworkInspectOptions{}) + } +} + +func inspectNode(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NodeInspectWithRaw(ctx, ref) + } +} + +func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + // Service inspect shows defaults values in empty fields. + return dockerCli.Client().ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true}) + } +} + +func inspectTasks(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().TaskInspectWithRaw(ctx, ref) + } +} + +func inspectVolume(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) + } +} + +func inspectPlugin(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().PluginInspectWithRaw(ctx, ref) + } +} + +func inspectSecret(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().SecretInspectWithRaw(ctx, ref) + } +} + +func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc { + var inspectAutodetect = []struct { + objectType string + isSizeSupported bool + isSwarmObject bool + objectInspector func(string) (interface{}, []byte, error) + }{ + { + objectType: "container", + isSizeSupported: true, + objectInspector: inspectContainers(ctx, dockerCli, getSize), + }, + { + objectType: "image", + objectInspector: inspectImages(ctx, dockerCli), + }, + { + objectType: "network", + objectInspector: inspectNetwork(ctx, dockerCli), + }, + { + objectType: "volume", + objectInspector: inspectVolume(ctx, dockerCli), + }, + { + objectType: "service", + isSwarmObject: true, + objectInspector: inspectService(ctx, dockerCli), + }, + { + objectType: "task", + isSwarmObject: true, + objectInspector: inspectTasks(ctx, dockerCli), + }, + { + objectType: "node", + isSwarmObject: true, + objectInspector: inspectNode(ctx, dockerCli), + }, + { + objectType: "plugin", + objectInspector: inspectPlugin(ctx, dockerCli), + }, + { + objectType: "secret", + isSwarmObject: true, + objectInspector: inspectSecret(ctx, dockerCli), + }, + } + + // isSwarmManager does an Info API call to verify that the daemon is + // a swarm manager. 
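+	// Its result gates the swarm-only rows of the table above (service,
+	// task, node, secret): against a worker or standalone daemon those
+	// inspectors are skipped wholesale instead of failing one ref at a time.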
+ isSwarmManager := func() bool { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return false + } + return info.Swarm.ControlAvailable + } + + isErrNotSupported := func(err error) bool { + return strings.Contains(err.Error(), "not supported") + } + + return func(ref string) (interface{}, []byte, error) { + const ( + swarmSupportUnknown = iota + swarmSupported + swarmUnsupported + ) + + isSwarmSupported := swarmSupportUnknown + + for _, inspectData := range inspectAutodetect { + if typeConstraint != "" && inspectData.objectType != typeConstraint { + continue + } + if typeConstraint == "" && inspectData.isSwarmObject { + if isSwarmSupported == swarmSupportUnknown { + if isSwarmManager() { + isSwarmSupported = swarmSupported + } else { + isSwarmSupported = swarmUnsupported + } + } + if isSwarmSupported == swarmUnsupported { + continue + } + } + v, raw, err := inspectData.objectInspector(ref) + if err != nil { + if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSupported(err)) { + continue + } + return v, raw, err + } + if getSize && !inspectData.isSizeSupported { + fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) + } + return v, raw, err + } + return nil, nil, errors.Errorf("Error: No such object: %s", ref) + } +} diff --git a/vendor/github.com/docker/cli/cli/command/system/prune.go b/vendor/github.com/docker/cli/cli/command/system/prune.go new file mode 100644 index 0000000..fca9c98 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/prune.go @@ -0,0 +1,103 @@ +package system + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/prune" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + pruneVolumes bool + filter opts.FilterOpt +} + +// NewPruneCommand creates a new cobra.Command for `docker prune` +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused data", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPrune(dockerCli, options) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images not just dangling ones") + flags.BoolVar(&options.pruneVolumes, "volumes", false, "Prune volumes") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'label==')") + // "filter" flag is available in 1.28 (docker 17.04) and up + flags.SetAnnotation("filter", "version", []string{"1.28"}) + + return cmd +} + +const ( + warning = `WARNING! 
This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + %s +Are you sure you want to continue?` + + danglingImageDesc = "- all dangling images" + allImageDesc = `- all images without at least one container associated to them` +) + +func runPrune(dockerCli command.Cli, options pruneOptions) error { + var message string + + if options.all { + message = fmt.Sprintf(warning, allImageDesc) + } else { + message = fmt.Sprintf(warning, danglingImageDesc) + } + + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) { + return nil + } + + var spaceReclaimed uint64 + pruneFuncs := []func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error){ + prune.RunContainerPrune, + prune.RunNetworkPrune, + } + if options.pruneVolumes { + pruneFuncs = append(pruneFuncs, prune.RunVolumePrune) + } + + for _, pruneFn := range pruneFuncs { + spc, output, err := pruneFn(dockerCli, options.filter) + if err != nil { + return err + } + spaceReclaimed += spc + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + } + + spc, output, err := prune.RunImagePrune(dockerCli, options.all, options.filter) + if err != nil { + return err + } + if spc > 0 { + spaceReclaimed += spc + fmt.Fprintln(dockerCli.Out(), output) + } + + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/system/version.go b/vendor/github.com/docker/cli/cli/command/system/version.go new file mode 100644 index 0000000..21c5a6d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/system/version.go @@ -0,0 +1,130 @@ +package system + +import ( + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/templates" + "github.com/spf13/cobra" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}}{{if ne .Client.APIVersion .Client.DefaultAPIVersion}} (downgraded from {{.Client.DefaultAPIVersion}}){{end}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} (minimum version {{.Server.MinAPIVersion}}) + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}} + Experimental: {{.Server.Experimental}}{{end}}` + +type versionOptions struct { + format string +} + +// versionInfo contains version information of both the Client, and Server +type versionInfo struct { + Client clientVersion + Server *types.Version +} + +type clientVersion struct { + Version string + APIVersion string `json:"ApiVersion"` + DefaultAPIVersion string `json:"DefaultAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + BuildTime string `json:",omitempty"` +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. 
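+//
+// The default versionTemplate above relies on this: when the daemon is
+// unreachable, {{if .ServerOK}} suppresses the whole Server block rather
+// than rendering it with empty fields. A quick sketch (assuming os and the
+// templates import above; the version string is illustrative):
+//
+//	vd := versionInfo{Client: clientVersion{Version: "17.06.0"}} // no Server
+//	tmpl, _ := templates.Parse(versionTemplate)
+//	_ = tmpl.Execute(os.Stdout, vd) // prints only the Client block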
+func (v versionInfo) ServerOK() bool { + return v.Server != nil +} + +// NewVersionCommand creates a new cobra.Command for `docker version` +func NewVersionCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts versionOptions + + cmd := &cobra.Command{ + Use: "version [OPTIONS]", + Short: "Show the Docker version information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runVersion(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error { + ctx := context.Background() + + templateFormat := versionTemplate + if opts.format != "" { + templateFormat = opts.format + } + + tmpl, err := templates.Parse(templateFormat) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := versionInfo{ + Client: clientVersion{ + Version: cli.Version, + APIVersion: dockerCli.Client().ClientVersion(), + DefaultAPIVersion: dockerCli.DefaultVersion(), + GoVersion: runtime.Version(), + GitCommit: cli.GitCommit, + BuildTime: cli.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + + serverVersion, err := dockerCli.Client().ServerVersion(ctx) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { + err = err2 + } + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/cli/cli/command/trust.go b/vendor/github.com/docker/cli/cli/command/trust.go new file mode 100644 index 0000000..c0742bc --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/trust.go @@ -0,0 +1,43 @@ +package command + +import ( + "os" + "strconv" + + "github.com/spf13/pflag" +) + +var ( + // TODO: make this not global + untrusted bool +) + +// AddTrustVerificationFlags adds content trust flags to the provided flagset +func AddTrustVerificationFlags(fs *pflag.FlagSet) { + trusted := getDefaultTrustState() + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification") +} + +// AddTrustSigningFlags adds "signing" flags to the provided flagset +func AddTrustSigningFlags(fs *pflag.FlagSet) { + trusted := getDefaultTrustState() + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing") +} + +// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable. +func getDefaultTrustState() bool { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + return trusted +} + +// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable, +// or through `--disabled-content-trust=false` on a command. 
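+//
+// For example, DOCKER_CONTENT_TRUST=1 (or any value that fails to parse as
+// a boolean, which is also treated as true) makes IsTrusted return true
+// unless --disable-content-trust is passed explicitly. A quick sketch
+// (assuming fmt and os imports; the flag set name is illustrative):
+//
+//	os.Setenv("DOCKER_CONTENT_TRUST", "1")
+//	fs := pflag.NewFlagSet("push", pflag.ContinueOnError)
+//	AddTrustSigningFlags(fs)
+//	fmt.Println(IsTrusted()) // true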
+func IsTrusted() bool { + return !untrusted +} diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go new file mode 100644 index 0000000..3f4acaa --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/utils.go @@ -0,0 +1,119 @@ +package command + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/system" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + // We use sequential file access here to avoid depleting the standby list + // on Windows. On Linux, this is a call directly to ioutil.TempFile + tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. +// This will display the provided message followed by ' [y/N] '. If +// the user input 'y' or 'Y' it returns true other false. If no +// message is provided "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" + } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. + if runtime.GOOS == "windows" { + ins = NewInStream(os.Stdin) + } + + reader := bufio.NewReader(ins) + answer, _, _ := reader.ReadLine() + return strings.ToLower(string(answer)) == "y" +} + +// PruneFilters returns consolidated prune filters obtained from config.json and cli +func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { + if dockerCli.ConfigFile() == nil { + return pruneFilters + } + for _, f := range dockerCli.ConfigFile().PruneFilters { + parts := strings.SplitN(f, "=", 2) + if len(parts) != 2 { + continue + } + if parts[0] == "label" { + // CLI label filter supersede config.json. + // If CLI label filter conflict with config.json, + // skip adding label! filter in config.json. + if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { + continue + } + } else if parts[0] == "label!" { + // CLI label! filter supersede config.json. + // If CLI label! filter conflict with config.json, + // skip adding label filter in config.json. 
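+			// e.g. a config.json entry "label!=env=prod" is dropped when the
+			// CLI was invoked with --filter label=env=prod (values here are
+			// illustrative), so the two sources can never contradict.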
+			if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) {
+				continue
+			}
+		}
+		pruneFilters.Add(parts[0], parts[1])
+	}
+
+	return pruneFilters
+}
diff --git a/vendor/github.com/docker/cli/cli/command/volume/cmd.go b/vendor/github.com/docker/cli/cli/command/volume/cmd.go
new file mode 100644
index 0000000..cf6e61d
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/volume/cmd.go
@@ -0,0 +1,26 @@
+package volume
+
+import (
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/spf13/cobra"
+)
+
+// NewVolumeCommand returns a cobra command for `volume` subcommands
+func NewVolumeCommand(dockerCli command.Cli) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "volume COMMAND",
+		Short: "Manage volumes",
+		Args:  cli.NoArgs,
+		RunE:  command.ShowHelp(dockerCli.Err()),
+		Tags:  map[string]string{"version": "1.21"},
+	}
+	cmd.AddCommand(
+		newCreateCommand(dockerCli),
+		newInspectCommand(dockerCli),
+		newListCommand(dockerCli),
+		newRemoveCommand(dockerCli),
+		NewPruneCommand(dockerCli),
+	)
+	return cmd
+}
diff --git a/vendor/github.com/docker/cli/cli/command/volume/create.go b/vendor/github.com/docker/cli/cli/command/volume/create.go
new file mode 100644
index 0000000..5a10922
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/volume/create.go
@@ -0,0 +1,69 @@
+package volume
+
+import (
+	"fmt"
+
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/opts"
+	volumetypes "github.com/docker/docker/api/types/volume"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
+)
+
+type createOptions struct {
+	name       string
+	driver     string
+	driverOpts opts.MapOpts
+	labels     opts.ListOpts
+}
+
+func newCreateCommand(dockerCli command.Cli) *cobra.Command {
+	options := createOptions{
+		driverOpts: *opts.NewMapOpts(nil, nil),
+		labels:     opts.NewListOpts(opts.ValidateEnv),
+	}
+
+	cmd := &cobra.Command{
+		Use:   "create [OPTIONS] [VOLUME]",
+		Short: "Create a volume",
+		Args:  cli.RequiresMaxArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) == 1 {
+				if options.name != "" {
+					return errors.Errorf("conflicting options: either specify --name or provide positional arg, not both")
+				}
+				options.name = args[0]
+			}
+			return runCreate(dockerCli, options)
+		},
+	}
+	flags := cmd.Flags()
+	flags.StringVarP(&options.driver, "driver", "d", "local", "Specify volume driver name")
+	flags.StringVar(&options.name, "name", "", "Specify volume name")
+	flags.Lookup("name").Hidden = true
+	flags.VarP(&options.driverOpts, "opt", "o", "Set driver-specific options")
+	flags.Var(&options.labels, "label", "Set metadata for a volume")
+
+	return cmd
+}
+
+func runCreate(dockerCli command.Cli, options createOptions) error {
+	client := dockerCli.Client()
+
+	volReq := volumetypes.VolumesCreateBody{
+		Driver:     options.driver,
+		DriverOpts: options.driverOpts.GetAll(),
+		Name:       options.name,
+		Labels:     opts.ConvertKVStringsToMap(options.labels.GetAll()),
+	}
+
+	vol, err := client.VolumeCreate(context.Background(), volReq)
+	if err != nil {
+		return err
+	}
+
+	fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name)
+	return nil
+}
diff --git a/vendor/github.com/docker/cli/cli/command/volume/inspect.go b/vendor/github.com/docker/cli/cli/command/volume/inspect.go
new file mode 100644
index 0000000..b8729e2
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/volume/inspect.go
@@ -0,0 +1,45 @@
+package volume
+
+import (
+	"github.com/docker/cli/cli"
+	
"github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] VOLUME [VOLUME...]", + Short: "Display detailed information on one or more volumes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getVolFunc := func(name string) (interface{}, []byte, error) { + i, err := client.VolumeInspect(ctx, name) + return i, nil, err + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) +} diff --git a/vendor/github.com/docker/cli/cli/command/volume/list.go b/vendor/github.com/docker/cli/cli/command/volume/list.go new file mode 100644 index 0000000..d9b5ef8 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/volume/list.go @@ -0,0 +1,73 @@ +package volume + +import ( + "sort" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + options := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, options) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display volume names") + flags.StringVar(&options.format, "format", "", "Pretty-print volumes using a Go template") + flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 
'dangling=true')") + + return cmd +} + +func runList(dockerCli command.Cli, options listOptions) error { + client := dockerCli.Client() + volumes, err := client.VolumeList(context.Background(), options.filter.Value()) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().VolumesFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byVolumeName(volumes.Volumes)) + + volumeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewVolumeFormat(format, options.quiet), + } + return formatter.VolumeWrite(volumeCtx, volumes.Volumes) +} diff --git a/vendor/github.com/docker/cli/cli/command/volume/prune.go b/vendor/github.com/docker/cli/cli/command/volume/prune.go new file mode 100644 index 0000000..157f883 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/volume/prune.go @@ -0,0 +1,78 @@ +package volume + +import ( + "fmt" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type pruneOptions struct { + force bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for volumes +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'label=